sk                215 arch/arc/include/asm/entry.h 	ld  \out, [\tsk, TASK_THREAD_INFO]
sk                255 arch/arc/include/asm/entry.h 	st   \tsk, [\tmp]
sk                257 arch/arc/include/asm/entry.h 	mov r25, \tsk
sk                270 arch/arc/include/asm/entry.h 	st  \tsk, [@_current_task]
sk                272 arch/arc/include/asm/entry.h 	mov r25, \tsk
sk                 87 arch/arm/include/asm/uaccess-asm.h 	ldr	\tmp1, [\tsk, #TI_ADDR_LIMIT]
sk                 89 arch/arm/include/asm/uaccess-asm.h 	str	\tmp2, [\tsk, #TI_ADDR_LIMIT]
sk                111 arch/arm/include/asm/uaccess-asm.h 	str	\tmp1, [\tsk, #TI_ADDR_LIMIT]
sk                120 crypto/af_alg.c 	if (sock->sk) {
sk                121 crypto/af_alg.c 		sock_put(sock->sk);
sk                122 crypto/af_alg.c 		sock->sk = NULL;
sk                128 crypto/af_alg.c void af_alg_release_parent(struct sock *sk)
sk                130 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                134 crypto/af_alg.c 	sk = ask->parent;
sk                135 crypto/af_alg.c 	ask = alg_sk(sk);
sk                138 crypto/af_alg.c 	bh_lock_sock(sk);
sk                142 crypto/af_alg.c 	bh_unlock_sock(sk);
sk                146 crypto/af_alg.c 		sock_put(sk);
sk                153 crypto/af_alg.c 	struct sock *sk = sock->sk;
sk                154 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                189 crypto/af_alg.c 	lock_sock(sk);
sk                199 crypto/af_alg.c 	release_sock(sk);
sk                206 crypto/af_alg.c static int alg_setkey(struct sock *sk, char __user *ukey,
sk                209 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                214 crypto/af_alg.c 	key = sock_kmalloc(sk, keylen, GFP_KERNEL);
sk                225 crypto/af_alg.c 	sock_kzfree_s(sk, key, keylen);
sk                233 crypto/af_alg.c 	struct sock *sk = sock->sk;
sk                234 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                238 crypto/af_alg.c 	lock_sock(sk);
sk                255 crypto/af_alg.c 		err = alg_setkey(sk, optval, optlen);
sk                266 crypto/af_alg.c 	release_sock(sk);
sk                271 crypto/af_alg.c int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
sk                273 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                279 crypto/af_alg.c 	lock_sock(sk);
sk                286 crypto/af_alg.c 	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
sk                293 crypto/af_alg.c 	security_sk_clone(sk, sk2);
sk                305 crypto/af_alg.c 		sock_hold(sk);
sk                307 crypto/af_alg.c 	alg_sk(sk2)->parent = sk;
sk                320 crypto/af_alg.c 	release_sock(sk);
sk                329 crypto/af_alg.c 	return af_alg_accept(sock->sk, newsock, kern);
sk                354 crypto/af_alg.c static void alg_sock_destruct(struct sock *sk)
sk                356 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                364 crypto/af_alg.c 	struct sock *sk;
sk                373 crypto/af_alg.c 	sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern);
sk                374 crypto/af_alg.c 	if (!sk)
sk                378 crypto/af_alg.c 	sock_init_data(sock, sk);
sk                380 crypto/af_alg.c 	sk->sk_destruct = alg_sock_destruct;
sk                486 crypto/af_alg.c static int af_alg_alloc_tsgl(struct sock *sk)
sk                488 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                498 crypto/af_alg.c 		sgl = sock_kmalloc(sk,
sk                527 crypto/af_alg.c unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
sk                529 crypto/af_alg.c 	const struct alg_sock *ask = alg_sk(sk);
sk                583 crypto/af_alg.c void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
sk                586 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                637 crypto/af_alg.c 		sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 1));
sk                652 crypto/af_alg.c 	struct sock *sk = areq->sk;
sk                653 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                665 crypto/af_alg.c 			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
sk                676 crypto/af_alg.c 		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
sk                687 crypto/af_alg.c static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags)
sk                696 crypto/af_alg.c 	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                698 crypto/af_alg.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                703 crypto/af_alg.c 		if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) {
sk                708 crypto/af_alg.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                718 crypto/af_alg.c void af_alg_wmem_wakeup(struct sock *sk)
sk                722 crypto/af_alg.c 	if (!af_alg_writable(sk))
sk                726 crypto/af_alg.c 	wq = rcu_dereference(sk->sk_wq);
sk                731 crypto/af_alg.c 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
sk                743 crypto/af_alg.c int af_alg_wait_for_data(struct sock *sk, unsigned flags)
sk                746 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                754 crypto/af_alg.c 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                756 crypto/af_alg.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                761 crypto/af_alg.c 		if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
sk                767 crypto/af_alg.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                769 crypto/af_alg.c 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                780 crypto/af_alg.c static void af_alg_data_wakeup(struct sock *sk)
sk                782 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                790 crypto/af_alg.c 	wq = rcu_dereference(sk->sk_wq);
sk                795 crypto/af_alg.c 	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
sk                818 crypto/af_alg.c 	struct sock *sk = sock->sk;
sk                819 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                849 crypto/af_alg.c 	lock_sock(sk);
sk                892 crypto/af_alg.c 		if (!af_alg_writable(sk)) {
sk                893 crypto/af_alg.c 			err = af_alg_wait_for_wmem(sk, msg->msg_flags);
sk                899 crypto/af_alg.c 		len = min_t(unsigned long, len, af_alg_sndbuf(sk));
sk                901 crypto/af_alg.c 		err = af_alg_alloc_tsgl(sk);
sk                949 crypto/af_alg.c 	af_alg_data_wakeup(sk);
sk                950 crypto/af_alg.c 	release_sock(sk);
sk                964 crypto/af_alg.c 	struct sock *sk = sock->sk;
sk                965 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk                973 crypto/af_alg.c 	lock_sock(sk);
sk                980 crypto/af_alg.c 	if (!af_alg_writable(sk)) {
sk                981 crypto/af_alg.c 		err = af_alg_wait_for_wmem(sk, flags);
sk                986 crypto/af_alg.c 	err = af_alg_alloc_tsgl(sk);
sk               1007 crypto/af_alg.c 	af_alg_data_wakeup(sk);
sk               1008 crypto/af_alg.c 	release_sock(sk);
sk               1019 crypto/af_alg.c 	struct sock *sk = areq->sk;
sk               1022 crypto/af_alg.c 	sock_kfree_s(sk, areq, areq->areqlen);
sk               1038 crypto/af_alg.c 	struct sock *sk = areq->sk;
sk               1046 crypto/af_alg.c 	sock_put(sk);
sk               1058 crypto/af_alg.c 	struct sock *sk = sock->sk;
sk               1059 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk               1069 crypto/af_alg.c 	if (af_alg_writable(sk))
sk               1083 crypto/af_alg.c struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
sk               1086 crypto/af_alg.c 	struct af_alg_async_req *areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
sk               1092 crypto/af_alg.c 	areq->sk = sk;
sk               1114 crypto/af_alg.c int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
sk               1118 crypto/af_alg.c 	struct alg_sock *ask = alg_sk(sk);
sk               1128 crypto/af_alg.c 		if (!af_alg_readable(sk))
sk               1137 crypto/af_alg.c 			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
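
The alg_bind(), alg_setkey() and af_alg_accept() entries above are the kernel half of the AF_ALG socket interface. A minimal userspace sketch of how they are driven (sha256 through the "hash" type; error handling omitted; for keyed algorithms a setsockopt(SOL_ALG, ALG_SET_KEY, ...) on the bound socket reaches alg_setkey()):

#include <sys/socket.h>
#include <linux/if_alg.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",		/* served by algif_hash.c */
		.salg_name   = "sha256",
	};
	unsigned char digest[32];
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);	 /* alg_create()    */
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)); /* alg_bind()      */
	opfd = accept(tfmfd, NULL, 0);			 /* af_alg_accept() */

	write(opfd, "abc", 3);				 /* hash_sendmsg()  */
	read(opfd, digest, sizeof(digest));		 /* hash_recvmsg()  */

	close(opfd);
	close(tfmfd);
	return 0;
}
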
sk                 44 crypto/algif_aead.c static inline bool aead_sufficient_data(struct sock *sk)
sk                 46 crypto/algif_aead.c 	struct alg_sock *ask = alg_sk(sk);
sk                 63 crypto/algif_aead.c 	struct sock *sk = sock->sk;
sk                 64 crypto/algif_aead.c 	struct alg_sock *ask = alg_sk(sk);
sk                 91 crypto/algif_aead.c 	struct sock *sk = sock->sk;
sk                 92 crypto/algif_aead.c 	struct alg_sock *ask = alg_sk(sk);
sk                110 crypto/algif_aead.c 		err = af_alg_wait_for_data(sk, flags);
sk                130 crypto/algif_aead.c 	if (!aead_sufficient_data(sk))
sk                153 crypto/algif_aead.c 	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
sk                159 crypto/algif_aead.c 	err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
sk                230 crypto/algif_aead.c 		af_alg_pull_tsgl(sk, processed, NULL, 0);
sk                250 crypto/algif_aead.c 		areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
sk                254 crypto/algif_aead.c 		areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
sk                264 crypto/algif_aead.c 		af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
sk                287 crypto/algif_aead.c 		sock_hold(sk);
sk                303 crypto/algif_aead.c 		sock_put(sk);
sk                325 crypto/algif_aead.c 	struct sock *sk = sock->sk;
sk                328 crypto/algif_aead.c 	lock_sock(sk);
sk                350 crypto/algif_aead.c 	af_alg_wmem_wakeup(sk);
sk                351 crypto/algif_aead.c 	release_sock(sk);
sk                383 crypto/algif_aead.c 	struct sock *sk = sock->sk;
sk                384 crypto/algif_aead.c 	struct alg_sock *ask = alg_sk(sk);
sk                386 crypto/algif_aead.c 	lock_sock(sk);
sk                410 crypto/algif_aead.c 	release_sock(sk);
sk                525 crypto/algif_aead.c static void aead_sock_destruct(struct sock *sk)
sk                527 crypto/algif_aead.c 	struct alg_sock *ask = alg_sk(sk);
sk                535 crypto/algif_aead.c 	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sk                536 crypto/algif_aead.c 	sock_kzfree_s(sk, ctx->iv, ivlen);
sk                537 crypto/algif_aead.c 	sock_kfree_s(sk, ctx, ctx->len);
sk                538 crypto/algif_aead.c 	af_alg_release_parent(sk);
sk                541 crypto/algif_aead.c static int aead_accept_parent_nokey(void *private, struct sock *sk)
sk                544 crypto/algif_aead.c 	struct alg_sock *ask = alg_sk(sk);
sk                550 crypto/algif_aead.c 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
sk                555 crypto/algif_aead.c 	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
sk                557 crypto/algif_aead.c 		sock_kfree_s(sk, ctx, len);
sk                574 crypto/algif_aead.c 	sk->sk_destruct = aead_sock_destruct;
sk                579 crypto/algif_aead.c static int aead_accept_parent(void *private, struct sock *sk)
sk                586 crypto/algif_aead.c 	return aead_accept_parent_nokey(private, sk);
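
The sock_kmalloc()/sock_kfree_s()/sock_kzfree_s() calls that recur through these algif entries charge per-connection context memory against the socket's option-memory limit. A compact, hypothetical illustration of the pattern (placeholder names, not the algif code; sock_kzfree_s() zeroes the buffer before freeing, which is why it is used for IVs and keys):

#include <net/sock.h>

struct example_ctx {
	void		*iv;
	unsigned int	ivlen;
};

static struct example_ctx *example_ctx_alloc(struct sock *sk,
					     unsigned int ivlen)
{
	struct example_ctx *ctx;

	ctx = sock_kmalloc(sk, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, sizeof(*ctx));
		return NULL;
	}
	ctx->ivlen = ivlen;
	return ctx;
}

static void example_ctx_free(struct sock *sk, struct example_ctx *ctx)
{
	sock_kzfree_s(sk, ctx->iv, ctx->ivlen);	/* wipe IV/key material */
	sock_kfree_s(sk, ctx, sizeof(*ctx));
}
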
sk                 32 crypto/algif_hash.c static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
sk                 41 crypto/algif_hash.c 	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
sk                 50 crypto/algif_hash.c static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
sk                 59 crypto/algif_hash.c 	sock_kzfree_s(sk, ctx->result, ds);
sk                 67 crypto/algif_hash.c 	struct sock *sk = sock->sk;
sk                 68 crypto/algif_hash.c 	struct alg_sock *ask = alg_sk(sk);
sk                 73 crypto/algif_hash.c 	if (limit > sk->sk_sndbuf)
sk                 74 crypto/algif_hash.c 		limit = sk->sk_sndbuf;
sk                 76 crypto/algif_hash.c 	lock_sock(sk);
sk                 79 crypto/algif_hash.c 			hash_free_result(sk, ctx);
sk                116 crypto/algif_hash.c 		err = hash_alloc_result(sk, ctx);
sk                126 crypto/algif_hash.c 	release_sock(sk);
sk                134 crypto/algif_hash.c 	struct sock *sk = sock->sk;
sk                135 crypto/algif_hash.c 	struct alg_sock *ask = alg_sk(sk);
sk                142 crypto/algif_hash.c 	lock_sock(sk);
sk                147 crypto/algif_hash.c 		err = hash_alloc_result(sk, ctx);
sk                151 crypto/algif_hash.c 		hash_free_result(sk, ctx);
sk                178 crypto/algif_hash.c 	release_sock(sk);
sk                186 crypto/algif_hash.c 	struct sock *sk = sock->sk;
sk                187 crypto/algif_hash.c 	struct alg_sock *ask = alg_sk(sk);
sk                198 crypto/algif_hash.c 	lock_sock(sk);
sk                200 crypto/algif_hash.c 	err = hash_alloc_result(sk, ctx);
sk                224 crypto/algif_hash.c 	hash_free_result(sk, ctx);
sk                225 crypto/algif_hash.c 	release_sock(sk);
sk                233 crypto/algif_hash.c 	struct sock *sk = sock->sk;
sk                234 crypto/algif_hash.c 	struct alg_sock *ask = alg_sk(sk);
sk                244 crypto/algif_hash.c 	lock_sock(sk);
sk                247 crypto/algif_hash.c 	release_sock(sk);
sk                256 crypto/algif_hash.c 	sk2 = newsock->sk;
sk                300 crypto/algif_hash.c 	struct sock *sk = sock->sk;
sk                301 crypto/algif_hash.c 	struct alg_sock *ask = alg_sk(sk);
sk                303 crypto/algif_hash.c 	lock_sock(sk);
sk                327 crypto/algif_hash.c 	release_sock(sk);
sk                416 crypto/algif_hash.c static void hash_sock_destruct(struct sock *sk)
sk                418 crypto/algif_hash.c 	struct alg_sock *ask = alg_sk(sk);
sk                421 crypto/algif_hash.c 	hash_free_result(sk, ctx);
sk                422 crypto/algif_hash.c 	sock_kfree_s(sk, ctx, ctx->len);
sk                423 crypto/algif_hash.c 	af_alg_release_parent(sk);
sk                426 crypto/algif_hash.c static int hash_accept_parent_nokey(void *private, struct sock *sk)
sk                429 crypto/algif_hash.c 	struct alg_sock *ask = alg_sk(sk);
sk                433 crypto/algif_hash.c 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
sk                448 crypto/algif_hash.c 	sk->sk_destruct = hash_sock_destruct;
sk                453 crypto/algif_hash.c static int hash_accept_parent(void *private, struct sock *sk)
sk                460 crypto/algif_hash.c 	return hash_accept_parent_nokey(private, sk);
sk                 61 crypto/algif_rng.c 	struct sock *sk = sock->sk;
sk                 62 crypto/algif_rng.c 	struct alg_sock *ask = alg_sk(sk);
sk                126 crypto/algif_rng.c static void rng_sock_destruct(struct sock *sk)
sk                128 crypto/algif_rng.c 	struct alg_sock *ask = alg_sk(sk);
sk                131 crypto/algif_rng.c 	sock_kfree_s(sk, ctx, ctx->len);
sk                132 crypto/algif_rng.c 	af_alg_release_parent(sk);
sk                135 crypto/algif_rng.c static int rng_accept_parent(void *private, struct sock *sk)
sk                138 crypto/algif_rng.c 	struct alg_sock *ask = alg_sk(sk);
sk                141 crypto/algif_rng.c 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
sk                155 crypto/algif_rng.c 	sk->sk_destruct = rng_sock_destruct;
sk                 40 crypto/algif_skcipher.c 	struct sock *sk = sock->sk;
sk                 41 crypto/algif_skcipher.c 	struct alg_sock *ask = alg_sk(sk);
sk                 53 crypto/algif_skcipher.c 	struct sock *sk = sock->sk;
sk                 54 crypto/algif_skcipher.c 	struct alg_sock *ask = alg_sk(sk);
sk                 65 crypto/algif_skcipher.c 		err = af_alg_wait_for_data(sk, flags);
sk                 71 crypto/algif_skcipher.c 	areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
sk                 77 crypto/algif_skcipher.c 	err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
sk                 96 crypto/algif_skcipher.c 	areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
sk                 99 crypto/algif_skcipher.c 	areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
sk                107 crypto/algif_skcipher.c 	af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
sk                116 crypto/algif_skcipher.c 		sock_hold(sk);
sk                133 crypto/algif_skcipher.c 		sock_put(sk);
sk                156 crypto/algif_skcipher.c 	struct sock *sk = sock->sk;
sk                159 crypto/algif_skcipher.c 	lock_sock(sk);
sk                181 crypto/algif_skcipher.c 	af_alg_wmem_wakeup(sk);
sk                182 crypto/algif_skcipher.c 	release_sock(sk);
sk                214 crypto/algif_skcipher.c 	struct sock *sk = sock->sk;
sk                215 crypto/algif_skcipher.c 	struct alg_sock *ask = alg_sk(sk);
sk                217 crypto/algif_skcipher.c 	lock_sock(sk);
sk                241 crypto/algif_skcipher.c 	release_sock(sk);
sk                319 crypto/algif_skcipher.c static void skcipher_sock_destruct(struct sock *sk)
sk                321 crypto/algif_skcipher.c 	struct alg_sock *ask = alg_sk(sk);
sk                327 crypto/algif_skcipher.c 	af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sk                328 crypto/algif_skcipher.c 	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sk                329 crypto/algif_skcipher.c 	sock_kfree_s(sk, ctx, ctx->len);
sk                330 crypto/algif_skcipher.c 	af_alg_release_parent(sk);
sk                333 crypto/algif_skcipher.c static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
sk                336 crypto/algif_skcipher.c 	struct alg_sock *ask = alg_sk(sk);
sk                340 crypto/algif_skcipher.c 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
sk                344 crypto/algif_skcipher.c 	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
sk                347 crypto/algif_skcipher.c 		sock_kfree_s(sk, ctx, len);
sk                364 crypto/algif_skcipher.c 	sk->sk_destruct = skcipher_sock_destruct;
sk                369 crypto/algif_skcipher.c static int skcipher_accept_parent(void *private, struct sock *sk)
sk                376 crypto/algif_skcipher.c 	return skcipher_accept_parent_nokey(private, sk);
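
The sock_hold()/sock_put() pairs above pin the socket across an asynchronous cipher request so the completion callback can still dereference it. A simplified sketch of that lifetime rule (hypothetical request structure; the older two-argument completion signature is assumed, and the caller is assumed to have set the tfm, buffers and IV on the request):

#include <net/sock.h>
#include <crypto/skcipher.h>

struct example_areq {
	struct sock		*sk;
	struct skcipher_request	req;	/* request context follows it */
};

static void example_async_done(struct crypto_async_request *base, int err)
{
	struct example_areq *areq = base->data;
	struct sock *sk = areq->sk;

	/* ... copy results out, wake the reader ... */
	sock_put(sk);			/* balances sock_hold() below */
}

static int example_submit(struct sock *sk, struct example_areq *areq)
{
	int err;

	skcipher_request_set_callback(&areq->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      example_async_done, areq);
	sock_hold(sk);			/* callback may fire after we return */
	err = crypto_skcipher_encrypt(&areq->req);
	if (err != -EINPROGRESS && err != -EBUSY) {
		sock_put(sk);		/* finished synchronously */
		/* handle err / completion inline */
	}
	return err;
}
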
sk                187 crypto/crypto_user_base.c 	struct net *net = sock_net(in_skb->sk);
sk                424 crypto/crypto_user_base.c 	struct net *net = sock_net(skb->sk);
sk                302 crypto/crypto_user_stat.c 	struct net *net = sock_net(in_skb->sk);
sk               1151 crypto/ecc.c   	u64 sk[2][ECC_MAX_DIGITS];
sk               1157 crypto/ecc.c   	carry = vli_add(sk[0], scalar, curve->n, ndigits);
sk               1158 crypto/ecc.c   	vli_add(sk[1], sk[0], curve->n, ndigits);
sk               1159 crypto/ecc.c   	scalar = sk[!carry];
sk                494 crypto/twofish_common.c    ctx->s[2][i] = mds[2][q1[q0[(a) ^ sc] ^ sg] ^ sk]; \
sk                502 crypto/twofish_common.c    ctx->s[2][i] = mds[2][q1[q0[q0[(a) ^ sc] ^ sg] ^ sk] ^ so]; \
sk                580 crypto/twofish_common.c 	u8 si = 0, sj = 0, sk = 0, sl = 0, sm = 0, sn = 0, so = 0, sp = 0;
sk                615 crypto/twofish_common.c 		CALC_S (si, sj, sk, sl, 16, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */
sk                616 crypto/twofish_common.c 		CALC_S (si, sj, sk, sl, 17, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */
sk                617 crypto/twofish_common.c 		CALC_S (si, sj, sk, sl, 18, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */
sk                618 crypto/twofish_common.c 		CALC_S (si, sj, sk, sl, 19, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */
sk                619 crypto/twofish_common.c 		CALC_S (si, sj, sk, sl, 20, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */
sk                620 crypto/twofish_common.c 		CALC_S (si, sj, sk, sl, 21, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */
sk                621 crypto/twofish_common.c 		CALC_S (si, sj, sk, sl, 22, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */
sk                622 crypto/twofish_common.c 		CALC_S (si, sj, sk, sl, 23, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */
sk                338 drivers/ata/libata-scsi.c 			u8 sk, u8 asc, u8 ascq)
sk                347 drivers/ata/libata-scsi.c 	scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq);
sk                937 drivers/ata/libata-scsi.c static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
sk               1014 drivers/ata/libata-scsi.c 				*sk = sense_table[i][1];
sk               1029 drivers/ata/libata-scsi.c 			*sk = stat_table[i][1];
sk               1040 drivers/ata/libata-scsi.c 	*sk = ABORTED_COMMAND;
sk               1047 drivers/ata/libata-scsi.c 		       id, drv_stat, drv_err, *sk, *asc, *ascq);
sk                120 drivers/ata/libata.h 			       struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
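
In the libata entries above, sk is a SCSI sense key rather than a socket: ata_to_sense_error() translates ATA status/error bits into an sk/asc/ascq triple, which is then packed with scsi_build_sense_buffer(). A small hypothetical example of emitting such a triple directly (the d_sense flag selects descriptor-format sense data instead of fixed format):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

static void example_invalid_field(struct scsi_cmnd *cmd, bool d_sense)
{
	/* sk = ILLEGAL_REQUEST (0x5), asc = 0x24, ascq = 0x00:
	 * "INVALID FIELD IN CDB"
	 */
	scsi_build_sense_buffer(d_sense, cmd->sense_buffer,
				ILLEGAL_REQUEST, 0x24, 0x00);
}
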
sk                724 drivers/atm/idt77252.c 		struct sock *sk = sk_atm(vcc);
sk                727 drivers/atm/idt77252.c 		if (refcount_read(&sk->sk_wmem_alloc) >
sk                728 drivers/atm/idt77252.c 		    (sk->sk_sndbuf >> 1)) {
sk               1798 drivers/atm/iphase.c 	   struct sock *sk = sk_atm(vcc);
sk               1802 drivers/atm/iphase.c                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
sk               1804 drivers/atm/iphase.c                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
sk               1806 drivers/atm/iphase.c                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
sk               1809 drivers/atm/iphase.c              sk->sk_sndbuf = 24576;
sk                388 drivers/block/drbd/drbd_debugfs.c 		struct tcp_sock *tp = tcp_sk(connection->data.socket->sk);
sk               1500 drivers/block/drbd/drbd_main.c 	struct sock *sk = connection->data.socket->sk;
sk               1501 drivers/block/drbd/drbd_main.c 	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
sk                577 drivers/block/drbd/drbd_receiver.c 		sock->sk->sk_sndbuf = snd;
sk                578 drivers/block/drbd/drbd_receiver.c 		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk                581 drivers/block/drbd/drbd_receiver.c 		sock->sk->sk_rcvbuf = rcv;
sk                582 drivers/block/drbd/drbd_receiver.c 		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk                627 drivers/block/drbd/drbd_receiver.c 	sock->sk->sk_rcvtimeo =
sk                628 drivers/block/drbd/drbd_receiver.c 	sock->sk->sk_sndtimeo = connect_int * HZ;
sk                678 drivers/block/drbd/drbd_receiver.c 	void (*original_sk_state_change)(struct sock *sk);
sk                682 drivers/block/drbd/drbd_receiver.c static void drbd_incoming_connection(struct sock *sk)
sk                684 drivers/block/drbd/drbd_receiver.c 	struct accept_wait_data *ad = sk->sk_user_data;
sk                685 drivers/block/drbd/drbd_receiver.c 	void (*state_change)(struct sock *sk);
sk                688 drivers/block/drbd/drbd_receiver.c 	if (sk->sk_state == TCP_ESTABLISHED)
sk                690 drivers/block/drbd/drbd_receiver.c 	state_change(sk);
sk                722 drivers/block/drbd/drbd_receiver.c 	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
sk                731 drivers/block/drbd/drbd_receiver.c 	write_lock_bh(&s_listen->sk->sk_callback_lock);
sk                732 drivers/block/drbd/drbd_receiver.c 	ad->original_sk_state_change = s_listen->sk->sk_state_change;
sk                733 drivers/block/drbd/drbd_receiver.c 	s_listen->sk->sk_state_change = drbd_incoming_connection;
sk                734 drivers/block/drbd/drbd_receiver.c 	s_listen->sk->sk_user_data = ad;
sk                735 drivers/block/drbd/drbd_receiver.c 	write_unlock_bh(&s_listen->sk->sk_callback_lock);
sk                756 drivers/block/drbd/drbd_receiver.c static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
sk                758 drivers/block/drbd/drbd_receiver.c 	write_lock_bh(&sk->sk_callback_lock);
sk                759 drivers/block/drbd/drbd_receiver.c 	sk->sk_state_change = ad->original_sk_state_change;
sk                760 drivers/block/drbd/drbd_receiver.c 	sk->sk_user_data = NULL;
sk                761 drivers/block/drbd/drbd_receiver.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                796 drivers/block/drbd/drbd_receiver.c 		unregister_state_change(s_estab->sk, ad);
sk                824 drivers/block/drbd/drbd_receiver.c 	sock->sk->sk_rcvtimeo = nc->ping_timeo * 4 * HZ / 10;
sk               1024 drivers/block/drbd/drbd_receiver.c 	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
sk               1025 drivers/block/drbd/drbd_receiver.c 	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
sk               1027 drivers/block/drbd/drbd_receiver.c 	sock.socket->sk->sk_allocation = GFP_NOIO;
sk               1028 drivers/block/drbd/drbd_receiver.c 	msock.socket->sk->sk_allocation = GFP_NOIO;
sk               1030 drivers/block/drbd/drbd_receiver.c 	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
sk               1031 drivers/block/drbd/drbd_receiver.c 	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
sk               1041 drivers/block/drbd/drbd_receiver.c 	sock.socket->sk->sk_sndtimeo =
sk               1042 drivers/block/drbd/drbd_receiver.c 	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
sk               1044 drivers/block/drbd/drbd_receiver.c 	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
sk               1049 drivers/block/drbd/drbd_receiver.c 	msock.socket->sk->sk_sndtimeo = timeout;
sk               1076 drivers/block/drbd/drbd_receiver.c 	connection->data.socket->sk->sk_sndtimeo = timeout;
sk               1077 drivers/block/drbd/drbd_receiver.c 	connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
sk               5678 drivers/block/drbd/drbd_receiver.c 	connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
sk               5977 drivers/block/drbd/drbd_receiver.c 	connection->meta.socket->sk->sk_rcvtimeo = t;
sk                634 drivers/block/drbd/drbd_worker.c 			struct sock *sk = connection->data.socket->sk;
sk                635 drivers/block/drbd/drbd_worker.c 			int queued = sk->sk_wmem_queued;
sk                636 drivers/block/drbd/drbd_worker.c 			int sndbuf = sk->sk_sndbuf;
sk                639 drivers/block/drbd/drbd_worker.c 				if (sk->sk_socket)
sk                640 drivers/block/drbd/drbd_worker.c 					set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
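
drbd_incoming_connection() above overrides sk_state_change on the listening socket and unregister_state_change() restores it, both under sk_callback_lock. A standalone sketch of that hook pattern (names hypothetical, not drbd's code):

#include <net/sock.h>
#include <net/tcp_states.h>
#include <linux/printk.h>

struct my_listener {
	void (*orig_state_change)(struct sock *sk);
};

static void my_state_change(struct sock *sk)
{
	struct my_listener *l = sk->sk_user_data;

	if (sk->sk_state == TCP_ESTABLISHED)
		pr_info("incoming connection\n");
	l->orig_state_change(sk);	/* always chain to the original */
}

static void hook_state_change(struct socket *sock, struct my_listener *l)
{
	write_lock_bh(&sock->sk->sk_callback_lock);
	l->orig_state_change = sock->sk->sk_state_change;
	sock->sk->sk_state_change = my_state_change;
	sock->sk->sk_user_data = l;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void unhook_state_change(struct socket *sock, struct my_listener *l)
{
	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_state_change = l->orig_state_change;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}
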
sk                479 drivers/block/nbd.c 		sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
sk               1086 drivers/block/nbd.c 		sk_set_memalloc(sock->sk);
sk               1088 drivers/block/nbd.c 			sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
sk               1280 drivers/block/nbd.c 		sk_set_memalloc(config->socks[i]->sock->sk);
sk               1282 drivers/block/nbd.c 			config->socks[i]->sock->sk->sk_sndtimeo =
sk                944 drivers/crypto/amcc/crypto4xx_core.c static int crypto4xx_sk_init(struct crypto_skcipher *sk)
sk                946 drivers/crypto/amcc/crypto4xx_core.c 	struct skcipher_alg *alg = crypto_skcipher_alg(sk);
sk                948 drivers/crypto/amcc/crypto4xx_core.c 	struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
sk                968 drivers/crypto/amcc/crypto4xx_core.c static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
sk                970 drivers/crypto/amcc/crypto4xx_core.c 	struct crypto4xx_ctx *ctx =  crypto_skcipher_ctx(sk);
sk                 71 drivers/crypto/chelsio/chtls/chtls.h 	struct sock *sk;           /* The listening socket */
sk                155 drivers/crypto/chelsio/chtls/chtls.h 	struct sock *sk;
sk                185 drivers/crypto/chelsio/chtls/chtls.h 	struct sock *sk;
sk                320 drivers/crypto/chelsio/chtls/chtls.h 	void (*backlog_rcv)(struct sock *sk, struct sk_buff *skb);
sk                362 drivers/crypto/chelsio/chtls/chtls.h #define TCP_PAGE(sk)   (sk->sk_frag.page)
sk                363 drivers/crypto/chelsio/chtls/chtls.h #define TCP_OFF(sk)    (sk->sk_frag.offset)
sk                387 drivers/crypto/chelsio/chtls/chtls.h static inline int csk_flag(const struct sock *sk, enum csk_flags flag)
sk                389 drivers/crypto/chelsio/chtls/chtls.h 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                415 drivers/crypto/chelsio/chtls/chtls.h 				   struct sock *sk,
sk                422 drivers/crypto/chelsio/chtls/chtls.h 	bh_lock_sock(sk);
sk                423 drivers/crypto/chelsio/chtls/chtls.h 	if (unlikely(sock_owned_by_user(sk))) {
sk                425 drivers/crypto/chelsio/chtls/chtls.h 		__sk_add_backlog(sk, skb);
sk                427 drivers/crypto/chelsio/chtls/chtls.h 		fn(sk, skb);
sk                429 drivers/crypto/chelsio/chtls/chtls.h 	bh_unlock_sock(sk);
sk                450 drivers/crypto/chelsio/chtls/chtls.h static inline void send_or_defer(struct sock *sk, struct tcp_sock *tp,
sk                453 drivers/crypto/chelsio/chtls/chtls.h 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                466 drivers/crypto/chelsio/chtls/chtls.h void chtls_install_cpl_ops(struct sock *sk);
sk                468 drivers/crypto/chelsio/chtls/chtls.h void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk);
sk                469 drivers/crypto/chelsio/chtls/chtls.h int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk);
sk                470 drivers/crypto/chelsio/chtls/chtls.h void chtls_close(struct sock *sk, long timeout);
sk                471 drivers/crypto/chelsio/chtls/chtls.h int chtls_disconnect(struct sock *sk, int flags);
sk                472 drivers/crypto/chelsio/chtls/chtls.h void chtls_shutdown(struct sock *sk, int how);
sk                473 drivers/crypto/chelsio/chtls/chtls.h void chtls_destroy_sock(struct sock *sk);
sk                474 drivers/crypto/chelsio/chtls/chtls.h int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
sk                475 drivers/crypto/chelsio/chtls/chtls.h int chtls_recvmsg(struct sock *sk, struct msghdr *msg,
sk                477 drivers/crypto/chelsio/chtls/chtls.h int chtls_sendpage(struct sock *sk, struct page *page,
sk                479 drivers/crypto/chelsio/chtls/chtls.h int send_tx_flowc_wr(struct sock *sk, int compl,
sk                481 drivers/crypto/chelsio/chtls/chtls.h void chtls_tcp_push(struct sock *sk, int flags);
sk                483 drivers/crypto/chelsio/chtls/chtls.h int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val);
sk                485 drivers/crypto/chelsio/chtls/chtls.h void skb_entail(struct sock *sk, struct sk_buff *skb, int flags);
sk                487 drivers/crypto/chelsio/chtls/chtls.h void free_tls_keyid(struct sock *sk);
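
process_cpl_msg() above is the usual softirq-vs-process-context ownership idiom: if the socket is currently owned by a process, the skb is parked on the socket backlog and the saved handler is replayed later from release_sock() through sk->sk_backlog_rcv (chtls_backlog_rcv() further down). A self-contained sketch of the same idiom (the control-block accessor below is a stand-in for chtls's BLOG_SKB_CB):

#include <net/sock.h>
#include <linux/skbuff.h>

struct deferred_cb {
	void (*handler)(struct sock *sk, struct sk_buff *skb);
};
#define DEFERRED_CB(skb)	((struct deferred_cb *)(skb)->cb)

/* installed as sk->sk_backlog_rcv when the socket is taken over */
static int example_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	DEFERRED_CB(skb)->handler(sk, skb);
	return 0;
}

/* called from CPL processing in softirq context */
static void example_process_msg(void (*fn)(struct sock *, struct sk_buff *),
				struct sock *sk, struct sk_buff *skb)
{
	bh_lock_sock(sk);
	if (unlikely(sock_owned_by_user(sk))) {
		DEFERRED_CB(skb)->handler = fn;	/* remember the handler    */
		__sk_add_backlog(sk, skb);	/* run it in release_sock() */
	} else {
		fn(sk, skb);			/* socket not owned: run now */
	}
	bh_unlock_sock(sk);
}
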
sk                 86 drivers/crypto/chelsio/chtls/chtls_cm.c 					    struct sock *sk)
sk                 90 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (likely(!inet_sk(sk)->inet_rcv_saddr))
sk                 93 drivers/crypto/chelsio/chtls/chtls_cm.c 	ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
sk                102 drivers/crypto/chelsio/chtls/chtls_cm.c static void assign_rxopt(struct sock *sk, unsigned int opt)
sk                108 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk                109 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp = tcp_sk(sk);
sk                130 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_purge_receive_queue(struct sock *sk)
sk                134 drivers/crypto/chelsio/chtls/chtls_cm.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk                140 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_purge_write_queue(struct sock *sk)
sk                142 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                146 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_wmem_queued -= skb->truesize;
sk                151 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_purge_recv_queue(struct sock *sk)
sk                153 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                184 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_send_abort(struct sock *sk, int mode, struct sk_buff *skb)
sk                190 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk                191 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp = tcp_sk(sk);
sk                203 drivers/crypto/chelsio/chtls/chtls_cm.c 	send_or_defer(sk, tp, skb, mode == CPL_ABORT_SEND_RST);
sk                206 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
sk                208 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                212 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (sk->sk_state == TCP_SYN_RECV)
sk                218 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct tcp_sock *tp = tcp_sk(sk);
sk                220 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
sk                226 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_write_queue(sk);
sk                229 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sk->sk_state != TCP_SYN_RECV)
sk                230 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_send_abort(sk, mode, skb);
sk                239 drivers/crypto/chelsio/chtls/chtls_cm.c static void release_tcp_port(struct sock *sk)
sk                241 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (inet_csk(sk)->icsk_bind_hash)
sk                242 drivers/crypto/chelsio/chtls/chtls_cm.c 		inet_put_port(sk);
sk                245 drivers/crypto/chelsio/chtls/chtls_cm.c static void tcp_uncork(struct sock *sk)
sk                247 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                251 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_tcp_push(sk, 0);
sk                255 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_close_conn(struct sock *sk)
sk                264 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk                278 drivers/crypto/chelsio/chtls/chtls_cm.c 	tcp_uncork(sk);
sk                279 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb_entail(sk, skb, ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
sk                280 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sk->sk_state != TCP_SYN_SENT)
sk                289 drivers/crypto/chelsio/chtls/chtls_cm.c static int make_close_transition(struct sock *sk)
sk                291 drivers/crypto/chelsio/chtls/chtls_cm.c 	int next = (int)new_state[sk->sk_state];
sk                293 drivers/crypto/chelsio/chtls/chtls_cm.c 	tcp_set_state(sk, next & TCP_STATE_MASK);
sk                297 drivers/crypto/chelsio/chtls/chtls_cm.c void chtls_close(struct sock *sk, long timeout)
sk                302 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk                304 drivers/crypto/chelsio/chtls/chtls_cm.c 	lock_sock(sk);
sk                305 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk->sk_shutdown |= SHUTDOWN_MASK;
sk                307 drivers/crypto/chelsio/chtls/chtls_cm.c 	data_lost = skb_queue_len(&sk->sk_receive_queue);
sk                309 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_recv_queue(sk);
sk                310 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_receive_queue(sk);
sk                312 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sk->sk_state == TCP_CLOSE) {
sk                314 drivers/crypto/chelsio/chtls/chtls_cm.c 	} else if (data_lost || sk->sk_state == TCP_SYN_SENT) {
sk                315 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
sk                316 drivers/crypto/chelsio/chtls/chtls_cm.c 		release_tcp_port(sk);
sk                318 drivers/crypto/chelsio/chtls/chtls_cm.c 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
sk                319 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_prot->disconnect(sk, 0);
sk                320 drivers/crypto/chelsio/chtls/chtls_cm.c 	} else if (make_close_transition(sk)) {
sk                321 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_close_conn(sk);
sk                325 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk_stream_wait_close(sk, timeout);
sk                328 drivers/crypto/chelsio/chtls/chtls_cm.c 	prev_state = sk->sk_state;
sk                329 drivers/crypto/chelsio/chtls/chtls_cm.c 	sock_hold(sk);
sk                330 drivers/crypto/chelsio/chtls/chtls_cm.c 	sock_orphan(sk);
sk                332 drivers/crypto/chelsio/chtls/chtls_cm.c 	release_sock(sk);
sk                335 drivers/crypto/chelsio/chtls/chtls_cm.c 	bh_lock_sock(sk);
sk                337 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (prev_state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
sk                340 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sk->sk_state == TCP_FIN_WAIT2 && tcp_sk(sk)->linger2 < 0 &&
sk                341 drivers/crypto/chelsio/chtls/chtls_cm.c 	    !csk_flag(sk, CSK_ABORT_SHUTDOWN)) {
sk                346 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_send_reset(sk, CPL_ABORT_SEND_RST, skb);
sk                349 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sk->sk_state == TCP_CLOSE)
sk                350 drivers/crypto/chelsio/chtls/chtls_cm.c 		inet_csk_destroy_sock(sk);
sk                353 drivers/crypto/chelsio/chtls/chtls_cm.c 	bh_unlock_sock(sk);
sk                355 drivers/crypto/chelsio/chtls/chtls_cm.c 	sock_put(sk);
sk                361 drivers/crypto/chelsio/chtls/chtls_cm.c static int wait_for_states(struct sock *sk, unsigned int states)
sk                374 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!sk->sk_wq) {
sk                378 drivers/crypto/chelsio/chtls/chtls_cm.c 		RCU_INIT_POINTER(sk->sk_wq, &_sk_wq);
sk                381 drivers/crypto/chelsio/chtls/chtls_cm.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                382 drivers/crypto/chelsio/chtls/chtls_cm.c 	while (!sk_in_state(sk, states)) {
sk                392 drivers/crypto/chelsio/chtls/chtls_cm.c 		release_sock(sk);
sk                393 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (!sk_in_state(sk, states))
sk                396 drivers/crypto/chelsio/chtls/chtls_cm.c 		lock_sock(sk);
sk                398 drivers/crypto/chelsio/chtls/chtls_cm.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                400 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (rcu_dereference(sk->sk_wq) == &_sk_wq)
sk                401 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_wq = NULL;
sk                405 drivers/crypto/chelsio/chtls/chtls_cm.c int chtls_disconnect(struct sock *sk, int flags)
sk                410 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp = tcp_sk(sk);
sk                411 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_recv_queue(sk);
sk                412 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_receive_queue(sk);
sk                413 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_write_queue(sk);
sk                415 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sk->sk_state != TCP_CLOSE) {
sk                416 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_err = ECONNRESET;
sk                417 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_send_reset(sk, CPL_ABORT_SEND_RST, NULL);
sk                418 drivers/crypto/chelsio/chtls/chtls_cm.c 		err = wait_for_states(sk, TCPF_CLOSE);
sk                422 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_recv_queue(sk);
sk                423 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_receive_queue(sk);
sk                425 drivers/crypto/chelsio/chtls/chtls_cm.c 	return tcp_disconnect(sk, flags);
sk                430 drivers/crypto/chelsio/chtls/chtls_cm.c void chtls_shutdown(struct sock *sk, int how)
sk                433 drivers/crypto/chelsio/chtls/chtls_cm.c 	    sk_in_state(sk, SHUTDOWN_ELIGIBLE_STATE) &&
sk                434 drivers/crypto/chelsio/chtls/chtls_cm.c 	    make_close_transition(sk))
sk                435 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_close_conn(sk);
sk                438 drivers/crypto/chelsio/chtls/chtls_cm.c void chtls_destroy_sock(struct sock *sk)
sk                442 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk                443 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_recv_queue(sk);
sk                445 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_purge_write_queue(sk);
sk                446 drivers/crypto/chelsio/chtls/chtls_cm.c 	free_tls_keyid(sk);
sk                448 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk->sk_prot = &tcp_prot;
sk                449 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk->sk_prot->destroy(sk);
sk                476 drivers/crypto/chelsio/chtls/chtls_cm.c 			struct sock *child = req->sk;
sk                495 drivers/crypto/chelsio/chtls/chtls_cm.c static int listen_hashfn(const struct sock *sk)
sk                497 drivers/crypto/chelsio/chtls/chtls_cm.c 	return ((unsigned long)sk >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
sk                501 drivers/crypto/chelsio/chtls/chtls_cm.c 					   struct sock *sk,
sk                507 drivers/crypto/chelsio/chtls/chtls_cm.c 		int key = listen_hashfn(sk);
sk                509 drivers/crypto/chelsio/chtls/chtls_cm.c 		p->sk = sk;
sk                520 drivers/crypto/chelsio/chtls/chtls_cm.c 			    struct sock *sk)
sk                526 drivers/crypto/chelsio/chtls/chtls_cm.c 	key = listen_hashfn(sk);
sk                530 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (p->sk == sk) {
sk                539 drivers/crypto/chelsio/chtls/chtls_cm.c 			   struct sock *sk)
sk                545 drivers/crypto/chelsio/chtls/chtls_cm.c 	key = listen_hashfn(sk);
sk                550 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (p->sk == sk) {
sk                582 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct sock *child = csk->sk;
sk                596 drivers/crypto/chelsio/chtls/chtls_cm.c int chtls_listen_start(struct chtls_dev *cdev, struct sock *sk)
sk                605 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sk->sk_family != PF_INET)
sk                609 drivers/crypto/chelsio/chtls/chtls_cm.c 	ndev = chtls_ipv4_netdev(cdev, sk);
sk                619 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (listen_hash_find(cdev, sk) >= 0)   /* already have it */
sk                627 drivers/crypto/chelsio/chtls/chtls_cm.c 	ctx->lsk = sk;
sk                632 drivers/crypto/chelsio/chtls/chtls_cm.c 	stid = cxgb4_alloc_stid(cdev->tids, sk->sk_family, ctx);
sk                636 drivers/crypto/chelsio/chtls/chtls_cm.c 	sock_hold(sk);
sk                637 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!listen_hash_add(cdev, sk, stid))
sk                641 drivers/crypto/chelsio/chtls/chtls_cm.c 				  inet_sk(sk)->inet_rcv_saddr,
sk                642 drivers/crypto/chelsio/chtls/chtls_cm.c 				  inet_sk(sk)->inet_sport, 0,
sk                650 drivers/crypto/chelsio/chtls/chtls_cm.c 	listen_hash_del(cdev, sk);
sk                652 drivers/crypto/chelsio/chtls/chtls_cm.c 	cxgb4_free_stid(cdev->tids, stid, sk->sk_family);
sk                653 drivers/crypto/chelsio/chtls/chtls_cm.c 	sock_put(sk);
sk                660 drivers/crypto/chelsio/chtls/chtls_cm.c void chtls_listen_stop(struct chtls_dev *cdev, struct sock *sk)
sk                665 drivers/crypto/chelsio/chtls/chtls_cm.c 	stid = listen_hash_del(cdev, sk);
sk                674 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_disconnect_acceptq(sk);
sk                730 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_purge_wr_queue(struct sock *sk)
sk                734 drivers/crypto/chelsio/chtls/chtls_cm.c 	while ((skb = dequeue_wr(sk)) != NULL)
sk                738 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_release_resources(struct sock *sk)
sk                740 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                753 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_purge_wr_queue(sk);
sk                762 drivers/crypto/chelsio/chtls/chtls_cm.c 	cxgb4_remove_tid(tids, csk->port_id, tid, sk->sk_family);
sk                763 drivers/crypto/chelsio/chtls/chtls_cm.c 	sock_put(sk);
sk                766 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_conn_done(struct sock *sk)
sk                768 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sock_flag(sk, SOCK_DEAD))
sk                769 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_purge_receive_queue(sk);
sk                770 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk_wakeup_sleepers(sk, 0);
sk                771 drivers/crypto/chelsio/chtls/chtls_cm.c 	tcp_done(sk);
sk                811 drivers/crypto/chelsio/chtls/chtls_cm.c 	pass_open_abort(skb->sk, lsk, skb);
sk                814 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_pass_open_arp_failure(struct sock *sk,
sk                823 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk                831 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (csk_flag(sk, CSK_ABORT_RPL_PENDING)) {
sk                842 drivers/crypto/chelsio/chtls/chtls_cm.c 		pass_open_abort(sk, parent, skb);
sk                853 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *sk = (struct sock *)handle;
sk                855 drivers/crypto/chelsio/chtls/chtls_cm.c 	sock_hold(sk);
sk                856 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_pass_open_arp_failure, sk, skb);
sk                857 drivers/crypto/chelsio/chtls/chtls_cm.c 	sock_put(sk);
sk                871 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *sk;
sk                874 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk = csk->sk;
sk                875 drivers/crypto/chelsio/chtls/chtls_cm.c 	dst = __sk_dst_get(sk);
sk                877 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp = tcp_sk(sk);
sk                898 drivers/crypto/chelsio/chtls/chtls_cm.c 	inet_csk(sk)->icsk_pmtu_cookie = pmtu;
sk                929 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *sk;
sk                933 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk = skb->sk;
sk                934 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp = tcp_sk(sk);
sk                935 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = sk->sk_user_data;
sk                945 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
sk                980 drivers/crypto/chelsio/chtls/chtls_cm.c 	t4_set_arp_err_handler(skb, sk, chtls_accept_rpl_arp_failure);
sk                992 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk                998 drivers/crypto/chelsio/chtls/chtls_cm.c 	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
sk               1080 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk->sk = newsk;
sk               1135 drivers/crypto/chelsio/chtls/chtls_cm.c static int chtls_get_module(struct sock *sk)
sk               1137 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               1145 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_pass_accept_request(struct sock *sk,
sk               1182 drivers/crypto/chelsio/chtls/chtls_cm.c 		cxgb4_remove_tid(cdev->tids, 0, tid, sk->sk_family);
sk               1187 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sk->sk_state != TCP_LISTEN)
sk               1190 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (inet_csk_reqsk_queue_is_full(sk))
sk               1193 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (sk_acceptq_is_full(sk))
sk               1196 drivers/crypto/chelsio/chtls/chtls_cm.c 	oreq = inet_reqsk_alloc(&chtls_rsk_ops, sk, true);
sk               1228 drivers/crypto/chelsio/chtls/chtls_cm.c 	    sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
sk               1232 drivers/crypto/chelsio/chtls/chtls_cm.c 	inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
sk               1236 drivers/crypto/chelsio/chtls/chtls_cm.c 		ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
sk               1237 drivers/crypto/chelsio/chtls/chtls_cm.c 		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
sk               1241 drivers/crypto/chelsio/chtls/chtls_cm.c 	newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
sk               1247 drivers/crypto/chelsio/chtls/chtls_cm.c 	inet_csk_reqsk_queue_added(sk);
sk               1248 drivers/crypto/chelsio/chtls/chtls_cm.c 	reply_skb->sk = newsk;
sk               1305 drivers/crypto/chelsio/chtls/chtls_cm.c static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
sk               1307 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1313 drivers/crypto/chelsio/chtls/chtls_cm.c 	inet_sk(sk)->inet_id = prandom_u32();
sk               1314 drivers/crypto/chelsio/chtls/chtls_cm.c 	assign_rxopt(sk, opt);
sk               1320 drivers/crypto/chelsio/chtls/chtls_cm.c 	tcp_set_state(sk, TCP_ESTABLISHED);
sk               1323 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_abort_conn(struct sock *sk, struct sk_buff *skb)
sk               1329 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_send_reset(sk, CPL_ABORT_SEND_RST, abort_skb);
sk               1342 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct sock *sk = reap_list;
sk               1343 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk               1348 drivers/crypto/chelsio/chtls/chtls_cm.c 		sock_hold(sk);
sk               1350 drivers/crypto/chelsio/chtls/chtls_cm.c 		bh_lock_sock(sk);
sk               1351 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_abort_conn(sk, NULL);
sk               1352 drivers/crypto/chelsio/chtls/chtls_cm.c 		sock_orphan(sk);
sk               1353 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (sk->sk_state == TCP_CLOSE)
sk               1354 drivers/crypto/chelsio/chtls/chtls_cm.c 			inet_csk_destroy_sock(sk);
sk               1355 drivers/crypto/chelsio/chtls/chtls_cm.c 		bh_unlock_sock(sk);
sk               1356 drivers/crypto/chelsio/chtls/chtls_cm.c 		sock_put(sk);
sk               1364 drivers/crypto/chelsio/chtls/chtls_cm.c static void add_to_reap_list(struct sock *sk)
sk               1366 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct chtls_sock *csk = sk->sk_user_data;
sk               1369 drivers/crypto/chelsio/chtls/chtls_cm.c 	bh_lock_sock(sk);
sk               1370 drivers/crypto/chelsio/chtls/chtls_cm.c 	release_tcp_port(sk); /* release the port immediately */
sk               1374 drivers/crypto/chelsio/chtls/chtls_cm.c 	reap_list = sk;
sk               1378 drivers/crypto/chelsio/chtls/chtls_cm.c 	bh_unlock_sock(sk);
sk               1410 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *child = skb->sk;
sk               1412 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb->sk = NULL;
sk               1421 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *lsk, *sk;
sk               1425 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk = lookup_tid(cdev->tids, hwtid);
sk               1426 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!sk)
sk               1429 drivers/crypto/chelsio/chtls/chtls_cm.c 	bh_lock_sock(sk);
sk               1430 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (unlikely(sock_owned_by_user(sk))) {
sk               1436 drivers/crypto/chelsio/chtls/chtls_cm.c 		csk = sk->sk_user_data;
sk               1440 drivers/crypto/chelsio/chtls/chtls_cm.c 		make_established(sk, ntohl(req->snd_isn), ntohs(req->tcp_opt));
sk               1442 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_state_change(sk);
sk               1443 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (unlikely(sk->sk_socket))
sk               1444 drivers/crypto/chelsio/chtls/chtls_cm.c 			sk_wake_async(sk, 0, POLL_OUT);
sk               1459 drivers/crypto/chelsio/chtls/chtls_cm.c 			add_pass_open_to_parent(sk, lsk, cdev);
sk               1461 drivers/crypto/chelsio/chtls/chtls_cm.c 			skb->sk = sk;
sk               1470 drivers/crypto/chelsio/chtls/chtls_cm.c 	bh_unlock_sock(sk);
sk               1477 drivers/crypto/chelsio/chtls/chtls_cm.c static void handle_urg_ptr(struct sock *sk, u32 urg_seq)
sk               1479 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1485 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk_send_sigurg(sk);
sk               1487 drivers/crypto/chelsio/chtls/chtls_cm.c 	    !sock_flag(sk, SOCK_URGINLINE) &&
sk               1489 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
sk               1493 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_free_skb(sk, skb);
sk               1502 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *sk = csk->sk;
sk               1504 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (unlikely(sk->sk_user_data &&
sk               1513 drivers/crypto/chelsio/chtls/chtls_cm.c static void handle_excess_rx(struct sock *sk, struct sk_buff *skb)
sk               1515 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!csk_flag(sk, CSK_ABORT_SHUTDOWN))
sk               1516 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_abort_conn(sk, skb);
sk               1521 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_recv_data(struct sock *sk, struct sk_buff *skb)
sk               1527 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk               1528 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp = tcp_sk(sk);
sk               1530 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
sk               1531 drivers/crypto/chelsio/chtls/chtls_cm.c 		handle_excess_rx(sk, skb);
sk               1545 drivers/crypto/chelsio/chtls/chtls_cm.c 		handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
sk               1559 drivers/crypto/chelsio/chtls/chtls_cm.c 	__skb_queue_tail(&sk->sk_receive_queue, skb);
sk               1561 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk               1563 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_data_ready(sk);
sk               1571 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *sk;
sk               1573 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk = lookup_tid(cdev->tids, hwtid);
sk               1574 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (unlikely(!sk)) {
sk               1579 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_recv_data, sk, skb);
sk               1583 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_recv_pdu(struct sock *sk, struct sk_buff *skb)
sk               1590 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk               1592 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp = tcp_sk(sk);
sk               1594 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (unlikely(sk->sk_shutdown & RCV_SHUTDOWN)) {
sk               1595 drivers/crypto/chelsio/chtls/chtls_cm.c 		handle_excess_rx(sk, skb);
sk               1623 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *sk;
sk               1625 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk = lookup_tid(cdev->tids, hwtid);
sk               1626 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (unlikely(!sk)) {
sk               1631 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_recv_pdu, sk, skb);
sk               1643 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_rx_hdr(struct sock *sk, struct sk_buff *skb)
sk               1653 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk               1655 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp = tcp_sk(sk);
sk               1674 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
sk               1678 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
sk               1679 drivers/crypto/chelsio/chtls/chtls_cm.c 		__skb_queue_tail(&sk->sk_receive_queue, skb_rec);
sk               1682 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk               1684 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_data_ready(sk);
sk               1692 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *sk;
sk               1694 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk = lookup_tid(cdev->tids, hwtid);
sk               1695 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (unlikely(!sk)) {
sk               1700 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_rx_hdr, sk, skb);
sk               1705 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_timewait(struct sock *sk)
sk               1707 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1712 drivers/crypto/chelsio/chtls/chtls_cm.c 	tcp_time_wait(sk, TCP_TIME_WAIT, 0);
sk               1715 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_peer_close(struct sock *sk, struct sk_buff *skb)
sk               1717 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk               1719 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk->sk_shutdown |= RCV_SHUTDOWN;
sk               1720 drivers/crypto/chelsio/chtls/chtls_cm.c 	sock_set_flag(sk, SOCK_DONE);
sk               1722 drivers/crypto/chelsio/chtls/chtls_cm.c 	switch (sk->sk_state) {
sk               1725 drivers/crypto/chelsio/chtls/chtls_cm.c 		tcp_set_state(sk, TCP_CLOSE_WAIT);
sk               1728 drivers/crypto/chelsio/chtls/chtls_cm.c 		tcp_set_state(sk, TCP_CLOSING);
sk               1731 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_release_resources(sk);
sk               1733 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_conn_done(sk);
sk               1735 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_timewait(sk);
sk               1738 drivers/crypto/chelsio/chtls/chtls_cm.c 		pr_info("cpl_peer_close in bad state %d\n", sk->sk_state);
sk               1741 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk               1742 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_state_change(sk);
sk               1745 drivers/crypto/chelsio/chtls/chtls_cm.c 		if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
sk               1746 drivers/crypto/chelsio/chtls/chtls_cm.c 		    sk->sk_state == TCP_CLOSE)
sk               1747 drivers/crypto/chelsio/chtls/chtls_cm.c 			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
sk               1749 drivers/crypto/chelsio/chtls/chtls_cm.c 			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
sk               1754 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_close_con_rpl(struct sock *sk, struct sk_buff *skb)
sk               1760 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk               1761 drivers/crypto/chelsio/chtls/chtls_cm.c 	tp = tcp_sk(sk);
sk               1765 drivers/crypto/chelsio/chtls/chtls_cm.c 	switch (sk->sk_state) {
sk               1767 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_release_resources(sk);
sk               1769 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_conn_done(sk);
sk               1771 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_timewait(sk);
sk               1774 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_release_resources(sk);
sk               1775 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_conn_done(sk);
sk               1778 drivers/crypto/chelsio/chtls/chtls_cm.c 		tcp_set_state(sk, TCP_FIN_WAIT2);
sk               1779 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_shutdown |= SEND_SHUTDOWN;
sk               1781 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (!sock_flag(sk, SOCK_DEAD))
sk               1782 drivers/crypto/chelsio/chtls/chtls_cm.c 			sk->sk_state_change(sk);
sk               1783 drivers/crypto/chelsio/chtls/chtls_cm.c 		else if (tcp_sk(sk)->linger2 < 0 &&
sk               1785 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_abort_conn(sk, skb);
sk               1788 drivers/crypto/chelsio/chtls/chtls_cm.c 		pr_info("close_con_rpl in bad state %d\n", sk->sk_state);
sk               1832 drivers/crypto/chelsio/chtls/chtls_cm.c static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
sk               1839 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk               1857 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (e && sk->sk_state != TCP_SYN_RECV) {
sk               1879 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
sk               1888 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk               1903 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (e && sk->sk_state != TCP_SYN_RECV) {
sk               1922 drivers/crypto/chelsio/chtls/chtls_cm.c 	child = skb->sk;
sk               1926 drivers/crypto/chelsio/chtls/chtls_cm.c 	skb->sk	= NULL;
sk               1932 drivers/crypto/chelsio/chtls/chtls_cm.c static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
sk               1941 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = sk->sk_user_data;
sk               1959 drivers/crypto/chelsio/chtls/chtls_cm.c 		do_abort_syn_rcv(sk, psk);
sk               1960 drivers/crypto/chelsio/chtls/chtls_cm.c 		send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
sk               1962 drivers/crypto/chelsio/chtls/chtls_cm.c 		skb->sk = sk;
sk               1970 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
sk               1973 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct chtls_sock *csk = sk->sk_user_data;
sk               1978 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (sk->sk_state == TCP_SYN_RECV)
sk               1979 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_set_tcb_tflag(sk, 0, 0);
sk               1989 drivers/crypto/chelsio/chtls/chtls_cm.c 		struct tcp_sock *tp = tcp_sk(sk);
sk               1991 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (send_tx_flowc_wr(sk, 0, tp->snd_nxt, tp->rcv_nxt) < 0)
sk               1999 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_err = ETIMEDOUT;
sk               2001 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (!sock_flag(sk, SOCK_DEAD))
sk               2002 drivers/crypto/chelsio/chtls/chtls_cm.c 			sk->sk_error_report(sk);
sk               2004 drivers/crypto/chelsio/chtls/chtls_cm.c 		if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
sk               2007 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_release_resources(sk);
sk               2008 drivers/crypto/chelsio/chtls/chtls_cm.c 		chtls_conn_done(sk);
sk               2011 drivers/crypto/chelsio/chtls/chtls_cm.c 	chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue);
sk               2014 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
sk               2020 drivers/crypto/chelsio/chtls/chtls_cm.c 	csk = rcu_dereference_sk_user_data(sk);
sk               2026 drivers/crypto/chelsio/chtls/chtls_cm.c 			if (sk->sk_state == TCP_SYN_SENT) {
sk               2030 drivers/crypto/chelsio/chtls/chtls_cm.c 						 sk->sk_family);
sk               2031 drivers/crypto/chelsio/chtls/chtls_cm.c 				sock_put(sk);
sk               2033 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_release_resources(sk);
sk               2034 drivers/crypto/chelsio/chtls/chtls_cm.c 			chtls_conn_done(sk);
sk               2043 drivers/crypto/chelsio/chtls/chtls_cm.c 	void (*fn)(struct sock *sk, struct sk_buff *skb);
sk               2045 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *sk;
sk               2050 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk = lookup_tid(cdev->tids, hwtid);
sk               2051 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (!sk)
sk               2071 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(fn, sk, skb);
sk               2079 drivers/crypto/chelsio/chtls/chtls_cm.c static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
sk               2082 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct chtls_sock *csk = sk->sk_user_data;
sk               2083 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2107 drivers/crypto/chelsio/chtls/chtls_cm.c 		dequeue_wr(sk);
sk               2134 drivers/crypto/chelsio/chtls/chtls_cm.c 		sk->sk_write_space(sk);
sk               2143 drivers/crypto/chelsio/chtls/chtls_cm.c 	struct sock *sk;
sk               2145 drivers/crypto/chelsio/chtls/chtls_cm.c 	sk = lookup_tid(cdev->tids, hwtid);
sk               2146 drivers/crypto/chelsio/chtls/chtls_cm.c 	if (unlikely(!sk)) {
sk               2150 drivers/crypto/chelsio/chtls/chtls_cm.c 	process_cpl_msg(chtls_rx_ack, sk, skb);
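The chtls_cm.c entries above repeat one dispatch idiom: a CPL ingress handler resolves the hardware TID to its struct sock via lookup_tid() and, if a connection is found, defers the per-message work through process_cpl_msg(). A minimal sketch of that shape, assuming the chtls helpers named in the listing; the cplhdr()/RSS_HDR/GET_TID() header accessors are assumptions standing in for the real CPL layout:

    /* Sketch only: mirrors the lookup_tid()/process_cpl_msg() pattern above. */
    static int chtls_rx_data_sketch(struct chtls_dev *cdev, struct sk_buff *skb)
    {
            struct cpl_rx_data *req = cplhdr(skb) + RSS_HDR; /* assumed accessors */
            unsigned int hwtid = GET_TID(req);
            struct sock *sk;

            sk = lookup_tid(cdev->tids, hwtid);   /* hw TID -> offloaded socket */
            if (unlikely(!sk)) {
                    pr_err("can't find conn. for hwtid %u\n", hwtid);
                    return -EINVAL;
            }
            /* runs chtls_recv_data(sk, skb) with the socket owned, or defers it */
            process_cpl_msg(chtls_recv_data, sk, skb);
            return 0;
    }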
sk                 91 drivers/crypto/chelsio/chtls/chtls_cm.h #define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)
sk                100 drivers/crypto/chelsio/chtls/chtls_cm.h #define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
sk                112 drivers/crypto/chelsio/chtls/chtls_cm.h static inline unsigned int sk_in_state(const struct sock *sk,
sk                115 drivers/crypto/chelsio/chtls/chtls_cm.h 	return states & (1 << sk->sk_state);
sk                145 drivers/crypto/chelsio/chtls/chtls_cm.h static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptable)
sk                150 drivers/crypto/chelsio/chtls/chtls_cm.h 	wq = rcu_dereference(sk->sk_wq);
sk                153 drivers/crypto/chelsio/chtls/chtls_cm.h 			wake_up_interruptible(sk_sleep(sk));
sk                155 drivers/crypto/chelsio/chtls/chtls_cm.h 			wake_up_all(sk_sleep(sk));
sk                174 drivers/crypto/chelsio/chtls/chtls_cm.h static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
sk                177 drivers/crypto/chelsio/chtls/chtls_cm.h 	__skb_unlink(skb, &sk->sk_receive_queue);
sk                181 drivers/crypto/chelsio/chtls/chtls_cm.h static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
sk                184 drivers/crypto/chelsio/chtls/chtls_cm.h 	__skb_unlink(skb, &sk->sk_receive_queue);
sk                207 drivers/crypto/chelsio/chtls/chtls_cm.h static inline struct sk_buff *dequeue_wr(struct sock *sk)
sk                209 drivers/crypto/chelsio/chtls/chtls_cm.h 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
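The sk_in_state() helper in chtls_cm.h tests membership of sk->sk_state in a bitmask in one AND, since each TCPF_* constant is defined as (1 << TCP_*). A tiny usage sketch with the same mask the chtls_io.c entries below use before sending:

    /* sketch: true only while payload may be queued on the offloaded socket */
    static inline bool can_send_payload(const struct sock *sk)
    {
            return sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT);
    }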
sk                 41 drivers/crypto/chelsio/chtls/chtls_hw.c static void __set_tcb_field(struct sock *sk, struct sk_buff *skb, u16 word,
sk                 50 drivers/crypto/chelsio/chtls/chtls_hw.c 	csk = rcu_dereference_sk_user_data(sk);
sk                 61 drivers/crypto/chelsio/chtls/chtls_hw.c static int chtls_set_tcb_field(struct sock *sk, u16 word, u64 mask, u64 val)
sk                 78 drivers/crypto/chelsio/chtls/chtls_hw.c 	csk = rcu_dereference_sk_user_data(sk);
sk                 80 drivers/crypto/chelsio/chtls/chtls_hw.c 	__set_tcb_field(sk, skb, word, mask, val, 0, 1);
sk                 94 drivers/crypto/chelsio/chtls/chtls_hw.c int chtls_set_tcb_tflag(struct sock *sk, unsigned int bit_pos, int val)
sk                 96 drivers/crypto/chelsio/chtls/chtls_hw.c 	return chtls_set_tcb_field(sk, 1, 1ULL << bit_pos,
sk                100 drivers/crypto/chelsio/chtls/chtls_hw.c static int chtls_set_tcb_keyid(struct sock *sk, int keyid)
sk                102 drivers/crypto/chelsio/chtls/chtls_hw.c 	return chtls_set_tcb_field(sk, 31, 0xFFFFFFFFULL, keyid);
sk                105 drivers/crypto/chelsio/chtls/chtls_hw.c static int chtls_set_tcb_seqno(struct sock *sk)
sk                107 drivers/crypto/chelsio/chtls/chtls_hw.c 	return chtls_set_tcb_field(sk, 28, ~0ULL, 0);
sk                110 drivers/crypto/chelsio/chtls/chtls_hw.c static int chtls_set_tcb_quiesce(struct sock *sk, int val)
sk                112 drivers/crypto/chelsio/chtls/chtls_hw.c 	return chtls_set_tcb_field(sk, 1, (1ULL << TF_RX_QUIESCE_S),
sk                164 drivers/crypto/chelsio/chtls/chtls_hw.c void free_tls_keyid(struct sock *sk)
sk                166 drivers/crypto/chelsio/chtls/chtls_hw.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                298 drivers/crypto/chelsio/chtls/chtls_hw.c 	struct sock *sk;
sk                304 drivers/crypto/chelsio/chtls/chtls_hw.c 	sk = csk->sk;
sk                312 drivers/crypto/chelsio/chtls/chtls_hw.c 		lock_sock(sk);
sk                315 drivers/crypto/chelsio/chtls/chtls_hw.c 		release_sock(sk);
sk                353 drivers/crypto/chelsio/chtls/chtls_hw.c 	lock_sock(sk);
sk                369 drivers/crypto/chelsio/chtls/chtls_hw.c 		ret = chtls_set_tcb_keyid(sk, keyid);
sk                372 drivers/crypto/chelsio/chtls/chtls_hw.c 		ret = chtls_set_tcb_field(sk, 0,
sk                380 drivers/crypto/chelsio/chtls/chtls_hw.c 		ret = chtls_set_tcb_seqno(sk);
sk                383 drivers/crypto/chelsio/chtls/chtls_hw.c 		ret = chtls_set_tcb_quiesce(sk, 0);
sk                392 drivers/crypto/chelsio/chtls/chtls_hw.c 	release_sock(sk);
sk                395 drivers/crypto/chelsio/chtls/chtls_hw.c 	release_sock(sk);
sk                396 drivers/crypto/chelsio/chtls/chtls_hw.c 	free_tls_keyid(sk);
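The chtls_hw.c entries show TLS key programming as a sequence of per-word TCB updates, each posted by a chtls_set_tcb_*() wrapper around chtls_set_tcb_field(), with free_tls_keyid() on the failure path. A sketch of that call order under the socket lock, assuming the error handling; the intermediate word-0 update from the listing is omitted because its mask/value are not shown:

    static int program_rx_key_sketch(struct sock *sk, int keyid)
    {
            int ret;

            lock_sock(sk);
            ret = chtls_set_tcb_keyid(sk, keyid);   /* word 31: key index   */
            if (ret)
                    goto out;
            ret = chtls_set_tcb_seqno(sk);          /* word 28: reset seqno */
            if (ret)
                    goto out;
            ret = chtls_set_tcb_quiesce(sk, 0);     /* resume RX            */
    out:
            release_sock(sk);
            if (ret)
                    free_tls_keyid(sk);
            return ret;
    }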
sk                 43 drivers/crypto/chelsio/chtls/chtls_io.c static int nos_ivs(struct sock *sk, unsigned int size)
sk                 45 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                 50 drivers/crypto/chelsio/chtls/chtls_io.c static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
sk                 52 drivers/crypto/chelsio/chtls/chtls_io.c 	int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
sk                 64 drivers/crypto/chelsio/chtls/chtls_io.c static int max_ivs_size(struct sock *sk, int size)
sk                 66 drivers/crypto/chelsio/chtls/chtls_io.c 	return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
sk                 69 drivers/crypto/chelsio/chtls/chtls_io.c static int ivs_size(struct sock *sk, const struct sk_buff *skb)
sk                 71 drivers/crypto/chelsio/chtls/chtls_io.c 	return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
sk                 89 drivers/crypto/chelsio/chtls/chtls_io.c static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
sk                 93 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                106 drivers/crypto/chelsio/chtls/chtls_io.c static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
sk                109 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                110 drivers/crypto/chelsio/chtls/chtls_io.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                117 drivers/crypto/chelsio/chtls/chtls_io.c 	if (csk_flag(sk, CSK_TX_DATA_SENT)) {
sk                118 drivers/crypto/chelsio/chtls/chtls_io.c 		skb = create_flowc_wr_skb(sk, flowc, flowclen);
sk                122 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_entail(sk, skb,
sk                132 drivers/crypto/chelsio/chtls/chtls_io.c 	skb = create_flowc_wr_skb(sk, flowc, flowclen);
sk                135 drivers/crypto/chelsio/chtls/chtls_io.c 	send_or_defer(sk, tp, skb, 0);
sk                159 drivers/crypto/chelsio/chtls/chtls_io.c int send_tx_flowc_wr(struct sock *sk, int compl,
sk                171 drivers/crypto/chelsio/chtls/chtls_io.c 	csk = rcu_dereference_sk_user_data(sk);
sk                172 drivers/crypto/chelsio/chtls/chtls_io.c 	tp = tcp_sk(sk);
sk                193 drivers/crypto/chelsio/chtls/chtls_io.c 	FLOWC_PARAM(TCPSTATE, tcp_state_to_flowc_state(sk->sk_state));
sk                215 drivers/crypto/chelsio/chtls/chtls_io.c 	return send_flowc_wr(sk, flowc, flowclen);
sk                219 drivers/crypto/chelsio/chtls/chtls_io.c static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb)
sk                230 drivers/crypto/chelsio/chtls/chtls_io.c 	csk = rcu_dereference_sk_user_data(sk);
sk                232 drivers/crypto/chelsio/chtls/chtls_io.c 	number_of_ivs = nos_ivs(sk, skb->len);
sk                257 drivers/crypto/chelsio/chtls/chtls_io.c 		page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0);
sk                276 drivers/crypto/chelsio/chtls/chtls_io.c static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb)
sk                286 drivers/crypto/chelsio/chtls/chtls_io.c 	csk = rcu_dereference_sk_user_data(sk);
sk                340 drivers/crypto/chelsio/chtls/chtls_io.c static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
sk                359 drivers/crypto/chelsio/chtls/chtls_io.c 	csk = rcu_dereference_sk_user_data(sk);
sk                392 drivers/crypto/chelsio/chtls/chtls_io.c 			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
sk                399 drivers/crypto/chelsio/chtls/chtls_io.c 			      TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
sk                442 drivers/crypto/chelsio/chtls/chtls_io.c static int chtls_expansion_size(struct sock *sk, int data_len,
sk                446 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                479 drivers/crypto/chelsio/chtls/chtls_io.c static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb,
sk                488 drivers/crypto/chelsio/chtls/chtls_io.c 	csk = rcu_dereference_sk_user_data(sk);
sk                491 drivers/crypto/chelsio/chtls/chtls_io.c 	expn_sz = chtls_expansion_size(sk, tls_len, 0, NULL);
sk                493 drivers/crypto/chelsio/chtls/chtls_io.c 		hws->expansion = chtls_expansion_size(sk,
sk                501 drivers/crypto/chelsio/chtls/chtls_io.c 	if (tls_copy_ivs(sk, skb))
sk                503 drivers/crypto/chelsio/chtls/chtls_io.c 	tls_copy_tx_key(sk, skb);
sk                504 drivers/crypto/chelsio/chtls/chtls_io.c 	tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus);
sk                508 drivers/crypto/chelsio/chtls/chtls_io.c static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
sk                517 drivers/crypto/chelsio/chtls/chtls_io.c 	csk = rcu_dereference_sk_user_data(sk);
sk                530 drivers/crypto/chelsio/chtls/chtls_io.c 			((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 :
sk                535 drivers/crypto/chelsio/chtls/chtls_io.c 			TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
sk                547 drivers/crypto/chelsio/chtls/chtls_io.c 	wr_size += ivs_size(csk->sk, skb);
sk                602 drivers/crypto/chelsio/chtls/chtls_io.c 	struct sock *sk;
sk                606 drivers/crypto/chelsio/chtls/chtls_io.c 	sk = csk->sk;
sk                607 drivers/crypto/chelsio/chtls/chtls_io.c 	tp = tcp_sk(sk);
sk                609 drivers/crypto/chelsio/chtls/chtls_io.c 	if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE)))
sk                612 drivers/crypto/chelsio/chtls/chtls_io.c 	if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN)))
sk                643 drivers/crypto/chelsio/chtls/chtls_io.c 			flowclen16 = send_tx_flowc_wr(sk, 1, tp->snd_nxt,
sk                679 drivers/crypto/chelsio/chtls/chtls_io.c 				make_tlstx_data_wr(sk, skb, tls_tx_imm,
sk                682 drivers/crypto/chelsio/chtls/chtls_io.c 				make_tx_data_wr(sk, skb, immdlen, len,
sk                709 drivers/crypto/chelsio/chtls/chtls_io.c 	sk->sk_wmem_queued -= total_size;
sk                728 drivers/crypto/chelsio/chtls/chtls_io.c static bool should_push(struct sock *sk)
sk                730 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                732 drivers/crypto/chelsio/chtls/chtls_io.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                760 drivers/crypto/chelsio/chtls/chtls_io.c static bool send_should_push(struct sock *sk, int flags)
sk                762 drivers/crypto/chelsio/chtls/chtls_io.c 	return should_push(sk) && !corked(tcp_sk(sk), flags);
sk                765 drivers/crypto/chelsio/chtls/chtls_io.c void chtls_tcp_push(struct sock *sk, int flags)
sk                767 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                772 drivers/crypto/chelsio/chtls/chtls_io.c 		struct tcp_sock *tp = tcp_sk(sk);
sk                785 drivers/crypto/chelsio/chtls/chtls_io.c 		     should_push(sk)))
sk                798 drivers/crypto/chelsio/chtls/chtls_io.c static int select_size(struct sock *sk, int io_len, int flags, int len)
sk                813 drivers/crypto/chelsio/chtls/chtls_io.c 	if (!send_should_push(sk, flags))
sk                819 drivers/crypto/chelsio/chtls/chtls_io.c void skb_entail(struct sock *sk, struct sk_buff *skb, int flags)
sk                821 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                822 drivers/crypto/chelsio/chtls/chtls_io.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                827 drivers/crypto/chelsio/chtls/chtls_io.c 	sk->sk_wmem_queued += skb->truesize;
sk                829 drivers/crypto/chelsio/chtls/chtls_io.c 	if (TCP_PAGE(sk) && TCP_OFF(sk)) {
sk                830 drivers/crypto/chelsio/chtls/chtls_io.c 		put_page(TCP_PAGE(sk));
sk                831 drivers/crypto/chelsio/chtls/chtls_io.c 		TCP_PAGE(sk) = NULL;
sk                832 drivers/crypto/chelsio/chtls/chtls_io.c 		TCP_OFF(sk) = 0;
sk                836 drivers/crypto/chelsio/chtls/chtls_io.c static struct sk_buff *get_tx_skb(struct sock *sk, int size)
sk                840 drivers/crypto/chelsio/chtls/chtls_io.c 	skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation);
sk                843 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
sk                849 drivers/crypto/chelsio/chtls/chtls_io.c static struct sk_buff *get_record_skb(struct sock *sk, int size, bool zcopy)
sk                851 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                855 drivers/crypto/chelsio/chtls/chtls_io.c 			KEY_ON_MEM_SZ + max_ivs_size(sk, size)),
sk                856 drivers/crypto/chelsio/chtls/chtls_io.c 			sk->sk_allocation);
sk                859 drivers/crypto/chelsio/chtls/chtls_io.c 			    KEY_ON_MEM_SZ + max_ivs_size(sk, size)));
sk                860 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR);
sk                877 drivers/crypto/chelsio/chtls/chtls_io.c static void push_frames_if_head(struct sock *sk)
sk                879 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                885 drivers/crypto/chelsio/chtls/chtls_io.c static int chtls_skb_copy_to_page_nocache(struct sock *sk,
sk                893 drivers/crypto/chelsio/chtls/chtls_io.c 	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) +
sk                901 drivers/crypto/chelsio/chtls/chtls_io.c 	sk->sk_wmem_queued   += copy;
sk                913 drivers/crypto/chelsio/chtls/chtls_io.c static int csk_mem_free(struct chtls_dev *cdev, struct sock *sk)
sk                915 drivers/crypto/chelsio/chtls/chtls_io.c 	return (cdev->max_host_sndbuf - sk->sk_wmem_queued);
sk                919 drivers/crypto/chelsio/chtls/chtls_io.c 			   struct sock *sk, long *timeo_p)
sk                929 drivers/crypto/chelsio/chtls/chtls_io.c 	if (csk_mem_free(cdev, sk)) {
sk                934 drivers/crypto/chelsio/chtls/chtls_io.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                936 drivers/crypto/chelsio/chtls/chtls_io.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                938 drivers/crypto/chelsio/chtls/chtls_io.c 		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
sk                942 drivers/crypto/chelsio/chtls/chtls_io.c 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                947 drivers/crypto/chelsio/chtls/chtls_io.c 		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                948 drivers/crypto/chelsio/chtls/chtls_io.c 		if (csk_mem_free(cdev, sk) && !vm_wait)
sk                951 drivers/crypto/chelsio/chtls/chtls_io.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                952 drivers/crypto/chelsio/chtls/chtls_io.c 		sk->sk_write_pending++;
sk                953 drivers/crypto/chelsio/chtls/chtls_io.c 		sk_wait_event(sk, &current_timeo, sk->sk_err ||
sk                954 drivers/crypto/chelsio/chtls/chtls_io.c 			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
sk                955 drivers/crypto/chelsio/chtls/chtls_io.c 			      (csk_mem_free(cdev, sk) && !vm_wait), &wait);
sk                956 drivers/crypto/chelsio/chtls/chtls_io.c 		sk->sk_write_pending--;
sk                971 drivers/crypto/chelsio/chtls/chtls_io.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                984 drivers/crypto/chelsio/chtls/chtls_io.c int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
sk                986 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk                988 drivers/crypto/chelsio/chtls/chtls_io.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                995 drivers/crypto/chelsio/chtls/chtls_io.c 	lock_sock(sk);
sk                997 drivers/crypto/chelsio/chtls/chtls_io.c 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk                999 drivers/crypto/chelsio/chtls/chtls_io.c 	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
sk               1000 drivers/crypto/chelsio/chtls/chtls_io.c 		err = sk_stream_wait_connect(sk, &timeo);
sk               1005 drivers/crypto/chelsio/chtls/chtls_io.c 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk               1007 drivers/crypto/chelsio/chtls/chtls_io.c 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
sk               1021 drivers/crypto/chelsio/chtls/chtls_io.c 		if (!csk_mem_free(cdev, sk))
sk               1041 drivers/crypto/chelsio/chtls/chtls_io.c 				push_frames_if_head(sk);
sk               1045 drivers/crypto/chelsio/chtls/chtls_io.c 				skb = get_record_skb(sk,
sk               1046 drivers/crypto/chelsio/chtls/chtls_io.c 						     select_size(sk,
sk               1052 drivers/crypto/chelsio/chtls/chtls_io.c 				skb = get_tx_skb(sk,
sk               1053 drivers/crypto/chelsio/chtls/chtls_io.c 						 select_size(sk, size, flags,
sk               1069 drivers/crypto/chelsio/chtls/chtls_io.c 			err = skb_add_data_nocache(sk, skb,
sk               1075 drivers/crypto/chelsio/chtls/chtls_io.c 			struct page *page = TCP_PAGE(sk);
sk               1077 drivers/crypto/chelsio/chtls/chtls_io.c 			int off = TCP_OFF(sk);
sk               1094 drivers/crypto/chelsio/chtls/chtls_io.c 				TCP_PAGE(sk) = page = NULL;
sk               1099 drivers/crypto/chelsio/chtls/chtls_io.c 				gfp_t gfp = sk->sk_allocation;
sk               1124 drivers/crypto/chelsio/chtls/chtls_io.c 			err = chtls_skb_copy_to_page_nocache(sk, &msg->msg_iter,
sk               1128 drivers/crypto/chelsio/chtls/chtls_io.c 				if (!TCP_PAGE(sk)) {
sk               1129 drivers/crypto/chelsio/chtls/chtls_io.c 					TCP_PAGE(sk) = page;
sk               1130 drivers/crypto/chelsio/chtls/chtls_io.c 					TCP_OFF(sk) = 0;
sk               1144 drivers/crypto/chelsio/chtls/chtls_io.c 					TCP_PAGE(sk) = page;
sk               1146 drivers/crypto/chelsio/chtls/chtls_io.c 					TCP_PAGE(sk) = NULL;
sk               1149 drivers/crypto/chelsio/chtls/chtls_io.c 			TCP_OFF(sk) = off + copy;
sk               1161 drivers/crypto/chelsio/chtls/chtls_io.c 		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
sk               1168 drivers/crypto/chelsio/chtls/chtls_io.c 			push_frames_if_head(sk);
sk               1171 drivers/crypto/chelsio/chtls/chtls_io.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               1173 drivers/crypto/chelsio/chtls/chtls_io.c 		err = csk_wait_memory(cdev, sk, &timeo);
sk               1180 drivers/crypto/chelsio/chtls/chtls_io.c 		chtls_tcp_push(sk, flags);
sk               1182 drivers/crypto/chelsio/chtls/chtls_io.c 	release_sock(sk);
sk               1187 drivers/crypto/chelsio/chtls/chtls_io.c 		sk->sk_wmem_queued -= skb->truesize;
sk               1196 drivers/crypto/chelsio/chtls/chtls_io.c 	copied = sk_stream_error(sk, flags, err);
sk               1200 drivers/crypto/chelsio/chtls/chtls_io.c int chtls_sendpage(struct sock *sk, struct page *page,
sk               1209 drivers/crypto/chelsio/chtls/chtls_io.c 	tp = tcp_sk(sk);
sk               1211 drivers/crypto/chelsio/chtls/chtls_io.c 	csk = rcu_dereference_sk_user_data(sk);
sk               1213 drivers/crypto/chelsio/chtls/chtls_io.c 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk               1215 drivers/crypto/chelsio/chtls/chtls_io.c 	err = sk_stream_wait_connect(sk, &timeo);
sk               1216 drivers/crypto/chelsio/chtls/chtls_io.c 	if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
sk               1230 drivers/crypto/chelsio/chtls/chtls_io.c 			if (!csk_mem_free(cdev, sk))
sk               1234 drivers/crypto/chelsio/chtls/chtls_io.c 				skb = get_record_skb(sk,
sk               1235 drivers/crypto/chelsio/chtls/chtls_io.c 						     select_size(sk, size,
sk               1240 drivers/crypto/chelsio/chtls/chtls_io.c 				skb = get_tx_skb(sk, 0);
sk               1257 drivers/crypto/chelsio/chtls/chtls_io.c 			push_frames_if_head(sk);
sk               1266 drivers/crypto/chelsio/chtls/chtls_io.c 		sk->sk_wmem_queued += copy;
sk               1273 drivers/crypto/chelsio/chtls/chtls_io.c 		    (sk_stream_wspace(sk) < sk_stream_min_wspace(sk)))
sk               1280 drivers/crypto/chelsio/chtls/chtls_io.c 			push_frames_if_head(sk);
sk               1283 drivers/crypto/chelsio/chtls/chtls_io.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               1285 drivers/crypto/chelsio/chtls/chtls_io.c 		err = csk_wait_memory(cdev, sk, &timeo);
sk               1292 drivers/crypto/chelsio/chtls/chtls_io.c 		chtls_tcp_push(sk, flags);
sk               1294 drivers/crypto/chelsio/chtls/chtls_io.c 	release_sock(sk);
sk               1304 drivers/crypto/chelsio/chtls/chtls_io.c 	copied = sk_stream_error(sk, flags, err);
sk               1308 drivers/crypto/chelsio/chtls/chtls_io.c static void chtls_select_window(struct sock *sk)
sk               1310 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk               1311 drivers/crypto/chelsio/chtls/chtls_io.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1314 drivers/crypto/chelsio/chtls/chtls_io.c 	wnd = max_t(unsigned int, wnd, tcp_full_space(sk));
sk               1368 drivers/crypto/chelsio/chtls/chtls_io.c static void chtls_cleanup_rbuf(struct sock *sk, int copied)
sk               1370 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk               1378 drivers/crypto/chelsio/chtls/chtls_io.c 	if (!sk_in_state(sk, CREDIT_RETURN_STATE))
sk               1381 drivers/crypto/chelsio/chtls/chtls_io.c 	chtls_select_window(sk);
sk               1382 drivers/crypto/chelsio/chtls/chtls_io.c 	tp = tcp_sk(sk);
sk               1397 drivers/crypto/chelsio/chtls/chtls_io.c static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk               1400 drivers/crypto/chelsio/chtls/chtls_io.c 	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
sk               1402 drivers/crypto/chelsio/chtls/chtls_io.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1411 drivers/crypto/chelsio/chtls/chtls_io.c 	timeo = sock_rcvtimeo(sk, nonblock);
sk               1412 drivers/crypto/chelsio/chtls/chtls_io.c 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
sk               1414 drivers/crypto/chelsio/chtls/chtls_io.c 	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
sk               1415 drivers/crypto/chelsio/chtls/chtls_io.c 		chtls_cleanup_rbuf(sk, copied);
sk               1431 drivers/crypto/chelsio/chtls/chtls_io.c 		skb = skb_peek(&sk->sk_receive_queue);
sk               1438 drivers/crypto/chelsio/chtls/chtls_io.c 			sk->sk_write_space(sk);
sk               1440 drivers/crypto/chelsio/chtls/chtls_io.c 		if (copied >= target && !sk->sk_backlog.tail)
sk               1444 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
sk               1445 drivers/crypto/chelsio/chtls/chtls_io.c 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
sk               1452 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sock_flag(sk, SOCK_DONE))
sk               1454 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sk->sk_err) {
sk               1455 drivers/crypto/chelsio/chtls/chtls_io.c 				copied = sock_error(sk);
sk               1458 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               1460 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sk->sk_state == TCP_CLOSE) {
sk               1473 drivers/crypto/chelsio/chtls/chtls_io.c 		if (sk->sk_backlog.tail) {
sk               1474 drivers/crypto/chelsio/chtls/chtls_io.c 			release_sock(sk);
sk               1475 drivers/crypto/chelsio/chtls/chtls_io.c 			lock_sock(sk);
sk               1476 drivers/crypto/chelsio/chtls/chtls_io.c 			chtls_cleanup_rbuf(sk, copied);
sk               1482 drivers/crypto/chelsio/chtls/chtls_io.c 		chtls_cleanup_rbuf(sk, copied);
sk               1483 drivers/crypto/chelsio/chtls/chtls_io.c 		sk_wait_data(sk, &timeo, NULL);
sk               1488 drivers/crypto/chelsio/chtls/chtls_io.c 			__skb_unlink(skb, &sk->sk_receive_queue);
sk               1497 drivers/crypto/chelsio/chtls/chtls_io.c 				release_sock(sk);
sk               1498 drivers/crypto/chelsio/chtls/chtls_io.c 				lock_sock(sk);
sk               1514 drivers/crypto/chelsio/chtls/chtls_io.c 				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
sk               1545 drivers/crypto/chelsio/chtls/chtls_io.c 			chtls_free_skb(sk, skb);
sk               1549 drivers/crypto/chelsio/chtls/chtls_io.c 			    !skb_peek(&sk->sk_receive_queue))
sk               1555 drivers/crypto/chelsio/chtls/chtls_io.c 		chtls_cleanup_rbuf(sk, copied);
sk               1556 drivers/crypto/chelsio/chtls/chtls_io.c 	release_sock(sk);
sk               1563 drivers/crypto/chelsio/chtls/chtls_io.c static int peekmsg(struct sock *sk, struct msghdr *msg,
sk               1566 drivers/crypto/chelsio/chtls/chtls_io.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1573 drivers/crypto/chelsio/chtls/chtls_io.c 	lock_sock(sk);
sk               1574 drivers/crypto/chelsio/chtls/chtls_io.c 	timeo = sock_rcvtimeo(sk, nonblock);
sk               1588 drivers/crypto/chelsio/chtls/chtls_io.c 		skb_queue_walk(&sk->sk_receive_queue, skb) {
sk               1597 drivers/crypto/chelsio/chtls/chtls_io.c 		if (sock_flag(sk, SOCK_DONE))
sk               1599 drivers/crypto/chelsio/chtls/chtls_io.c 		if (sk->sk_err) {
sk               1600 drivers/crypto/chelsio/chtls/chtls_io.c 			copied = sock_error(sk);
sk               1603 drivers/crypto/chelsio/chtls/chtls_io.c 		if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               1605 drivers/crypto/chelsio/chtls/chtls_io.c 		if (sk->sk_state == TCP_CLOSE) {
sk               1618 drivers/crypto/chelsio/chtls/chtls_io.c 		if (sk->sk_backlog.tail) {
sk               1620 drivers/crypto/chelsio/chtls/chtls_io.c 			release_sock(sk);
sk               1621 drivers/crypto/chelsio/chtls/chtls_io.c 			lock_sock(sk);
sk               1623 drivers/crypto/chelsio/chtls/chtls_io.c 			sk_wait_data(sk, &timeo, NULL);
sk               1651 drivers/crypto/chelsio/chtls/chtls_io.c 					if (!sock_flag(sk, SOCK_URGINLINE)) {
sk               1680 drivers/crypto/chelsio/chtls/chtls_io.c 	release_sock(sk);
sk               1684 drivers/crypto/chelsio/chtls/chtls_io.c int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk               1687 drivers/crypto/chelsio/chtls/chtls_io.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1698 drivers/crypto/chelsio/chtls/chtls_io.c 		return tcp_prot.recvmsg(sk, msg, len, nonblock, flags,
sk               1702 drivers/crypto/chelsio/chtls/chtls_io.c 		return peekmsg(sk, msg, len, nonblock, flags);
sk               1704 drivers/crypto/chelsio/chtls/chtls_io.c 	if (sk_can_busy_loop(sk) &&
sk               1705 drivers/crypto/chelsio/chtls/chtls_io.c 	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk               1706 drivers/crypto/chelsio/chtls/chtls_io.c 	    sk->sk_state == TCP_ESTABLISHED)
sk               1707 drivers/crypto/chelsio/chtls/chtls_io.c 		sk_busy_loop(sk, nonblock);
sk               1709 drivers/crypto/chelsio/chtls/chtls_io.c 	lock_sock(sk);
sk               1710 drivers/crypto/chelsio/chtls/chtls_io.c 	csk = rcu_dereference_sk_user_data(sk);
sk               1713 drivers/crypto/chelsio/chtls/chtls_io.c 		return chtls_pt_recvmsg(sk, msg, len, nonblock,
sk               1716 drivers/crypto/chelsio/chtls/chtls_io.c 	timeo = sock_rcvtimeo(sk, nonblock);
sk               1717 drivers/crypto/chelsio/chtls/chtls_io.c 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
sk               1719 drivers/crypto/chelsio/chtls/chtls_io.c 	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
sk               1720 drivers/crypto/chelsio/chtls/chtls_io.c 		chtls_cleanup_rbuf(sk, copied);
sk               1736 drivers/crypto/chelsio/chtls/chtls_io.c 		skb = skb_peek(&sk->sk_receive_queue);
sk               1744 drivers/crypto/chelsio/chtls/chtls_io.c 			sk->sk_write_space(sk);
sk               1746 drivers/crypto/chelsio/chtls/chtls_io.c 		if (copied >= target && !sk->sk_backlog.tail)
sk               1750 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
sk               1751 drivers/crypto/chelsio/chtls/chtls_io.c 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
sk               1755 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sock_flag(sk, SOCK_DONE))
sk               1757 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sk->sk_err) {
sk               1758 drivers/crypto/chelsio/chtls/chtls_io.c 				copied = sock_error(sk);
sk               1761 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               1763 drivers/crypto/chelsio/chtls/chtls_io.c 			if (sk->sk_state == TCP_CLOSE) {
sk               1777 drivers/crypto/chelsio/chtls/chtls_io.c 		if (sk->sk_backlog.tail) {
sk               1778 drivers/crypto/chelsio/chtls/chtls_io.c 			release_sock(sk);
sk               1779 drivers/crypto/chelsio/chtls/chtls_io.c 			lock_sock(sk);
sk               1780 drivers/crypto/chelsio/chtls/chtls_io.c 			chtls_cleanup_rbuf(sk, copied);
sk               1786 drivers/crypto/chelsio/chtls/chtls_io.c 		chtls_cleanup_rbuf(sk, copied);
sk               1787 drivers/crypto/chelsio/chtls/chtls_io.c 		sk_wait_data(sk, &timeo, NULL);
sk               1792 drivers/crypto/chelsio/chtls/chtls_io.c 			chtls_kfree_skb(sk, skb);
sk               1815 drivers/crypto/chelsio/chtls/chtls_io.c 				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
sk               1845 drivers/crypto/chelsio/chtls/chtls_io.c 				chtls_free_skb(sk, skb);
sk               1849 drivers/crypto/chelsio/chtls/chtls_io.c 			     !skb_peek(&sk->sk_receive_queue))
sk               1855 drivers/crypto/chelsio/chtls/chtls_io.c 		chtls_cleanup_rbuf(sk, copied);
sk               1857 drivers/crypto/chelsio/chtls/chtls_io.c 	release_sock(sk);
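The chtls_io.c send path above gates on csk_mem_free(), i.e. on cdev->max_host_sndbuf minus sk->sk_wmem_queued, and parks the sender in csk_wait_memory() when that budget is exhausted. A simplified sketch of that wait loop, assuming the structure visible in the listing; signal handling and the vm_wait case are left out:

    static int wait_for_sndbuf_sketch(struct chtls_dev *cdev, struct sock *sk,
                                      long *timeo_p)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            int err = 0;

            add_wait_queue(sk_sleep(sk), &wait);
            while (!csk_mem_free(cdev, sk)) {
                    if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
                            err = -EPIPE;
                            break;
                    }
                    if (!*timeo_p) {
                            err = -EAGAIN;
                            break;
                    }
                    set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                    sk->sk_write_pending++;
                    sk_wait_event(sk, timeo_p, sk->sk_err ||
                                  (sk->sk_shutdown & SEND_SHUTDOWN) ||
                                  csk_mem_free(cdev, sk), &wait);
                    sk->sk_write_pending--;
            }
            remove_wait_queue(sk_sleep(sk), &wait);
            return err;
    }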
sk                 61 drivers/crypto/chelsio/chtls/chtls_main.c 		ret = chtls_listen_start(clisten->cdev, clisten->sk);
sk                 65 drivers/crypto/chelsio/chtls/chtls_main.c 		chtls_listen_stop(clisten->cdev, clisten->sk);
sk                 76 drivers/crypto/chelsio/chtls/chtls_main.c static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk                 79 drivers/crypto/chelsio/chtls/chtls_main.c 		return tcp_v4_do_rcv(sk, skb);
sk                 80 drivers/crypto/chelsio/chtls/chtls_main.c 	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
sk                 84 drivers/crypto/chelsio/chtls/chtls_main.c static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
sk                 89 drivers/crypto/chelsio/chtls/chtls_main.c 	if (sk->sk_protocol != IPPROTO_TCP)
sk                 92 drivers/crypto/chelsio/chtls/chtls_main.c 	if (sk->sk_family == PF_INET &&
sk                 93 drivers/crypto/chelsio/chtls/chtls_main.c 	    LOOPBACK(inet_sk(sk)->inet_rcv_saddr))
sk                 96 drivers/crypto/chelsio/chtls/chtls_main.c 	sk->sk_backlog_rcv = listen_backlog_rcv;
sk                101 drivers/crypto/chelsio/chtls/chtls_main.c 	clisten->sk = sk;
sk                109 drivers/crypto/chelsio/chtls/chtls_main.c static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
sk                113 drivers/crypto/chelsio/chtls/chtls_main.c 	if (sk->sk_protocol != IPPROTO_TCP)
sk                120 drivers/crypto/chelsio/chtls/chtls_main.c 	clisten->sk = sk;
sk                143 drivers/crypto/chelsio/chtls/chtls_main.c static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
sk                147 drivers/crypto/chelsio/chtls/chtls_main.c 	if (sk->sk_state == TCP_LISTEN)
sk                148 drivers/crypto/chelsio/chtls/chtls_main.c 		return chtls_start_listen(cdev, sk);
sk                152 drivers/crypto/chelsio/chtls/chtls_main.c static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
sk                156 drivers/crypto/chelsio/chtls/chtls_main.c 	if (sk->sk_state == TCP_LISTEN)
sk                157 drivers/crypto/chelsio/chtls/chtls_main.c 		chtls_stop_listen(cdev, sk);
sk                460 drivers/crypto/chelsio/chtls/chtls_main.c static int do_chtls_getsockopt(struct sock *sk, char __user *optval,
sk                471 drivers/crypto/chelsio/chtls/chtls_main.c static int chtls_getsockopt(struct sock *sk, int level, int optname,
sk                474 drivers/crypto/chelsio/chtls/chtls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                477 drivers/crypto/chelsio/chtls/chtls_main.c 		return ctx->sk_proto->getsockopt(sk, level,
sk                480 drivers/crypto/chelsio/chtls/chtls_main.c 	return do_chtls_getsockopt(sk, optval, optlen);
sk                483 drivers/crypto/chelsio/chtls/chtls_main.c static int do_chtls_setsockopt(struct sock *sk, int optname,
sk                491 drivers/crypto/chelsio/chtls/chtls_main.c 	csk = rcu_dereference_sk_user_data(sk);
sk                539 drivers/crypto/chelsio/chtls/chtls_main.c static int chtls_setsockopt(struct sock *sk, int level, int optname,
sk                542 drivers/crypto/chelsio/chtls/chtls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                545 drivers/crypto/chelsio/chtls/chtls_main.c 		return ctx->sk_proto->setsockopt(sk, level,
sk                548 drivers/crypto/chelsio/chtls/chtls_main.c 	return do_chtls_setsockopt(sk, optname, optval, optlen);
sk                561 drivers/crypto/chelsio/chtls/chtls_main.c void chtls_install_cpl_ops(struct sock *sk)
sk                563 drivers/crypto/chelsio/chtls/chtls_main.c 	sk->sk_prot = &chtls_cpl_prot;
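The chtls_main.c sockopt entries delegate anything that is not a TLS-level option back to the base protocol handler saved in the TLS context, and only fall through to do_chtls_getsockopt()/do_chtls_setsockopt() otherwise. A sketch of that dispatch; the "level != SOL_TLS" guard is an assumption, since only the delegation and the fallthrough appear in the listing:

    static int chtls_getsockopt_sketch(struct sock *sk, int level, int optname,
                                       char __user *optval, int __user *optlen)
    {
            struct tls_context *ctx = tls_get_ctx(sk);

            if (level != SOL_TLS)                   /* assumed guard */
                    return ctx->sk_proto->getsockopt(sk, level, optname,
                                                     optval, optlen);
            return do_chtls_getsockopt(sk, optval, optlen);
    }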
sk                136 drivers/infiniband/core/addr.c 	    !(NETLINK_CB(skb).sk))
sk               2325 drivers/infiniband/core/device.c 		if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
sk                 90 drivers/infiniband/core/netlink.c 	if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
sk                190 drivers/infiniband/core/netlink.c 			err = netlink_dump_start(skb->sk, skb, nlh, &c);
sk                813 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk                834 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk                857 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk                951 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk                971 drivers/infiniband/core/nldev.c 	err = fill_port_info(msg, device, port, sock_net(skb->sk));
sk                978 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk               1005 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), ifindex);
sk               1031 drivers/infiniband/core/nldev.c 		if (fill_port_info(skb, device, p, sock_net(skb->sk))) {
sk               1060 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk               1080 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk               1204 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk               1257 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk               1301 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk               1482 drivers/infiniband/core/nldev.c 	ndev = dev_get_by_name(sock_net(skb->sk), ndev_name);
sk               1517 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk               1551 drivers/infiniband/core/nldev.c 		ibdev = ib_device_get_by_index(sock_net(skb->sk), index);
sk               1602 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk               1642 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk               1687 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk               1742 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk               1774 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk               1810 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk               1836 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk               1899 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk               1931 drivers/infiniband/core/nldev.c 	device = ib_device_get_by_index(sock_net(skb->sk), index);
sk               1971 drivers/infiniband/core/nldev.c 	return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
sk               1027 drivers/infiniband/core/sa_query.c 	    !(NETLINK_CB(skb).sk))
sk               1102 drivers/infiniband/core/sa_query.c 	    !(NETLINK_CB(skb).sk))
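The infiniband/core netlink entries share one reply pattern: the network namespace comes from the requesting socket via sock_net(skb->sk), the device is resolved by index in that namespace, and the answer is unicast back to NETLINK_CB(skb).portid. A sketch of that shape; attribute parsing and message filling are elided, and fill_dev_info-style helpers are not shown here:

    static int nldev_get_sketch(struct sk_buff *skb, u32 index)
    {
            struct net *net = sock_net(skb->sk);
            struct ib_device *device;
            struct sk_buff *msg;
            int err;

            device = ib_device_get_by_index(net, index);
            if (!device)
                    return -EINVAL;

            msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
            if (!msg) {
                    err = -ENOMEM;
                    goto out_put;
            }
            /* ... fill msg with device attributes here ... */
            err = rdma_nl_unicast(net, msg, NETLINK_CB(skb).portid);
    out_put:
            ib_device_put(device);
            return err;
    }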
sk                190 drivers/infiniband/hw/usnic/usnic_transport.c 		*proto = sock->sk->sk_protocol;
sk                120 drivers/infiniband/sw/rxe/rxe_net.c 	ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
sk                121 drivers/infiniband/sw/rxe/rxe_net.c 					       recv_sockets.sk6->sk, &fl6,
sk                157 drivers/infiniband/sw/rxe/rxe_net.c 		dst = sk_dst_get(qp->sk->sk);
sk                186 drivers/infiniband/sw/rxe/rxe_net.c 			sk_dst_set(qp->sk->sk, dst);
sk                192 drivers/infiniband/sw/rxe/rxe_net.c static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
sk                268 drivers/infiniband/sw/rxe/rxe_net.c static void rxe_release_udp_tunnel(struct socket *sk)
sk                270 drivers/infiniband/sw/rxe/rxe_net.c 	if (sk)
sk                271 drivers/infiniband/sw/rxe/rxe_net.c 		udp_tunnel_sock_release(sk);
sk                413 drivers/infiniband/sw/rxe/rxe_net.c 	struct sock *sk = skb->sk;
sk                414 drivers/infiniband/sw/rxe/rxe_net.c 	struct rxe_qp *qp = sk->sk_user_data;
sk                429 drivers/infiniband/sw/rxe/rxe_net.c 	skb->sk = pkt->qp->sk->sk;
sk                435 drivers/infiniband/sw/rxe/rxe_net.c 		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
sk                437 drivers/infiniband/sw/rxe/rxe_net.c 		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
sk                226 drivers/infiniband/sw/rxe/rxe_qp.c 	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
sk                229 drivers/infiniband/sw/rxe/rxe_qp.c 	qp->sk->sk->sk_user_data = qp;
sk                833 drivers/infiniband/sw/rxe/rxe_qp.c 		sk_dst_reset(qp->sk->sk);
sk                837 drivers/infiniband/sw/rxe/rxe_qp.c 	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
sk                838 drivers/infiniband/sw/rxe/rxe_qp.c 	sock_release(qp->sk);
sk                253 drivers/infiniband/sw/rxe/rxe_verbs.h 	struct socket		*sk;
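The rxe entries show a kernel UDP socket kept per QP: the QP is stashed in sk_user_data so the skb destructor and encap-recv callbacks can recover it, the cached route is dropped on address changes, and teardown shuts the socket down before releasing it. A trimmed sketch of setup and teardown using only the calls visible above:

    static int rxe_qp_sock_sketch(struct rxe_qp *qp)
    {
            int err;

            err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
            if (err < 0)
                    return err;
            qp->sk->sk->sk_user_data = qp;  /* read back via sk->sk_user_data */
            return 0;
    }

    static void rxe_qp_sock_teardown_sketch(struct rxe_qp *qp)
    {
            sk_dst_reset(qp->sk->sk);       /* drop any cached dst */
            kernel_sock_shutdown(qp->sk, SHUT_RDWR);
            sock_release(qp->sk);
    }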
sk                281 drivers/infiniband/sw/siw/siw.h 	struct socket *sk;
sk                543 drivers/infiniband/sw/siw/siw.h void siw_qp_llp_data_ready(struct sock *sk);
sk                544 drivers/infiniband/sw/siw/siw.h void siw_qp_llp_write_space(struct sock *sk);
sk                 41 drivers/infiniband/sw/siw/siw_cm.c static void siw_sk_assign_cm_upcalls(struct sock *sk)
sk                 43 drivers/infiniband/sw/siw/siw_cm.c 	write_lock_bh(&sk->sk_callback_lock);
sk                 44 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_state_change = siw_cm_llp_state_change;
sk                 45 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_data_ready = siw_cm_llp_data_ready;
sk                 46 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_write_space = siw_cm_llp_write_space;
sk                 47 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_error_report = siw_cm_llp_error_report;
sk                 48 drivers/infiniband/sw/siw/siw_cm.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                 51 drivers/infiniband/sw/siw/siw_cm.c static void siw_sk_save_upcalls(struct sock *sk)
sk                 53 drivers/infiniband/sw/siw/siw_cm.c 	struct siw_cep *cep = sk_to_cep(sk);
sk                 55 drivers/infiniband/sw/siw/siw_cm.c 	write_lock_bh(&sk->sk_callback_lock);
sk                 56 drivers/infiniband/sw/siw/siw_cm.c 	cep->sk_state_change = sk->sk_state_change;
sk                 57 drivers/infiniband/sw/siw/siw_cm.c 	cep->sk_data_ready = sk->sk_data_ready;
sk                 58 drivers/infiniband/sw/siw/siw_cm.c 	cep->sk_write_space = sk->sk_write_space;
sk                 59 drivers/infiniband/sw/siw/siw_cm.c 	cep->sk_error_report = sk->sk_error_report;
sk                 60 drivers/infiniband/sw/siw/siw_cm.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                 63 drivers/infiniband/sw/siw/siw_cm.c static void siw_sk_restore_upcalls(struct sock *sk, struct siw_cep *cep)
sk                 65 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_state_change = cep->sk_state_change;
sk                 66 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_data_ready = cep->sk_data_ready;
sk                 67 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_write_space = cep->sk_write_space;
sk                 68 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_error_report = cep->sk_error_report;
sk                 69 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_user_data = NULL;
sk                 75 drivers/infiniband/sw/siw/siw_cm.c 	struct sock *sk = s->sk;
sk                 77 drivers/infiniband/sw/siw/siw_cm.c 	write_lock_bh(&sk->sk_callback_lock);
sk                 79 drivers/infiniband/sw/siw/siw_cm.c 	qp->attrs.sk = s;
sk                 80 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_data_ready = siw_qp_llp_data_ready;
sk                 81 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_write_space = siw_qp_llp_write_space;
sk                 83 drivers/infiniband/sw/siw/siw_cm.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                 88 drivers/infiniband/sw/siw/siw_cm.c 	struct sock *sk = s->sk;
sk                 91 drivers/infiniband/sw/siw/siw_cm.c 	if (sk) {
sk                 92 drivers/infiniband/sw/siw/siw_cm.c 		write_lock_bh(&sk->sk_callback_lock);
sk                 93 drivers/infiniband/sw/siw/siw_cm.c 		cep = sk_to_cep(sk);
sk                 95 drivers/infiniband/sw/siw/siw_cm.c 			siw_sk_restore_upcalls(sk, cep);
sk                100 drivers/infiniband/sw/siw/siw_cm.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                106 drivers/infiniband/sw/siw/siw_cm.c static void siw_rtr_data_ready(struct sock *sk)
sk                112 drivers/infiniband/sw/siw/siw_cm.c 	read_lock(&sk->sk_callback_lock);
sk                114 drivers/infiniband/sw/siw/siw_cm.c 	cep = sk_to_cep(sk);
sk                119 drivers/infiniband/sw/siw/siw_cm.c 	qp = sk_to_qp(sk);
sk                125 drivers/infiniband/sw/siw/siw_cm.c 	tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);
sk                135 drivers/infiniband/sw/siw/siw_cm.c 	read_unlock(&sk->sk_callback_lock);
sk                142 drivers/infiniband/sw/siw/siw_cm.c 	struct sock *sk = cep->sock->sk;
sk                144 drivers/infiniband/sw/siw/siw_cm.c 	write_lock_bh(&sk->sk_callback_lock);
sk                145 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_data_ready = siw_rtr_data_ready;
sk                146 drivers/infiniband/sw/siw/siw_cm.c 	sk->sk_write_space = siw_qp_llp_write_space;
sk                147 drivers/infiniband/sw/siw/siw_cm.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                154 drivers/infiniband/sw/siw/siw_cm.c 	s->sk->sk_user_data = cep;
sk                156 drivers/infiniband/sw/siw/siw_cm.c 	siw_sk_save_upcalls(s->sk);
sk                157 drivers/infiniband/sw/siw/siw_cm.c 	siw_sk_assign_cm_upcalls(s->sk);
sk                864 drivers/infiniband/sw/siw/siw_cm.c 	qp_attrs.sk = cep->sock;
sk                948 drivers/infiniband/sw/siw/siw_cm.c 	new_s->sk->sk_user_data = new_cep;
sk                971 drivers/infiniband/sw/siw/siw_cm.c 	if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
sk               1051 drivers/infiniband/sw/siw/siw_cm.c 				cep->sock->sk->sk_data_ready(cep->sock->sk);
sk               1221 drivers/infiniband/sw/siw/siw_cm.c static void siw_cm_llp_data_ready(struct sock *sk)
sk               1225 drivers/infiniband/sw/siw/siw_cm.c 	read_lock(&sk->sk_callback_lock);
sk               1227 drivers/infiniband/sw/siw/siw_cm.c 	cep = sk_to_cep(sk);
sk               1250 drivers/infiniband/sw/siw/siw_cm.c 	read_unlock(&sk->sk_callback_lock);
sk               1253 drivers/infiniband/sw/siw/siw_cm.c static void siw_cm_llp_write_space(struct sock *sk)
sk               1255 drivers/infiniband/sw/siw/siw_cm.c 	struct siw_cep *cep = sk_to_cep(sk);
sk               1261 drivers/infiniband/sw/siw/siw_cm.c static void siw_cm_llp_error_report(struct sock *sk)
sk               1263 drivers/infiniband/sw/siw/siw_cm.c 	struct siw_cep *cep = sk_to_cep(sk);
sk               1267 drivers/infiniband/sw/siw/siw_cm.c 			    sk->sk_err, sk->sk_state, cep->state);
sk               1268 drivers/infiniband/sw/siw/siw_cm.c 		cep->sk_error_report(sk);
sk               1272 drivers/infiniband/sw/siw/siw_cm.c static void siw_cm_llp_state_change(struct sock *sk)
sk               1277 drivers/infiniband/sw/siw/siw_cm.c 	read_lock(&sk->sk_callback_lock);
sk               1279 drivers/infiniband/sw/siw/siw_cm.c 	cep = sk_to_cep(sk);
sk               1282 drivers/infiniband/sw/siw/siw_cm.c 		read_unlock(&sk->sk_callback_lock);
sk               1289 drivers/infiniband/sw/siw/siw_cm.c 	switch (sk->sk_state) {
sk               1306 drivers/infiniband/sw/siw/siw_cm.c 		siw_dbg_cep(cep, "unexpected socket state %d\n", sk->sk_state);
sk               1308 drivers/infiniband/sw/siw/siw_cm.c 	read_unlock(&sk->sk_callback_lock);
sk               1309 drivers/infiniband/sw/siw/siw_cm.c 	orig_state_change(sk);
sk               1675 drivers/infiniband/sw/siw/siw_cm.c 	qp_attrs.sk = cep->sock;
sk               1822 drivers/infiniband/sw/siw/siw_cm.c 			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
sk               1831 drivers/infiniband/sw/siw/siw_cm.c 			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
sk                 62 drivers/infiniband/sw/siw/siw_cm.h 	void (*sk_state_change)(struct sock *sk);
sk                 63 drivers/infiniband/sw/siw/siw_cm.h 	void (*sk_data_ready)(struct sock *sk);
sk                 64 drivers/infiniband/sw/siw/siw_cm.h 	void (*sk_write_space)(struct sock *sk);
sk                 65 drivers/infiniband/sw/siw/siw_cm.h 	void (*sk_error_report)(struct sock *sk);
sk                130 drivers/infiniband/sw/siw/siw_cm.h #define sk_to_qp(sk) (((struct siw_cep *)((sk)->sk_user_data))->qp)
sk                131 drivers/infiniband/sw/siw/siw_cm.h #define sk_to_cep(sk) ((struct siw_cep *)((sk)->sk_user_data))
sk                 93 drivers/infiniband/sw/siw/siw_qp.c void siw_qp_llp_data_ready(struct sock *sk)
sk                 97 drivers/infiniband/sw/siw/siw_qp.c 	read_lock(&sk->sk_callback_lock);
sk                 99 drivers/infiniband/sw/siw/siw_qp.c 	if (unlikely(!sk->sk_user_data || !sk_to_qp(sk)))
sk                102 drivers/infiniband/sw/siw/siw_qp.c 	qp = sk_to_qp(sk);
sk                115 drivers/infiniband/sw/siw/siw_qp.c 			tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);
sk                123 drivers/infiniband/sw/siw/siw_qp.c 	read_unlock(&sk->sk_callback_lock);
sk                135 drivers/infiniband/sw/siw/siw_qp.c 	qp->attrs.sk = NULL;
sk                183 drivers/infiniband/sw/siw/siw_qp.c void siw_qp_llp_write_space(struct sock *sk)
sk                187 drivers/infiniband/sw/siw/siw_qp.c 	read_lock(&sk->sk_callback_lock);
sk                189 drivers/infiniband/sw/siw/siw_qp.c 	cep  = sk_to_cep(sk);
sk                191 drivers/infiniband/sw/siw/siw_qp.c 		cep->sk_write_space(sk);
sk                193 drivers/infiniband/sw/siw/siw_qp.c 		if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
sk                197 drivers/infiniband/sw/siw/siw_qp.c 	read_unlock(&sk->sk_callback_lock);
sk                392 drivers/infiniband/sw/siw/siw_qp.c 	struct socket *s = qp->attrs.sk;
sk                679 drivers/infiniband/sw/siw/siw_qp.c 		qp->attrs.sk = attrs->sk;
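siw_qp_llp_data_ready() above shows the standard way a kernel ULP drains a TCP socket from its data_ready callback: build a read_descriptor_t and let tcp_read_sock() walk the receive queue, handing each segment to a receive actor. A hedged sketch of that mechanism with placeholder names my_rx_actor() and my_data_ready():

    #include <net/sock.h>
    #include <net/tcp.h>

    /* Called by tcp_read_sock() for each chunk of queued data; the return
     * value is the number of bytes consumed (0 stops the walk). */
    static int my_rx_actor(read_descriptor_t *rd_desc, struct sk_buff *skb,
                           unsigned int offset, size_t len)
    {
            /* parse/copy up to 'len' bytes starting at 'offset' in 'skb' */
            return len;
    }

    static void my_data_ready(struct sock *sk)
    {
            read_descriptor_t rd_desc = { .arg.data = NULL, .count = 1 };

            read_lock(&sk->sk_callback_lock);
            if (sk->sk_user_data) {
                    rd_desc.arg.data = sk->sk_user_data;
                    tcp_read_sock(sk, &rd_desc, my_rx_actor);
            }
            read_unlock(&sk->sk_callback_lock);
    }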
sk                327 drivers/infiniband/sw/siw/siw_qp_tx.c 	struct sock *sk = s->sk;
sk                337 drivers/infiniband/sw/siw/siw_qp_tx.c 		tcp_rate_check_app_limited(sk);
sk                339 drivers/infiniband/sw/siw/siw_qp_tx.c 		lock_sock(sk);
sk                340 drivers/infiniband/sw/siw/siw_qp_tx.c 		rv = do_tcp_sendpages(sk, page[i], offset, bytes, flags);
sk                341 drivers/infiniband/sw/siw/siw_qp_tx.c 		release_sock(sk);
sk                663 drivers/infiniband/sw/siw/siw_qp_tx.c 	struct tcp_sock *tp = tcp_sk(s->sk);
sk                784 drivers/infiniband/sw/siw/siw_qp_tx.c 	struct socket *s = qp->attrs.sk;
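The siw_qp_tx.c lines show the zero-copy TX side: page fragments are pushed directly into TCP with do_tcp_sendpages(), with the socket lock held around each call. A simplified sketch, assuming the byte offset only applies to the first page and using my_push_pages() as an invented helper:

    #include <net/sock.h>
    #include <net/tcp.h>

    static ssize_t my_push_pages(struct socket *s, struct page **pages,
                                 int npages, size_t offset, size_t len)
    {
            struct sock *sk = s->sk;
            ssize_t rv;
            int i;

            for (i = 0; i < npages && len; i++) {
                    size_t bytes = min_t(size_t, len, PAGE_SIZE - offset);
                    int flags = MSG_DONTWAIT | (len > bytes ? MSG_MORE : 0);

                    lock_sock(sk);
                    rv = do_tcp_sendpages(sk, pages[i], offset, bytes, flags);
                    release_sock(sk);

                    if (rv < 0)
                            return rv;
                    if (rv != (ssize_t)bytes)
                            return -EAGAIN;   /* partial write; real code would retry */
                    len -= bytes;
                    offset = 0;               /* only the first page is offset */
            }
            return 0;
    }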
sk                123 drivers/isdn/mISDN/dsp_dtmf.c 	s32 sk, sk1, sk2;
sk                160 drivers/isdn/mISDN/dsp_dtmf.c 			sk = (*hfccoeff++) >> 4;
sk                161 drivers/isdn/mISDN/dsp_dtmf.c 			if (sk > 32767 || sk < -32767 || sk2 > 32767
sk                167 drivers/isdn/mISDN/dsp_dtmf.c 				(sk * sk) -
sk                168 drivers/isdn/mISDN/dsp_dtmf.c 				(((cos2pik[k] * sk) >> 15) * sk2) +
sk                185 drivers/isdn/mISDN/dsp_dtmf.c 		sk = 0;
sk                191 drivers/isdn/mISDN/dsp_dtmf.c 			sk = ((cos2pik_ * sk1) >> 15) - sk2 + (*buf++);
sk                193 drivers/isdn/mISDN/dsp_dtmf.c 			sk1 = sk;
sk                195 drivers/isdn/mISDN/dsp_dtmf.c 		sk >>= 8;
sk                197 drivers/isdn/mISDN/dsp_dtmf.c 		if (sk > 32767 || sk < -32767 || sk2 > 32767 || sk2 < -32767)
sk                201 drivers/isdn/mISDN/dsp_dtmf.c 			(sk * sk) -
sk                202 drivers/isdn/mISDN/dsp_dtmf.c 			(((cos2pik[k] * sk) >> 15) * sk2) +
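The dsp_dtmf.c hits are the fixed-point Goertzel filter used for DTMF detection: the recurrence sk = ((coeff * sk1) >> 15) - sk2 + x, where coeff is roughly 2*cos(2*pi*f/fs) scaled by 2^15, followed by the squared magnitude sk*sk - ((coeff*sk)>>15)*sk2 + sk2*sk2 with both states scaled down first so the products stay within 32 bits. A stand-alone sketch of that computation (goertzel_mag2() is a made-up name; the >>8 scaling mirrors what the driver does before squaring):

    static int goertzel_mag2(const short *buf, int n, int coeff)
    {
            int sk = 0, sk1 = 0, sk2 = 0, i;

            for (i = 0; i < n; i++) {
                    sk = ((coeff * sk1) >> 15) - sk2 + buf[i];
                    sk2 = sk1;
                    sk1 = sk;
            }
            /* scale down so the squares below stay inside 32 bits */
            sk >>= 8;
            sk2 >>= 8;
            return (sk * sk) - (((coeff * sk) >> 15) * sk2) + (sk2 * sk2);
    }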
sk                685 drivers/isdn/mISDN/l1oip_core.c 	if (socket->sk == NULL) {
sk                 22 drivers/isdn/mISDN/socket.c #define _pms(sk)	((struct mISDN_sock *)sk)
sk                 46 drivers/isdn/mISDN/socket.c mISDN_sock_link(struct mISDN_sock_list *l, struct sock *sk)
sk                 49 drivers/isdn/mISDN/socket.c 	sk_add_node(sk, &l->head);
sk                 53 drivers/isdn/mISDN/socket.c static void mISDN_sock_unlink(struct mISDN_sock_list *l, struct sock *sk)
sk                 56 drivers/isdn/mISDN/socket.c 	sk_del_node_init(sk);
sk                 69 drivers/isdn/mISDN/socket.c 	if (msk->sk.sk_state == MISDN_CLOSED)
sk                 72 drivers/isdn/mISDN/socket.c 	err = sock_queue_rcv_skb(&msk->sk, skb);
sk                 88 drivers/isdn/mISDN/socket.c 		msk->sk.sk_state = MISDN_CLOSED;
sk                 95 drivers/isdn/mISDN/socket.c mISDN_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
sk                 99 drivers/isdn/mISDN/socket.c 	if (_pms(sk)->cmask & MISDN_TIME_STAMP) {
sk                110 drivers/isdn/mISDN/socket.c 	struct sock		*sk = sock->sk;
sk                116 drivers/isdn/mISDN/socket.c 		       __func__, (int)len, flags, _pms(sk)->ch.nr,
sk                117 drivers/isdn/mISDN/socket.c 		       sk->sk_protocol);
sk                121 drivers/isdn/mISDN/socket.c 	if (sk->sk_state == MISDN_CLOSED)
sk                124 drivers/isdn/mISDN/socket.c 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
sk                132 drivers/isdn/mISDN/socket.c 		maddr->dev = _pms(sk)->dev->id;
sk                133 drivers/isdn/mISDN/socket.c 		if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
sk                134 drivers/isdn/mISDN/socket.c 		    (sk->sk_protocol == ISDN_P_LAPD_NT)) {
sk                139 drivers/isdn/mISDN/socket.c 			maddr->channel = _pms(sk)->ch.nr;
sk                140 drivers/isdn/mISDN/socket.c 			maddr->sapi = _pms(sk)->ch.addr & 0xFF;
sk                141 drivers/isdn/mISDN/socket.c 			maddr->tei =  (_pms(sk)->ch.addr >> 8) & 0xFF;
sk                151 drivers/isdn/mISDN/socket.c 			skb_queue_head(&sk->sk_receive_queue, skb);
sk                159 drivers/isdn/mISDN/socket.c 	mISDN_sock_cmsg(sk, msg, skb);
sk                161 drivers/isdn/mISDN/socket.c 	skb_free_datagram(sk, skb);
sk                169 drivers/isdn/mISDN/socket.c 	struct sock		*sk = sock->sk;
sk                175 drivers/isdn/mISDN/socket.c 		       __func__, (int)len, msg->msg_flags, _pms(sk)->ch.nr,
sk                176 drivers/isdn/mISDN/socket.c 		       sk->sk_protocol);
sk                187 drivers/isdn/mISDN/socket.c 	if (sk->sk_state != MISDN_BOUND)
sk                190 drivers/isdn/mISDN/socket.c 	lock_sock(sk);
sk                209 drivers/isdn/mISDN/socket.c 		if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
sk                210 drivers/isdn/mISDN/socket.c 		    (sk->sk_protocol == ISDN_P_LAPD_NT))
sk                211 drivers/isdn/mISDN/socket.c 			mISDN_HEAD_ID(skb) = _pms(sk)->ch.nr;
sk                219 drivers/isdn/mISDN/socket.c 	if (!_pms(sk)->ch.peer)
sk                221 drivers/isdn/mISDN/socket.c 	err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb);
sk                231 drivers/isdn/mISDN/socket.c 	release_sock(sk);
sk                238 drivers/isdn/mISDN/socket.c 	struct sock *sk = sock->sk;
sk                241 drivers/isdn/mISDN/socket.c 		printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
sk                242 drivers/isdn/mISDN/socket.c 	if (!sk)
sk                244 drivers/isdn/mISDN/socket.c 	switch (sk->sk_protocol) {
sk                249 drivers/isdn/mISDN/socket.c 		if (sk->sk_state == MISDN_BOUND)
sk                250 drivers/isdn/mISDN/socket.c 			delete_channel(&_pms(sk)->ch);
sk                252 drivers/isdn/mISDN/socket.c 			mISDN_sock_unlink(&data_sockets, sk);
sk                262 drivers/isdn/mISDN/socket.c 		delete_channel(&_pms(sk)->ch);
sk                263 drivers/isdn/mISDN/socket.c 		mISDN_sock_unlink(&data_sockets, sk);
sk                267 drivers/isdn/mISDN/socket.c 	lock_sock(sk);
sk                269 drivers/isdn/mISDN/socket.c 	sock_orphan(sk);
sk                270 drivers/isdn/mISDN/socket.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                272 drivers/isdn/mISDN/socket.c 	release_sock(sk);
sk                273 drivers/isdn/mISDN/socket.c 	sock_put(sk);
sk                279 drivers/isdn/mISDN/socket.c data_sock_ioctl_bound(struct sock *sk, unsigned int cmd, void __user *p)
sk                285 drivers/isdn/mISDN/socket.c 	lock_sock(sk);
sk                286 drivers/isdn/mISDN/socket.c 	if (!_pms(sk)->dev) {
sk                296 drivers/isdn/mISDN/socket.c 		if ((sk->sk_protocol & ~ISDN_P_B_MASK) == ISDN_P_B_START) {
sk                298 drivers/isdn/mISDN/socket.c 						 &_pms(sk)->dev->bchannels, list) {
sk                306 drivers/isdn/mISDN/socket.c 			err = _pms(sk)->dev->D.ctrl(&_pms(sk)->dev->D,
sk                314 drivers/isdn/mISDN/socket.c 		if (sk->sk_protocol != ISDN_P_LAPD_NT) {
sk                323 drivers/isdn/mISDN/socket.c 		err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
sk                327 drivers/isdn/mISDN/socket.c 		if (sk->sk_protocol != ISDN_P_LAPD_NT
sk                328 drivers/isdn/mISDN/socket.c 		    && sk->sk_protocol != ISDN_P_LAPD_TE) {
sk                337 drivers/isdn/mISDN/socket.c 		err = _pms(sk)->dev->teimgr->ctrl(_pms(sk)->dev->teimgr,
sk                345 drivers/isdn/mISDN/socket.c 	release_sock(sk);
sk                353 drivers/isdn/mISDN/socket.c 	struct sock		*sk = sock->sk;
sk                394 drivers/isdn/mISDN/socket.c 		if (sk->sk_state == MISDN_BOUND)
sk                395 drivers/isdn/mISDN/socket.c 			err = data_sock_ioctl_bound(sk, cmd,
sk                406 drivers/isdn/mISDN/socket.c 	struct sock *sk = sock->sk;
sk                413 drivers/isdn/mISDN/socket.c 	lock_sock(sk);
sk                423 drivers/isdn/mISDN/socket.c 			_pms(sk)->cmask |= MISDN_TIME_STAMP;
sk                425 drivers/isdn/mISDN/socket.c 			_pms(sk)->cmask &= ~MISDN_TIME_STAMP;
sk                431 drivers/isdn/mISDN/socket.c 	release_sock(sk);
sk                438 drivers/isdn/mISDN/socket.c 	struct sock *sk = sock->sk;
sk                449 drivers/isdn/mISDN/socket.c 		if (_pms(sk)->cmask & MISDN_TIME_STAMP)
sk                468 drivers/isdn/mISDN/socket.c 	struct sock *sk = sock->sk;
sk                473 drivers/isdn/mISDN/socket.c 		printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
sk                479 drivers/isdn/mISDN/socket.c 	lock_sock(sk);
sk                481 drivers/isdn/mISDN/socket.c 	if (_pms(sk)->dev) {
sk                485 drivers/isdn/mISDN/socket.c 	_pms(sk)->dev = get_mdevice(maddr->dev);
sk                486 drivers/isdn/mISDN/socket.c 	if (!_pms(sk)->dev) {
sk                491 drivers/isdn/mISDN/socket.c 	if (sk->sk_protocol < ISDN_P_B_START) {
sk                494 drivers/isdn/mISDN/socket.c 			if (sk == csk)
sk                496 drivers/isdn/mISDN/socket.c 			if (_pms(csk)->dev != _pms(sk)->dev)
sk                501 drivers/isdn/mISDN/socket.c 			    == IS_ISDN_P_TE(sk->sk_protocol))
sk                510 drivers/isdn/mISDN/socket.c 	_pms(sk)->ch.send = mISDN_send;
sk                511 drivers/isdn/mISDN/socket.c 	_pms(sk)->ch.ctrl = mISDN_ctrl;
sk                513 drivers/isdn/mISDN/socket.c 	switch (sk->sk_protocol) {
sk                518 drivers/isdn/mISDN/socket.c 		mISDN_sock_unlink(&data_sockets, sk);
sk                519 drivers/isdn/mISDN/socket.c 		err = connect_layer1(_pms(sk)->dev, &_pms(sk)->ch,
sk                520 drivers/isdn/mISDN/socket.c 				     sk->sk_protocol, maddr);
sk                522 drivers/isdn/mISDN/socket.c 			mISDN_sock_link(&data_sockets, sk);
sk                526 drivers/isdn/mISDN/socket.c 		err = create_l2entity(_pms(sk)->dev, &_pms(sk)->ch,
sk                527 drivers/isdn/mISDN/socket.c 				      sk->sk_protocol, maddr);
sk                535 drivers/isdn/mISDN/socket.c 		err = connect_Bstack(_pms(sk)->dev, &_pms(sk)->ch,
sk                536 drivers/isdn/mISDN/socket.c 				     sk->sk_protocol, maddr);
sk                543 drivers/isdn/mISDN/socket.c 	sk->sk_state = MISDN_BOUND;
sk                544 drivers/isdn/mISDN/socket.c 	_pms(sk)->ch.protocol = sk->sk_protocol;
sk                547 drivers/isdn/mISDN/socket.c 	release_sock(sk);
sk                556 drivers/isdn/mISDN/socket.c 	struct sock		*sk = sock->sk;
sk                558 drivers/isdn/mISDN/socket.c 	if (!_pms(sk)->dev)
sk                561 drivers/isdn/mISDN/socket.c 	lock_sock(sk);
sk                564 drivers/isdn/mISDN/socket.c 	maddr->dev = _pms(sk)->dev->id;
sk                565 drivers/isdn/mISDN/socket.c 	maddr->channel = _pms(sk)->ch.nr;
sk                566 drivers/isdn/mISDN/socket.c 	maddr->sapi = _pms(sk)->ch.addr & 0xff;
sk                567 drivers/isdn/mISDN/socket.c 	maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff;
sk                568 drivers/isdn/mISDN/socket.c 	release_sock(sk);
sk                595 drivers/isdn/mISDN/socket.c 	struct sock *sk;
sk                600 drivers/isdn/mISDN/socket.c 	sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
sk                601 drivers/isdn/mISDN/socket.c 	if (!sk)
sk                604 drivers/isdn/mISDN/socket.c 	sock_init_data(sock, sk);
sk                608 drivers/isdn/mISDN/socket.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                610 drivers/isdn/mISDN/socket.c 	sk->sk_protocol = protocol;
sk                611 drivers/isdn/mISDN/socket.c 	sk->sk_state    = MISDN_OPEN;
sk                612 drivers/isdn/mISDN/socket.c 	mISDN_sock_link(&data_sockets, sk);
sk                620 drivers/isdn/mISDN/socket.c 	struct sock *sk = sock->sk;
sk                622 drivers/isdn/mISDN/socket.c 	printk(KERN_DEBUG "%s(%p) sk=%p\n", __func__, sock, sk);
sk                623 drivers/isdn/mISDN/socket.c 	if (!sk)
sk                626 drivers/isdn/mISDN/socket.c 	mISDN_sock_unlink(&base_sockets, sk);
sk                627 drivers/isdn/mISDN/socket.c 	sock_orphan(sk);
sk                628 drivers/isdn/mISDN/socket.c 	sock_put(sk);
sk                702 drivers/isdn/mISDN/socket.c 	struct sock *sk = sock->sk;
sk                711 drivers/isdn/mISDN/socket.c 	lock_sock(sk);
sk                713 drivers/isdn/mISDN/socket.c 	if (_pms(sk)->dev) {
sk                718 drivers/isdn/mISDN/socket.c 	_pms(sk)->dev = get_mdevice(maddr->dev);
sk                719 drivers/isdn/mISDN/socket.c 	if (!_pms(sk)->dev) {
sk                723 drivers/isdn/mISDN/socket.c 	sk->sk_state = MISDN_BOUND;
sk                726 drivers/isdn/mISDN/socket.c 	release_sock(sk);
sk                753 drivers/isdn/mISDN/socket.c 	struct sock *sk;
sk                760 drivers/isdn/mISDN/socket.c 	sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
sk                761 drivers/isdn/mISDN/socket.c 	if (!sk)
sk                764 drivers/isdn/mISDN/socket.c 	sock_init_data(sock, sk);
sk                767 drivers/isdn/mISDN/socket.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                768 drivers/isdn/mISDN/socket.c 	sk->sk_protocol = protocol;
sk                769 drivers/isdn/mISDN/socket.c 	sk->sk_state    = MISDN_OPEN;
sk                770 drivers/isdn/mISDN/socket.c 	mISDN_sock_link(&base_sockets, sk);
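The bulk of the mISDN socket.c hits are ordinary datagram-socket plumbing: recvmsg dequeues with skb_recv_datagram(), fills in the mISDN address and control data, and drops the buffer with skb_free_datagram(); sendmsg runs under lock_sock() and hands the skb to the peer channel. A minimal recvmsg-side sketch (my_recvmsg() is illustrative, not the mISDN function):

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static int my_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                          int flags)
    {
            struct sock *sk = sock->sk;
            struct sk_buff *skb;
            size_t copied;
            int err;

            skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
            if (!skb)
                    return err;

            copied = min_t(size_t, skb->len, len);
            err = skb_copy_datagram_msg(skb, 0, msg, copied);
            if (!err && copied < skb->len)
                    msg->msg_flags |= MSG_TRUNC;

            skb_free_datagram(sk, skb);
            return err ? err : copied;
    }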
sk                 61 drivers/isdn/mISDN/stack.c 	struct sock		*sk;
sk                 65 drivers/isdn/mISDN/stack.c 	sk_for_each(sk, &sl->head) {
sk                 66 drivers/isdn/mISDN/stack.c 		if (sk->sk_state != MISDN_BOUND)
sk                 74 drivers/isdn/mISDN/stack.c 		if (!sock_queue_rcv_skb(sk, cskb))
sk                445 drivers/isdn/mISDN/stack.c 		sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
sk                587 drivers/isdn/mISDN/stack.c 		sk_del_node_init(&msk->sk);
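The stack.c lines fan a D-channel frame out to every bound socket on a list: sk_for_each() walks the hlist and each receiver gets its own clone via sock_queue_rcv_skb(). A small sketch of that fan-out (my_broadcast() is a placeholder; the MISDN_BOUND state check is elided):

    #include <net/sock.h>

    static void my_broadcast(struct hlist_head *head, struct sk_buff *skb)
    {
            struct sk_buff *cskb;
            struct sock *sk;

            sk_for_each(sk, head) {
                    cskb = skb_clone(skb, GFP_ATOMIC);
                    if (!cskb)
                            break;
                    if (sock_queue_rcv_skb(sk, cskb))
                            kfree_skb(cskb);    /* queue full or filtered: free our clone */
            }
    }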
sk                402 drivers/net/arcnet/arcnet.c 	struct sock *sk;
sk                407 drivers/net/arcnet/arcnet.c 	if (!skb || !skb->sk) {
sk                412 drivers/net/arcnet/arcnet.c 	sock_hold(skb->sk);
sk                413 drivers/net/arcnet/arcnet.c 	sk = skb->sk;
sk                415 drivers/net/arcnet/arcnet.c 	sock_put(skb->sk);
sk                435 drivers/net/arcnet/arcnet.c 	ret = sock_queue_err_skb(sk, ackskb);
sk                777 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 		if (skb->sk)
sk                778 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c 			skb_set_owner_w(ns, skb->sk);
sk               1837 drivers/net/ethernet/freescale/gianfar.c 		if (skb->sk)
sk               1838 drivers/net/ethernet/freescale/gianfar.c 			skb_set_owner_w(skb_new, skb->sk);
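The dpaa2 and gianfar hits are the same two-line idiom: when a driver has to replace a TX skb (realignment, extra headroom), the copy inherits the sending socket with skb_set_owner_w() so wmem accounting and completion signalling still reach it. A sketch, with my_realign_tx_skb() as an invented helper:

    #include <linux/skbuff.h>
    #include <net/sock.h>

    static struct sk_buff *my_realign_tx_skb(struct sk_buff *skb, int headroom)
    {
            struct sk_buff *ns = skb_realloc_headroom(skb, headroom);

            if (!ns)
                    return NULL;
            if (skb->sk)
                    skb_set_owner_w(ns, skb->sk);   /* charge the copy to the socket */
            dev_consume_skb_any(skb);
            return ns;
    }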
sk                 19 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
sk                 26 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                437 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
sk                444 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c 	tls_ctx = tls_get_ctx(skb->sk);
sk                 39 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
sk                 41 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	struct inet_sock *inet = inet_sk(sk);
sk                 51 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
sk                 53 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                 57 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	       &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
sk                 63 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
sk                 65 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	struct inet_sock *inet = inet_sk(sk);
sk                 73 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
sk                 75 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	switch (sk->sk_family) {
sk                 77 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 		mlx5e_tls_set_ipv4_flow(flow, sk);
sk                 81 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 		if (!sk->sk_ipv6only &&
sk                 82 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
sk                 83 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 			mlx5e_tls_set_ipv4_flow(flow, sk);
sk                 89 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 		mlx5e_tls_set_ipv6_flow(flow, sk);
sk                 96 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	mlx5e_tls_set_flow_tcp_ports(flow, sk);
sk                102 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
sk                108 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                119 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	ret = mlx5e_tls_set_flow(flow, sk, caps);
sk                163 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
sk                167 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
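mlx5e_tls_set_flow() above fills the device flow descriptor straight from the socket: IPv4 uses inet_sk() for the 4-tuple, IPv6 uses sk->sk_v6_daddr and inet6_sk()->saddr, and v4-mapped IPv6 sockets are steered down the IPv4 path. An IPv4-only sketch of the extraction (struct my_flow is hypothetical):

    #include <net/inet_sock.h>
    #include <net/sock.h>

    struct my_flow {
            __be32 saddr, daddr;
            __be16 sport, dport;
    };

    static void my_fill_flow_v4(struct my_flow *f, const struct sock *sk)
    {
            const struct inet_sock *inet = inet_sk(sk);

            f->saddr = inet->inet_saddr;
            f->daddr = inet->inet_daddr;
            f->sport = inet->inet_sport;
            f->dport = inet->inet_dport;
    }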
sk                277 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
sk                284 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	tls_ctx = tls_get_ctx(skb->sk);
sk                313 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	struct sock *sk = NULL;
sk                326 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
sk                333 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 		sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
sk                339 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	if (!sk || sk->sk_state == TCP_TIME_WAIT) {
sk                346 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	skb->sk = sk;
sk                350 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c 	tls_offload_rx_resync_request(sk, seq);
sk                175 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		     struct sock *sk, int direction)
sk                177 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct inet_sock *inet = inet_sk(sk);
sk                193 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		     struct sock *sk, int direction)
sk                196 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                203 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		memcpy(req->src_ip, &sk->sk_v6_daddr, sizeof(req->src_ip));
sk                213 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		   struct nfp_crypto_req_add_back *back, struct sock *sk,
sk                216 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	struct inet_sock *inet = inet_sk(sk);
sk                263 drivers/net/ethernet/netronome/nfp/crypto/tls.c nfp_net_tls_add(struct net_device *netdev, struct sock *sk,
sk                288 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	switch (sk->sk_family) {
sk                291 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		if (sk->sk_ipv6only ||
sk                292 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		    ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
sk                327 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		back = nfp_net_tls_set_ipv6(nn, req, sk, direction);
sk                329 drivers/net/ethernet/netronome/nfp/crypto/tls.c 		back = nfp_net_tls_set_ipv4(nn, req, sk, direction);
sk                331 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	nfp_net_tls_set_l4(front, back, sk, direction);
sk                385 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	ntls = tls_driver_ctx(sk, direction);
sk                394 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	tls_offload_rx_resync_set_type(sk,
sk                421 drivers/net/ethernet/netronome/nfp/crypto/tls.c nfp_net_tls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
sk                436 drivers/net/ethernet/netronome/nfp/crypto/tls.c 	ntls = tls_driver_ctx(sk, direction);
sk                837 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
sk                842 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
sk                843 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	resync_pending = tls_offload_tx_resync_pending(skb->sk);
sk                875 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 			tls_offload_tx_resync_request(nskb->sk);
sk                904 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
sk                910 drivers/net/ethernet/netronome/nfp/nfp_net_common.c 	ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
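Both the mlx5 tls_rxtx.c and nfp_net_common.c TX paths gate the kTLS fast path on the same test: the skb must carry a socket and that socket's TX state must have been offloaded to this device; only then is the driver context fetched. A sketch with a hypothetical my_tls_drv_ctx():

    #include <linux/skbuff.h>
    #include <net/tls.h>

    static void *my_tls_drv_ctx(struct sk_buff *skb)
    {
            if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
                    return NULL;        /* plain traffic: no offload handling */

            return tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
    }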
sk                216 drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c 	sk_pacing_shift_update(skb->sk, 8);
sk                121 drivers/net/geneve.c 	return gs->sock->sk->sk_family;
sk                343 drivers/net/geneve.c static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
sk                362 drivers/net/geneve.c 	gs = rcu_dereference_sk_user_data(sk);
sk                388 drivers/net/geneve.c static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
sk                405 drivers/net/geneve.c 	gs = rcu_dereference_sk_user_data(sk);
sk                473 drivers/net/geneve.c static struct sk_buff *geneve_gro_receive(struct sock *sk,
sk                537 drivers/net/geneve.c static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
sk                640 drivers/net/geneve.c 		if (inet_sk(gs->sock->sk)->inet_sport == dst_port &&
sk                856 drivers/net/geneve.c 	dst = ipv6_stub->ipv6_dst_lookup_flow(geneve->net, gs6->sock->sk, fl6,
sk                929 drivers/net/geneve.c 	udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
sk                973 drivers/net/geneve.c 	udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
sk               1585 drivers/net/geneve.c 		rcu_assign_sk_user_data((*gs4)->sock->sk, NULL);
sk               1590 drivers/net/geneve.c 		rcu_assign_sk_user_data((*gs6)->sock->sk, NULL);
sk               1603 drivers/net/geneve.c 		rcu_assign_sk_user_data(gs4->sock->sk, gs4);
sk               1607 drivers/net/geneve.c 		rcu_assign_sk_user_data(gs6->sock->sk, gs6);
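The geneve.c hits show how a UDP tunnel ties its per-socket state to the kernel socket: setup publishes it with rcu_assign_sk_user_data(), teardown clears it, and the encap_recv/GRO callbacks recover it with rcu_dereference_sk_user_data(). A stripped-down sketch (struct my_tun_sock, my_udp_encap_recv() and my_tun_publish() are invented names):

    #include <net/sock.h>
    #include <linux/skbuff.h>

    struct my_tun_sock {
            struct socket *sock;
            /* ... tunnel state ... */
    };

    static int my_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
    {
            struct my_tun_sock *ts = rcu_dereference_sk_user_data(sk);

            if (!ts)
                    goto drop;
            /* ... strip the tunnel header and deliver ... */
            return 0;               /* 0: consumed; >0 would pass it back to UDP */
    drop:
            kfree_skb(skb);
            return 0;
    }

    static void my_tun_publish(struct my_tun_sock *ts, bool up)
    {
            rcu_assign_sk_user_data(ts->sock->sk, up ? ts : NULL);
    }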
sk                 56 drivers/net/gtp.c 	struct sock		*sk;
sk                194 drivers/net/gtp.c 				 !net_eq(sock_net(pctx->sk), dev_net(pctx->dev))))
sk                287 drivers/net/gtp.c static void __gtp_encap_destroy(struct sock *sk)
sk                291 drivers/net/gtp.c 	lock_sock(sk);
sk                292 drivers/net/gtp.c 	gtp = sk->sk_user_data;
sk                294 drivers/net/gtp.c 		if (gtp->sk0 == sk)
sk                298 drivers/net/gtp.c 		udp_sk(sk)->encap_type = 0;
sk                299 drivers/net/gtp.c 		rcu_assign_sk_user_data(sk, NULL);
sk                300 drivers/net/gtp.c 		sock_put(sk);
sk                302 drivers/net/gtp.c 	release_sock(sk);
sk                305 drivers/net/gtp.c static void gtp_encap_destroy(struct sock *sk)
sk                308 drivers/net/gtp.c 	__gtp_encap_destroy(sk);
sk                312 drivers/net/gtp.c static void gtp_encap_disable_sock(struct sock *sk)
sk                314 drivers/net/gtp.c 	if (!sk)
sk                317 drivers/net/gtp.c 	__gtp_encap_destroy(sk);
sk                329 drivers/net/gtp.c static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
sk                334 drivers/net/gtp.c 	gtp = rcu_dereference_sk_user_data(sk);
sk                338 drivers/net/gtp.c 	netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
sk                340 drivers/net/gtp.c 	switch (udp_sk(sk)->encap_type) {
sk                391 drivers/net/gtp.c 					   const struct sock *sk,
sk                395 drivers/net/gtp.c 	fl4->flowi4_oif		= sk->sk_bound_dev_if;
sk                397 drivers/net/gtp.c 	fl4->saddr		= inet_sk(sk)->inet_saddr;
sk                398 drivers/net/gtp.c 	fl4->flowi4_tos		= RT_CONN_FLAGS(sk);
sk                399 drivers/net/gtp.c 	fl4->flowi4_proto	= sk->sk_protocol;
sk                401 drivers/net/gtp.c 	return ip_route_output_key(sock_net(sk), fl4);
sk                445 drivers/net/gtp.c 	struct sock		*sk;
sk                469 drivers/net/gtp.c 					struct sock *sk, struct iphdr *iph,
sk                474 drivers/net/gtp.c 	pktinfo->sk	= sk;
sk                509 drivers/net/gtp.c 	rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr);
sk                554 drivers/net/gtp.c 	gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
sk                595 drivers/net/gtp.c 		udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
sk                796 drivers/net/gtp.c 	struct sock *sk;
sk                807 drivers/net/gtp.c 	sk = sock->sk;
sk                808 drivers/net/gtp.c 	if (sk->sk_protocol != IPPROTO_UDP ||
sk                809 drivers/net/gtp.c 	    sk->sk_type != SOCK_DGRAM ||
sk                810 drivers/net/gtp.c 	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) {
sk                812 drivers/net/gtp.c 		sk = ERR_PTR(-EINVAL);
sk                816 drivers/net/gtp.c 	lock_sock(sk);
sk                817 drivers/net/gtp.c 	if (sk->sk_user_data) {
sk                818 drivers/net/gtp.c 		sk = ERR_PTR(-EBUSY);
sk                822 drivers/net/gtp.c 	sock_hold(sk);
sk                829 drivers/net/gtp.c 	setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
sk                832 drivers/net/gtp.c 	release_sock(sock->sk);
sk                835 drivers/net/gtp.c 	return sk;
sk                934 drivers/net/gtp.c static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
sk                988 drivers/net/gtp.c 	sock_hold(sk);
sk                989 drivers/net/gtp.c 	pctx->sk = sk;
sk               1031 drivers/net/gtp.c 	sock_put(pctx->sk);
sk               1046 drivers/net/gtp.c 	struct sock *sk;
sk               1076 drivers/net/gtp.c 	gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
sk               1083 drivers/net/gtp.c 		sk = gtp->sk0;
sk               1085 drivers/net/gtp.c 		sk = gtp->sk1u;
sk               1087 drivers/net/gtp.c 		sk = NULL;
sk               1089 drivers/net/gtp.c 	if (!sk) {
sk               1094 drivers/net/gtp.c 	err = gtp_pdp_add(gtp, sk, info);
sk               1152 drivers/net/gtp.c 	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
sk               1221 drivers/net/gtp.c 	pctx = gtp_find_pdp(sock_net(skb->sk), info->attrs);
sk               1253 drivers/net/gtp.c 	struct net *net = sock_net(skb->sk);
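The socket-adoption lines in gtp.c above take a UDP socket created by userspace: look the fd up, check that it really is a UDP datagram socket, refuse it if sk_user_data is already claimed, take a reference, and install the encap callbacks with setup_udp_tunnel_sock(). A hedged sketch of that flow (my_adopt_udp_sock() and its parameters are illustrative):

    #include <linux/err.h>
    #include <linux/net.h>
    #include <net/udp_tunnel.h>

    static struct sock *my_adopt_udp_sock(int fd, void *priv, u8 encap_type,
                                          int (*encap_recv)(struct sock *sk,
                                                            struct sk_buff *skb))
    {
            struct udp_tunnel_sock_cfg tuncfg = { };
            struct socket *sock;
            struct sock *sk;
            int err;

            sock = sockfd_lookup(fd, &err);
            if (!sock)
                    return ERR_PTR(err);

            sk = sock->sk;
            if (sk->sk_protocol != IPPROTO_UDP || sk->sk_type != SOCK_DGRAM) {
                    sk = ERR_PTR(-EINVAL);
                    goto out;
            }

            lock_sock(sk);
            if (sk->sk_user_data) {             /* already owned by another user */
                    release_sock(sk);
                    sk = ERR_PTR(-EBUSY);
                    goto out;
            }
            sock_hold(sk);                      /* keep sk beyond the fd reference */

            tuncfg.sk_user_data = priv;
            tuncfg.encap_type = encap_type;
            tuncfg.encap_rcv = encap_recv;
            setup_udp_tunnel_sock(sock_net(sk), sock, &tuncfg);
            release_sock(sk);
    out:
            sockfd_put(sock);
            return sk;
    }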
sk                300 drivers/net/hyperv/netvsc_drv.c 	struct sock *sk = skb->sk;
sk                308 drivers/net/hyperv/netvsc_drv.c 	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
sk                309 drivers/net/hyperv/netvsc_drv.c 		sk_tx_queue_set(sk, q_idx);
sk                327 drivers/net/hyperv/netvsc_drv.c 	int q_idx = sk_tx_queue_get(skb->sk);
sk                440 drivers/net/ipvlan/ipvlan_core.c 	err = ip_local_out(net, skb->sk, skb);
sk                478 drivers/net/ipvlan/ipvlan_core.c 	err = ip6_local_out(net, skb->sk, skb);
sk               2602 drivers/net/macsec.c 	struct net *net = sock_net(skb->sk);
sk                 87 drivers/net/ppp/pppoe.c static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
sk                284 drivers/net/ppp/pppoe.c 		struct sock *sk;
sk                294 drivers/net/ppp/pppoe.c 			sk = sk_pppox(po);
sk                304 drivers/net/ppp/pppoe.c 			sock_hold(sk);
sk                306 drivers/net/ppp/pppoe.c 			lock_sock(sk);
sk                309 drivers/net/ppp/pppoe.c 			    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
sk                310 drivers/net/ppp/pppoe.c 				pppox_unbind_sock(sk);
sk                311 drivers/net/ppp/pppoe.c 				sk->sk_state_change(sk);
sk                316 drivers/net/ppp/pppoe.c 			release_sock(sk);
sk                317 drivers/net/ppp/pppoe.c 			sock_put(sk);
sk                367 drivers/net/ppp/pppoe.c static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
sk                369 drivers/net/ppp/pppoe.c 	struct pppox_sock *po = pppox_sk(sk);
sk                380 drivers/net/ppp/pppoe.c 	if (sk->sk_state & PPPOX_BOUND) {
sk                382 drivers/net/ppp/pppoe.c 	} else if (sk->sk_state & PPPOX_RELAY) {
sk                383 drivers/net/ppp/pppoe.c 		relay_po = get_item_by_addr(sock_net(sk),
sk                396 drivers/net/ppp/pppoe.c 		if (sock_queue_rcv_skb(sk, skb))
sk                465 drivers/net/ppp/pppoe.c 	struct sock *sk = sk_pppox(po);
sk                467 drivers/net/ppp/pppoe.c 	lock_sock(sk);
sk                472 drivers/net/ppp/pppoe.c 	pppox_unbind_sock(sk);
sk                473 drivers/net/ppp/pppoe.c 	release_sock(sk);
sk                474 drivers/net/ppp/pppoe.c 	sock_put(sk);
sk                540 drivers/net/ppp/pppoe.c 	struct sock *sk;
sk                542 drivers/net/ppp/pppoe.c 	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppoe_sk_proto, kern);
sk                543 drivers/net/ppp/pppoe.c 	if (!sk)
sk                546 drivers/net/ppp/pppoe.c 	sock_init_data(sock, sk);
sk                551 drivers/net/ppp/pppoe.c 	sk->sk_backlog_rcv	= pppoe_rcv_core;
sk                552 drivers/net/ppp/pppoe.c 	sk->sk_state		= PPPOX_NONE;
sk                553 drivers/net/ppp/pppoe.c 	sk->sk_type		= SOCK_STREAM;
sk                554 drivers/net/ppp/pppoe.c 	sk->sk_family		= PF_PPPOX;
sk                555 drivers/net/ppp/pppoe.c 	sk->sk_protocol		= PX_PROTO_OE;
sk                557 drivers/net/ppp/pppoe.c 	INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work,
sk                565 drivers/net/ppp/pppoe.c 	struct sock *sk = sock->sk;
sk                570 drivers/net/ppp/pppoe.c 	if (!sk)
sk                573 drivers/net/ppp/pppoe.c 	lock_sock(sk);
sk                574 drivers/net/ppp/pppoe.c 	if (sock_flag(sk, SOCK_DEAD)) {
sk                575 drivers/net/ppp/pppoe.c 		release_sock(sk);
sk                579 drivers/net/ppp/pppoe.c 	po = pppox_sk(sk);
sk                586 drivers/net/ppp/pppoe.c 	pppox_unbind_sock(sk);
sk                589 drivers/net/ppp/pppoe.c 	sk->sk_state = PPPOX_DEAD;
sk                591 drivers/net/ppp/pppoe.c 	net = sock_net(sk);
sk                601 drivers/net/ppp/pppoe.c 	sock_orphan(sk);
sk                602 drivers/net/ppp/pppoe.c 	sock->sk = NULL;
sk                604 drivers/net/ppp/pppoe.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                605 drivers/net/ppp/pppoe.c 	release_sock(sk);
sk                606 drivers/net/ppp/pppoe.c 	sock_put(sk);
sk                614 drivers/net/ppp/pppoe.c 	struct sock *sk = sock->sk;
sk                616 drivers/net/ppp/pppoe.c 	struct pppox_sock *po = pppox_sk(sk);
sk                622 drivers/net/ppp/pppoe.c 	lock_sock(sk);
sk                634 drivers/net/ppp/pppoe.c 	if ((sk->sk_state & PPPOX_CONNECTED) &&
sk                640 drivers/net/ppp/pppoe.c 	if ((sk->sk_state & PPPOX_DEAD) &&
sk                648 drivers/net/ppp/pppoe.c 		pppox_unbind_sock(sk);
sk                649 drivers/net/ppp/pppoe.c 		pn = pppoe_pernet(sock_net(sk));
sk                664 drivers/net/ppp/pppoe.c 		sk->sk_state = PPPOX_NONE;
sk                670 drivers/net/ppp/pppoe.c 		net = sock_net(sk);
sk                696 drivers/net/ppp/pppoe.c 		po->chan.private = sk;
sk                706 drivers/net/ppp/pppoe.c 		sk->sk_state = PPPOX_CONNECTED;
sk                712 drivers/net/ppp/pppoe.c 	release_sock(sk);
sk                730 drivers/net/ppp/pppoe.c 	memcpy(&sp.sa_addr.pppoe, &pppox_sk(sock->sk)->pppoe_pa,
sk                741 drivers/net/ppp/pppoe.c 	struct sock *sk = sock->sk;
sk                742 drivers/net/ppp/pppoe.c 	struct pppox_sock *po = pppox_sk(sk);
sk                749 drivers/net/ppp/pppoe.c 		if (!(sk->sk_state & PPPOX_CONNECTED))
sk                763 drivers/net/ppp/pppoe.c 		if (!(sk->sk_state & PPPOX_CONNECTED))
sk                790 drivers/net/ppp/pppoe.c 		if (sk->sk_state & (PPPOX_BOUND | PPPOX_DEAD))
sk                794 drivers/net/ppp/pppoe.c 		if (!(sk->sk_state & PPPOX_CONNECTED))
sk                812 drivers/net/ppp/pppoe.c 		relay_po = get_item_by_addr(sock_net(sk), &po->pppoe_relay);
sk                817 drivers/net/ppp/pppoe.c 		sk->sk_state |= PPPOX_RELAY;
sk                824 drivers/net/ppp/pppoe.c 		if (!(sk->sk_state & PPPOX_RELAY))
sk                827 drivers/net/ppp/pppoe.c 		sk->sk_state &= ~PPPOX_RELAY;
sk                842 drivers/net/ppp/pppoe.c 	struct sock *sk = sock->sk;
sk                843 drivers/net/ppp/pppoe.c 	struct pppox_sock *po = pppox_sk(sk);
sk                851 drivers/net/ppp/pppoe.c 	lock_sock(sk);
sk                852 drivers/net/ppp/pppoe.c 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
sk                869 drivers/net/ppp/pppoe.c 	skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
sk                882 drivers/net/ppp/pppoe.c 	skb->priority = sk->sk_priority;
sk                905 drivers/net/ppp/pppoe.c 	release_sock(sk);
sk                914 drivers/net/ppp/pppoe.c static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
sk                916 drivers/net/ppp/pppoe.c 	struct pppox_sock *po = pppox_sk(sk);
sk                929 drivers/net/ppp/pppoe.c 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
sk                973 drivers/net/ppp/pppoe.c 	struct sock *sk = (struct sock *)chan->private;
sk                974 drivers/net/ppp/pppoe.c 	return __pppoe_xmit(sk, skb);
sk                984 drivers/net/ppp/pppoe.c 	struct sock *sk = sock->sk;
sk                988 drivers/net/ppp/pppoe.c 	if (sk->sk_state & PPPOX_BOUND) {
sk                993 drivers/net/ppp/pppoe.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
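In the pppoe.c sendmsg path above, the TX skb is charged to the PPPoE socket with sock_wmalloc(), headroom is reserved for the link-layer plus PPPoE header, and skb->priority is inherited from sk->sk_priority before the frame is queued on the session device. A reduced sketch of just the allocation step (my_alloc_tx_skb() is a made-up helper):

    #include <net/sock.h>
    #include <linux/netdevice.h>

    static struct sk_buff *my_alloc_tx_skb(struct sock *sk,
                                           struct net_device *dev,
                                           size_t payload)
    {
            int hlen = LL_RESERVED_SPACE(dev);
            struct sk_buff *skb;

            skb = sock_wmalloc(sk, hlen + payload + dev->needed_tailroom,
                               0, GFP_KERNEL);
            if (!skb)
                    return NULL;

            skb_reserve(skb, hlen);             /* room for the link-layer header */
            skb->dev = dev;
            skb->priority = sk->sk_priority;
            return skb;
    }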
sk                 53 drivers/net/ppp/pppox.c void pppox_unbind_sock(struct sock *sk)
sk                 57 drivers/net/ppp/pppox.c 	if (sk->sk_state & (PPPOX_BOUND | PPPOX_CONNECTED)) {
sk                 58 drivers/net/ppp/pppox.c 		ppp_unregister_channel(&pppox_sk(sk)->chan);
sk                 59 drivers/net/ppp/pppox.c 		sk->sk_state = PPPOX_DEAD;
sk                 69 drivers/net/ppp/pppox.c 	struct sock *sk = sock->sk;
sk                 70 drivers/net/ppp/pppox.c 	struct pppox_sock *po = pppox_sk(sk);
sk                 73 drivers/net/ppp/pppox.c 	lock_sock(sk);
sk                 79 drivers/net/ppp/pppox.c 		if (!(sk->sk_state & PPPOX_CONNECTED))
sk                 88 drivers/net/ppp/pppox.c 		sk->sk_state |= PPPOX_BOUND;
sk                 92 drivers/net/ppp/pppox.c 		rc = pppox_protos[sk->sk_protocol]->ioctl ?
sk                 93 drivers/net/ppp/pppox.c 			pppox_protos[sk->sk_protocol]->ioctl(sock, cmd, arg) : -ENOTTY;
sk                 96 drivers/net/ppp/pppox.c 	release_sock(sk);
sk                133 drivers/net/ppp/pptp.c 	struct sock *sk = (struct sock *) chan->private;
sk                134 drivers/net/ppp/pptp.c 	struct pppox_sock *po = pppox_sk(sk);
sk                135 drivers/net/ppp/pptp.c 	struct net *net = sock_net(sk);
sk                172 drivers/net/ppp/pptp.c 		if (skb->sk)
sk                173 drivers/net/ppp/pptp.c 			skb_set_owner_w(new_skb, skb->sk);
sk                227 drivers/net/ppp/pptp.c 	if (ip_dont_fragment(sk, &rt->dst))
sk                247 drivers/net/ppp/pptp.c 	ip_local_out(net, skb->sk, skb);
sk                255 drivers/net/ppp/pptp.c static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb)
sk                257 drivers/net/ppp/pptp.c 	struct pppox_sock *po = pppox_sk(sk);
sk                263 drivers/net/ppp/pptp.c 	if (!(sk->sk_state & PPPOX_CONNECTED)) {
sk                264 drivers/net/ppp/pptp.c 		if (sock_queue_rcv_skb(sk, skb))
sk                372 drivers/net/ppp/pptp.c 	struct sock *sk = sock->sk;
sk                374 drivers/net/ppp/pptp.c 	struct pppox_sock *po = pppox_sk(sk);
sk                380 drivers/net/ppp/pptp.c 	lock_sock(sk);
sk                382 drivers/net/ppp/pptp.c 	if (sk->sk_state & PPPOX_DEAD) {
sk                387 drivers/net/ppp/pptp.c 	if (sk->sk_state & PPPOX_BOUND) {
sk                395 drivers/net/ppp/pptp.c 		sk->sk_state |= PPPOX_BOUND;
sk                398 drivers/net/ppp/pptp.c 	release_sock(sk);
sk                405 drivers/net/ppp/pptp.c 	struct sock *sk = sock->sk;
sk                407 drivers/net/ppp/pptp.c 	struct pppox_sock *po = pppox_sk(sk);
sk                422 drivers/net/ppp/pptp.c 	lock_sock(sk);
sk                424 drivers/net/ppp/pptp.c 	if (sk->sk_state & PPPOX_CONNECTED) {
sk                430 drivers/net/ppp/pptp.c 	if (sk->sk_state & PPPOX_DEAD) {
sk                440 drivers/net/ppp/pptp.c 	po->chan.private = sk;
sk                443 drivers/net/ppp/pptp.c 	rt = ip_route_output_ports(sock_net(sk), &fl4, sk,
sk                447 drivers/net/ppp/pptp.c 				   IPPROTO_GRE, RT_CONN_FLAGS(sk), 0);
sk                452 drivers/net/ppp/pptp.c 	sk_setup_caps(sk, &rt->dst);
sk                467 drivers/net/ppp/pptp.c 	sk->sk_state |= PPPOX_CONNECTED;
sk                470 drivers/net/ppp/pptp.c 	release_sock(sk);
sk                484 drivers/net/ppp/pptp.c 	sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
sk                493 drivers/net/ppp/pptp.c 	struct sock *sk = sock->sk;
sk                497 drivers/net/ppp/pptp.c 	if (!sk)
sk                500 drivers/net/ppp/pptp.c 	lock_sock(sk);
sk                502 drivers/net/ppp/pptp.c 	if (sock_flag(sk, SOCK_DEAD)) {
sk                503 drivers/net/ppp/pptp.c 		release_sock(sk);
sk                507 drivers/net/ppp/pptp.c 	po = pppox_sk(sk);
sk                511 drivers/net/ppp/pptp.c 	pppox_unbind_sock(sk);
sk                512 drivers/net/ppp/pptp.c 	sk->sk_state = PPPOX_DEAD;
sk                514 drivers/net/ppp/pptp.c 	sock_orphan(sk);
sk                515 drivers/net/ppp/pptp.c 	sock->sk = NULL;
sk                517 drivers/net/ppp/pptp.c 	release_sock(sk);
sk                518 drivers/net/ppp/pptp.c 	sock_put(sk);
sk                523 drivers/net/ppp/pptp.c static void pptp_sock_destruct(struct sock *sk)
sk                525 drivers/net/ppp/pptp.c 	if (!(sk->sk_state & PPPOX_DEAD)) {
sk                526 drivers/net/ppp/pptp.c 		del_chan(pppox_sk(sk));
sk                527 drivers/net/ppp/pptp.c 		pppox_unbind_sock(sk);
sk                529 drivers/net/ppp/pptp.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                530 drivers/net/ppp/pptp.c 	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
sk                536 drivers/net/ppp/pptp.c 	struct sock *sk;
sk                540 drivers/net/ppp/pptp.c 	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pptp_sk_proto, kern);
sk                541 drivers/net/ppp/pptp.c 	if (!sk)
sk                544 drivers/net/ppp/pptp.c 	sock_init_data(sock, sk);
sk                549 drivers/net/ppp/pptp.c 	sk->sk_backlog_rcv = pptp_rcv_core;
sk                550 drivers/net/ppp/pptp.c 	sk->sk_state       = PPPOX_NONE;
sk                551 drivers/net/ppp/pptp.c 	sk->sk_type        = SOCK_STREAM;
sk                552 drivers/net/ppp/pptp.c 	sk->sk_family      = PF_PPPOX;
sk                553 drivers/net/ppp/pptp.c 	sk->sk_protocol    = PX_PROTO_PPTP;
sk                554 drivers/net/ppp/pptp.c 	sk->sk_destruct    = pptp_sock_destruct;
sk                556 drivers/net/ppp/pptp.c 	po = pppox_sk(sk);
sk                570 drivers/net/ppp/pptp.c 	struct sock *sk = (struct sock *) chan->private;
sk                571 drivers/net/ppp/pptp.c 	struct pppox_sock *po = pppox_sk(sk);
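pppoe and pptp both hang their struct sock off the generic PPP channel: chan->private is the sock, and the channel's transmit callback casts it back and refuses to send once the socket is dead or no longer PPPOX_CONNECTED. A sketch of that callback shape (my_chan_xmit() is illustrative; the actual tunnel header construction is elided):

    #include <linux/ppp_channel.h>
    #include <linux/if_pppox.h>
    #include <net/sock.h>

    static int my_chan_xmit(struct ppp_channel *chan, struct sk_buff *skb)
    {
            struct sock *sk = (struct sock *)chan->private;

            if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
                    kfree_skb(skb);
                    return 1;       /* consumed (dropped) */
            }

            /* ... prepend the tunnel encapsulation and transmit ... */
            return 1;               /* 1: skb accepted by the channel */
    }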
sk                173 drivers/net/tap.c 	sock_hold(&q->sk);
sk                235 drivers/net/tap.c 		sock_put(&q->sk);
sk                242 drivers/net/tap.c 	sock_put(&q->sk);
sk                309 drivers/net/tap.c 		sock_put(&q->sk);
sk                381 drivers/net/tap.c 	wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
sk                481 drivers/net/tap.c static void tap_sock_write_space(struct sock *sk)
sk                485 drivers/net/tap.c 	if (!sock_writeable(sk) ||
sk                486 drivers/net/tap.c 	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
sk                489 drivers/net/tap.c 	wqueue = sk_sleep(sk);
sk                494 drivers/net/tap.c static void tap_sock_destruct(struct sock *sk)
sk                496 drivers/net/tap.c 	struct tap_queue *q = container_of(sk, struct tap_queue, sk);
sk                519 drivers/net/tap.c 		sk_free(&q->sk);
sk                528 drivers/net/tap.c 	sock_init_data(&q->sock, &q->sk);
sk                529 drivers/net/tap.c 	q->sk.sk_write_space = tap_sock_write_space;
sk                530 drivers/net/tap.c 	q->sk.sk_destruct = tap_sock_destruct;
sk                542 drivers/net/tap.c 		sock_set_flag(&q->sk, SOCK_ZEROCOPY);
sk                556 drivers/net/tap.c 	sock_put(&q->sk);
sk                586 drivers/net/tap.c 	if (sock_writeable(&q->sk) ||
sk                588 drivers/net/tap.c 	     sock_writeable(&q->sk)))
sk                595 drivers/net/tap.c static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
sk                605 drivers/net/tap.c 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
sk                666 drivers/net/tap.c 	if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
sk                691 drivers/net/tap.c 	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
sk                842 drivers/net/tap.c 			prepare_to_wait(sk_sleep(&q->sk), &wait,
sk                861 drivers/net/tap.c 		finish_wait(sk_sleep(&q->sk), &wait);
sk               1040 drivers/net/tap.c 		q->sk.sk_sndbuf = s;
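tap (and tun below) embed a full struct sock and struct socket in each queue object: sock_init_data() wires the two together, the queue installs its own sk_write_space/sk_destruct, and the queue's lifetime is then driven by sock_hold()/sock_put() on the embedded sock. A sketch of the wiring, assuming the object was allocated through sk_alloc() with a proto whose obj_size covers the whole structure (as tun and tap do), so struct sock stays the first member:

    #include <net/sock.h>

    struct my_tap_queue {
            struct sock sk;         /* first member: sk_alloc() hands this back */
            struct socket sock;
            /* ... per-queue state ... */
    };

    static void my_tap_sock_destruct(struct sock *sk)
    {
            struct my_tap_queue *q = container_of(sk, struct my_tap_queue, sk);

            /* last sock_put(): release whatever still hangs off q */
            (void)q;
    }

    static void my_tap_queue_init(struct my_tap_queue *q)
    {
            sock_init_data(&q->sock, &q->sk);   /* links q->sock.sk and q->sk.sk_socket */
            q->sk.sk_destruct = my_tap_sock_destruct;
            sock_set_flag(&q->sk, SOCK_ZEROCOPY);
    }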
sk                161 drivers/net/tun.c 	struct sock sk;
sk                273 drivers/net/tun.c 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
sk                673 drivers/net/tun.c 	skb_queue_purge(&tfile->sk.sk_write_queue);
sk                674 drivers/net/tun.c 	skb_queue_purge(&tfile->sk.sk_error_queue);
sk                703 drivers/net/tun.c 			sock_put(&tfile->sk);
sk                714 drivers/net/tun.c 		sock_put(&tfile->sk);
sk                728 drivers/net/tun.c 		sock_put(&tfile->sk);
sk                756 drivers/net/tun.c 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
sk                757 drivers/net/tun.c 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
sk                762 drivers/net/tun.c 		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
sk                763 drivers/net/tun.c 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
sk                775 drivers/net/tun.c 		sock_put(&tfile->sk);
sk                781 drivers/net/tun.c 		sock_put(&tfile->sk);
sk                797 drivers/net/tun.c 	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
sk                818 drivers/net/tun.c 		lock_sock(tfile->socket.sk);
sk                819 drivers/net/tun.c 		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
sk                820 drivers/net/tun.c 		release_sock(tfile->socket.sk);
sk                833 drivers/net/tun.c 	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
sk                859 drivers/net/tun.c 		sock_hold(&tfile->sk);
sk                864 drivers/net/tun.c 		sock_set_flag(&tfile->sk, SOCK_XDP);
sk               1089 drivers/net/tun.c 	if (tfile->socket.sk->sk_filter &&
sk               1090 drivers/net/tun.c 	    sk_filter(tfile->socket.sk, skb))
sk               1115 drivers/net/tun.c 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
sk               1207 drivers/net/tun.c 			sock_set_flag(&tfile->sk, SOCK_XDP);
sk               1209 drivers/net/tun.c 			sock_reset_flag(&tfile->sk, SOCK_XDP);
sk               1213 drivers/net/tun.c 			sock_set_flag(&tfile->sk, SOCK_XDP);
sk               1215 drivers/net/tun.c 			sock_reset_flag(&tfile->sk, SOCK_XDP);
sk               1278 drivers/net/tun.c 	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
sk               1418 drivers/net/tun.c 	struct sock *sk = tfile->socket.sk;
sk               1420 drivers/net/tun.c 	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
sk               1430 drivers/net/tun.c 	struct sock *sk;
sk               1436 drivers/net/tun.c 	sk = tfile->socket.sk;
sk               1440 drivers/net/tun.c 	poll_wait(file, sk_sleep(sk), wait);
sk               1451 drivers/net/tun.c 	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
sk               1521 drivers/net/tun.c 	struct sock *sk = tfile->socket.sk;
sk               1529 drivers/net/tun.c 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
sk               1545 drivers/net/tun.c 	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
sk               1588 drivers/net/tun.c 	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
sk               1615 drivers/net/tun.c 	skb_set_owner_w(skb, tfile->socket.sk);
sk               1993 drivers/net/tun.c 		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
sk               2196 drivers/net/tun.c 		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
sk               2402 drivers/net/tun.c static void tun_sock_write_space(struct sock *sk)
sk               2407 drivers/net/tun.c 	if (!sock_writeable(sk))
sk               2410 drivers/net/tun.c 	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
sk               2413 drivers/net/tun.c 	wqueue = sk_sleep(sk);
sk               2418 drivers/net/tun.c 	tfile = container_of(sk, struct tun_file, sk);
sk               2596 drivers/net/tun.c 		ret = sock_recv_errqueue(sock->sk, m, total_len,
sk               2815 drivers/net/tun.c 		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
sk               2943 drivers/net/tun.c 		lock_sock(tfile->socket.sk);
sk               2944 drivers/net/tun.c 		sk_detach_filter(tfile->socket.sk);
sk               2945 drivers/net/tun.c 		release_sock(tfile->socket.sk);
sk               2958 drivers/net/tun.c 		lock_sock(tfile->socket.sk);
sk               2959 drivers/net/tun.c 		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
sk               2960 drivers/net/tun.c 		release_sock(tfile->socket.sk);
sk               2978 drivers/net/tun.c 		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
sk               3042 drivers/net/tun.c 	struct net *net = sock_net(&tfile->sk);
sk               3125 drivers/net/tun.c 		if (!tfile->socket.sk->sk_filter)
sk               3233 drivers/net/tun.c 		sndbuf = tfile->socket.sk->sk_sndbuf;
sk               3430 drivers/net/tun.c 		sk_free(&tfile->sk);
sk               3444 drivers/net/tun.c 	sock_init_data(&tfile->socket, &tfile->sk);
sk               3446 drivers/net/tun.c 	tfile->sk.sk_write_space = tun_sock_write_space;
sk               3447 drivers/net/tun.c 	tfile->sk.sk_sndbuf = INT_MAX;
sk               3452 drivers/net/tun.c 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
sk               3662 drivers/net/tun.c 			tfile->socket.sk->sk_write_space(tfile->socket.sk);
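Among the tun.c hits, the filter attach/detach lines show how a classic BPF program is installed on every queue's socket: each attach or detach happens under lock_sock() on that queue's sk. A minimal sketch (my_attach_filter() and my_detach_filter() are invented wrappers):

    #include <linux/filter.h>
    #include <net/sock.h>

    static int my_attach_filter(struct sock *sk, struct sock_fprog *fprog)
    {
            int ret;

            lock_sock(sk);
            ret = sk_attach_filter(fprog, sk);
            release_sock(sk);
            return ret;
    }

    static void my_detach_filter(struct sock *sk)
    {
            lock_sock(sk);
            sk_detach_filter(sk);
            release_sock(sk);
    }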
sk                151 drivers/net/vrf.c static int vrf_ip6_local_out(struct net *net, struct sock *sk,
sk                157 drivers/net/vrf.c 		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);
sk                160 drivers/net/vrf.c 		err = dst_output(net, sk, skb);
sk                209 drivers/net/vrf.c 	ret = vrf_ip6_local_out(net, skb->sk, skb);
sk                230 drivers/net/vrf.c static int vrf_ip_local_out(struct net *net, struct sock *sk,
sk                235 drivers/net/vrf.c 	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
sk                238 drivers/net/vrf.c 		err = dst_output(net, sk, skb);
sk                290 drivers/net/vrf.c 	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
sk                335 drivers/net/vrf.c static int vrf_finish_direct(struct net *net, struct sock *sk,
sk                360 drivers/net/vrf.c static int vrf_finish_output6(struct net *net, struct sock *sk,
sk                394 drivers/net/vrf.c static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                397 drivers/net/vrf.c 			    net, sk, skb, NULL, skb_dst(skb)->dev,
sk                434 drivers/net/vrf.c static int vrf_output6_direct(struct net *net, struct sock *sk,
sk                440 drivers/net/vrf.c 			    net, sk, skb, NULL, skb->dev,
sk                446 drivers/net/vrf.c 					  struct sock *sk,
sk                454 drivers/net/vrf.c 	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
sk                458 drivers/net/vrf.c 		err = vrf_output6_direct(net, sk, skb);
sk                470 drivers/net/vrf.c 				   struct sock *sk,
sk                479 drivers/net/vrf.c 		return vrf_ip6_out_direct(vrf_dev, sk, skb);
sk                537 drivers/net/vrf.c 				   struct sock *sk,
sk                554 drivers/net/vrf.c static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                575 drivers/net/vrf.c 		if (skb->sk)
sk                576 drivers/net/vrf.c 			skb_set_owner_w(skb2, skb->sk);
sk                599 drivers/net/vrf.c static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                609 drivers/net/vrf.c 			    net, sk, skb, NULL, dev,
sk                646 drivers/net/vrf.c static int vrf_output_direct(struct net *net, struct sock *sk,
sk                652 drivers/net/vrf.c 			    net, sk, skb, NULL, skb->dev,
sk                658 drivers/net/vrf.c 					 struct sock *sk,
sk                666 drivers/net/vrf.c 	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
sk                670 drivers/net/vrf.c 		err = vrf_output_direct(net, sk, skb);
sk                682 drivers/net/vrf.c 				  struct sock *sk,
sk                692 drivers/net/vrf.c 		return vrf_ip_out_direct(vrf_dev, sk, skb);
sk                699 drivers/net/vrf.c 				  struct sock *sk,
sk                705 drivers/net/vrf.c 		return vrf_ip_out(vrf_dev, sk, skb);
sk                707 drivers/net/vrf.c 		return vrf_ip6_out(vrf_dev, sk, skb);
sk                898 drivers/net/vrf.c static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk               1190 drivers/net/vrf.c 	skb->sk = dev_net(dev)->rtnl;
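vrf_ip_local_out() and vrf_ip6_local_out() above re-run the LOCAL_OUT netfilter hook for the flow's socket and only hand the packet to dst_output() when the hook accepts it (nf_hook() returning 1 means "continue"). The IPv4 shape, essentially as in the lines:

    #include <linux/netfilter.h>
    #include <linux/netfilter_ipv4.h>
    #include <net/dst.h>

    static int my_ip_local_out(struct net *net, struct sock *sk,
                               struct sk_buff *skb)
    {
            int err;

            err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
                          skb, NULL, skb_dst(skb)->dev, dst_output);
            if (likely(err == 1))
                    err = dst_output(net, sk, skb);

            return err;
    }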
sk                196 drivers/net/vxlan.c 		if (inet_sk(vs->sock->sk)->inet_sport == port &&
sk                199 drivers/net/vxlan.c 		    vs->sock->sk->sk_bound_dev_if == ifindex)
sk                727 drivers/net/vxlan.c static struct sk_buff *vxlan_gro_receive(struct sock *sk,
sk                736 drivers/net/vxlan.c 	struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
sk                788 drivers/net/vxlan.c static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
sk               1402 drivers/net/vxlan.c 	vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
sk               1446 drivers/net/vxlan.c 	struct sock *sk;
sk               1458 drivers/net/vxlan.c 		sk = sock4->sock->sk;
sk               1459 drivers/net/vxlan.c 		lock_sock(sk);
sk               1460 drivers/net/vxlan.c 		ret = ip_mc_join_group(sk, &mreq);
sk               1461 drivers/net/vxlan.c 		release_sock(sk);
sk               1466 drivers/net/vxlan.c 		sk = sock6->sock->sk;
sk               1467 drivers/net/vxlan.c 		lock_sock(sk);
sk               1468 drivers/net/vxlan.c 		ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
sk               1470 drivers/net/vxlan.c 		release_sock(sk);
sk               1480 drivers/net/vxlan.c 	struct sock *sk;
sk               1492 drivers/net/vxlan.c 		sk = sock4->sock->sk;
sk               1493 drivers/net/vxlan.c 		lock_sock(sk);
sk               1494 drivers/net/vxlan.c 		ret = ip_mc_leave_group(sk, &mreq);
sk               1495 drivers/net/vxlan.c 		release_sock(sk);
sk               1500 drivers/net/vxlan.c 		sk = sock6->sock->sk;
sk               1501 drivers/net/vxlan.c 		lock_sock(sk);
sk               1502 drivers/net/vxlan.c 		ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
sk               1504 drivers/net/vxlan.c 		release_sock(sk);
sk               1650 drivers/net/vxlan.c static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
sk               1679 drivers/net/vxlan.c 	vs = rcu_dereference_sk_user_data(sk);
sk               1784 drivers/net/vxlan.c static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
sk               1799 drivers/net/vxlan.c 	vs = rcu_dereference_sk_user_data(sk);
sk               2305 drivers/net/vxlan.c 	ndst = ipv6_stub->ipv6_dst_lookup_flow(vxlan->net, sock6->sock->sk,
sk               2512 drivers/net/vxlan.c 			ifindex = sock4->sock->sk->sk_bound_dev_if;
sk               2556 drivers/net/vxlan.c 		udp_tunnel_xmit_skb(rt, sock4->sock->sk, skb, local_ip.sin.sin_addr.s_addr,
sk               2564 drivers/net/vxlan.c 			ifindex = sock6->sock->sk->sk_bound_dev_if;
sk               2597 drivers/net/vxlan.c 		udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
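The vxlan.c multicast lines join and leave the tunnel's IPv4 group on the kernel UDP socket; ip_mc_join_group()/ip_mc_leave_group() expect the socket lock, hence the lock_sock()/release_sock() bracket. A sketch of the join side (my_join_group() is a placeholder):

    #include <linux/igmp.h>
    #include <net/sock.h>

    static int my_join_group(struct sock *sk, __be32 group, int ifindex)
    {
            struct ip_mreqn mreq = {
                    .imr_multiaddr.s_addr   = group,
                    .imr_ifindex            = ifindex,
            };
            int ret;

            lock_sock(sk);
            ret = ip_mc_join_group(sk, &mreq);
            release_sock(sk);
            return ret;
    }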
sk                628 drivers/net/wireless/ath/wil6210/txrx.h 	return is_unicast_ether_addr(da) && skb->sk &&
sk               3697 drivers/net/wireless/mac80211_hwsim.c 		if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk)))
sk                914 drivers/net/wireless/marvell/mwifiex/main.c 	if (unlikely(!multicast && skb->sk &&
sk                783 drivers/nvme/host/tcp.c static void nvme_tcp_data_ready(struct sock *sk)
sk                787 drivers/nvme/host/tcp.c 	read_lock(&sk->sk_callback_lock);
sk                788 drivers/nvme/host/tcp.c 	queue = sk->sk_user_data;
sk                791 drivers/nvme/host/tcp.c 	read_unlock(&sk->sk_callback_lock);
sk                794 drivers/nvme/host/tcp.c static void nvme_tcp_write_space(struct sock *sk)
sk                798 drivers/nvme/host/tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk                799 drivers/nvme/host/tcp.c 	queue = sk->sk_user_data;
sk                800 drivers/nvme/host/tcp.c 	if (likely(queue && sk_stream_is_writeable(sk))) {
sk                801 drivers/nvme/host/tcp.c 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                804 drivers/nvme/host/tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                807 drivers/nvme/host/tcp.c static void nvme_tcp_state_change(struct sock *sk)
sk                811 drivers/nvme/host/tcp.c 	read_lock(&sk->sk_callback_lock);
sk                812 drivers/nvme/host/tcp.c 	queue = sk->sk_user_data;
sk                816 drivers/nvme/host/tcp.c 	switch (sk->sk_state) {
sk                828 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), sk->sk_state);
sk                831 drivers/nvme/host/tcp.c 	queue->state_change(sk);
sk                833 drivers/nvme/host/tcp.c 	read_unlock(&sk->sk_callback_lock);
sk               1026 drivers/nvme/host/tcp.c 	struct sock *sk = sock->sk;
sk               1032 drivers/nvme/host/tcp.c 	lock_sock(sk);
sk               1034 drivers/nvme/host/tcp.c 	consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
sk               1035 drivers/nvme/host/tcp.c 	release_sock(sk);
sk               1322 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_allocation = GFP_ATOMIC;
sk               1333 drivers/nvme/host/tcp.c 	sk_set_memalloc(queue->sock->sk);
sk               1384 drivers/nvme/host/tcp.c 	write_lock_bh(&queue->sock->sk->sk_callback_lock);
sk               1385 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_user_data = queue;
sk               1386 drivers/nvme/host/tcp.c 	queue->state_change = queue->sock->sk->sk_state_change;
sk               1387 drivers/nvme/host/tcp.c 	queue->data_ready = queue->sock->sk->sk_data_ready;
sk               1388 drivers/nvme/host/tcp.c 	queue->write_space = queue->sock->sk->sk_write_space;
sk               1389 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
sk               1390 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_state_change = nvme_tcp_state_change;
sk               1391 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_write_space = nvme_tcp_write_space;
sk               1393 drivers/nvme/host/tcp.c 	queue->sock->sk->sk_ll_usec = 1;
sk               1395 drivers/nvme/host/tcp.c 	write_unlock_bh(&queue->sock->sk->sk_callback_lock);
sk               1416 drivers/nvme/host/tcp.c 	write_lock_bh(&sock->sk->sk_callback_lock);
sk               1417 drivers/nvme/host/tcp.c 	sock->sk->sk_user_data  = NULL;
sk               1418 drivers/nvme/host/tcp.c 	sock->sk->sk_data_ready = queue->data_ready;
sk               1419 drivers/nvme/host/tcp.c 	sock->sk->sk_state_change = queue->state_change;
sk               1420 drivers/nvme/host/tcp.c 	sock->sk->sk_write_space  = queue->write_space;
sk               1421 drivers/nvme/host/tcp.c 	write_unlock_bh(&sock->sk->sk_callback_lock);
sk               2226 drivers/nvme/host/tcp.c 	struct sock *sk = queue->sock->sk;
sk               2228 drivers/nvme/host/tcp.c 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk               2229 drivers/nvme/host/tcp.c 		sk_busy_loop(sk, true);
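The drivers/nvme/host/tcp.c hits above show the usual way a kernel driver takes over a socket's callbacks: under write_lock_bh(&sk->sk_callback_lock) it stashes its context in sk_user_data, saves the original sk_data_ready/sk_state_change/sk_write_space pointers, installs its own handlers, and restores everything on teardown. The same shape recurs below in nvme/target/tcp.c, iscsi_tcp.c, dlm/lowcomms.c, ocfs2/cluster/tcp.c and xen/pvcalls-back.c. A minimal sketch of the pattern follows; my_queue, my_data_ready and the hook/unhook helpers are hypothetical names, not the driver's actual symbols.

#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/sock.h>

struct my_queue {
	struct socket		*sock;
	void			(*saved_data_ready)(struct sock *sk);
	struct work_struct	io_work;
};

/* Hypothetical data-ready handler: runs in softirq context, so it only
 * takes the callback lock for reading and defers the real work. */
static void my_data_ready(struct sock *sk)
{
	struct my_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (queue)
		queue_work(system_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void my_hook_socket(struct my_queue *queue)
{
	struct sock *sk = queue->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = queue;
	queue->saved_data_ready = sk->sk_data_ready;	/* remember original */
	sk->sk_data_ready = my_data_ready;		/* install override */
	write_unlock_bh(&sk->sk_callback_lock);
}

static void my_unhook_socket(struct my_queue *queue)
{
	struct sock *sk = queue->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = queue->saved_data_ready;	/* restore original */
	write_unlock_bh(&sk->sk_callback_lock);
}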
sk               1303 drivers/nvme/target/tcp.c 	write_lock_bh(&sock->sk->sk_callback_lock);
sk               1304 drivers/nvme/target/tcp.c 	sock->sk->sk_data_ready =  queue->data_ready;
sk               1305 drivers/nvme/target/tcp.c 	sock->sk->sk_state_change = queue->state_change;
sk               1306 drivers/nvme/target/tcp.c 	sock->sk->sk_write_space = queue->write_space;
sk               1307 drivers/nvme/target/tcp.c 	sock->sk->sk_user_data = NULL;
sk               1308 drivers/nvme/target/tcp.c 	write_unlock_bh(&sock->sk->sk_callback_lock);
sk               1359 drivers/nvme/target/tcp.c static void nvmet_tcp_data_ready(struct sock *sk)
sk               1363 drivers/nvme/target/tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk               1364 drivers/nvme/target/tcp.c 	queue = sk->sk_user_data;
sk               1367 drivers/nvme/target/tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               1370 drivers/nvme/target/tcp.c static void nvmet_tcp_write_space(struct sock *sk)
sk               1374 drivers/nvme/target/tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk               1375 drivers/nvme/target/tcp.c 	queue = sk->sk_user_data;
sk               1380 drivers/nvme/target/tcp.c 		queue->write_space(sk);
sk               1384 drivers/nvme/target/tcp.c 	if (sk_stream_is_writeable(sk)) {
sk               1385 drivers/nvme/target/tcp.c 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               1389 drivers/nvme/target/tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               1392 drivers/nvme/target/tcp.c static void nvmet_tcp_state_change(struct sock *sk)
sk               1396 drivers/nvme/target/tcp.c 	write_lock_bh(&sk->sk_callback_lock);
sk               1397 drivers/nvme/target/tcp.c 	queue = sk->sk_user_data;
sk               1401 drivers/nvme/target/tcp.c 	switch (sk->sk_state) {
sk               1406 drivers/nvme/target/tcp.c 		sk->sk_user_data = NULL;
sk               1411 drivers/nvme/target/tcp.c 			queue->idx, sk->sk_state);
sk               1414 drivers/nvme/target/tcp.c 	write_unlock_bh(&sk->sk_callback_lock);
sk               1420 drivers/nvme/target/tcp.c 	struct inet_sock *inet = inet_sk(sock->sk);
sk               1454 drivers/nvme/target/tcp.c 	write_lock_bh(&sock->sk->sk_callback_lock);
sk               1455 drivers/nvme/target/tcp.c 	sock->sk->sk_user_data = queue;
sk               1456 drivers/nvme/target/tcp.c 	queue->data_ready = sock->sk->sk_data_ready;
sk               1457 drivers/nvme/target/tcp.c 	sock->sk->sk_data_ready = nvmet_tcp_data_ready;
sk               1458 drivers/nvme/target/tcp.c 	queue->state_change = sock->sk->sk_state_change;
sk               1459 drivers/nvme/target/tcp.c 	sock->sk->sk_state_change = nvmet_tcp_state_change;
sk               1460 drivers/nvme/target/tcp.c 	queue->write_space = sock->sk->sk_write_space;
sk               1461 drivers/nvme/target/tcp.c 	sock->sk->sk_write_space = nvmet_tcp_write_space;
sk               1462 drivers/nvme/target/tcp.c 	write_unlock_bh(&sock->sk->sk_callback_lock);
sk               1554 drivers/nvme/target/tcp.c static void nvmet_tcp_listen_data_ready(struct sock *sk)
sk               1558 drivers/nvme/target/tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk               1559 drivers/nvme/target/tcp.c 	port = sk->sk_user_data;
sk               1563 drivers/nvme/target/tcp.c 	if (sk->sk_state == TCP_LISTEN)
sk               1566 drivers/nvme/target/tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               1614 drivers/nvme/target/tcp.c 	port->sock->sk->sk_user_data = port;
sk               1615 drivers/nvme/target/tcp.c 	port->data_ready = port->sock->sk->sk_data_ready;
sk               1616 drivers/nvme/target/tcp.c 	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
sk               1663 drivers/nvme/target/tcp.c 	write_lock_bh(&port->sock->sk->sk_callback_lock);
sk               1664 drivers/nvme/target/tcp.c 	port->sock->sk->sk_data_ready = port->data_ready;
sk               1665 drivers/nvme/target/tcp.c 	port->sock->sk->sk_user_data = NULL;
sk               1666 drivers/nvme/target/tcp.c 	write_unlock_bh(&port->sock->sk->sk_callback_lock);
sk               1086 drivers/s390/net/qeth_core_main.c 		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
sk               1087 drivers/s390/net/qeth_core_main.c 			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
sk                563 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 		skb->sk = (struct sock *)csk;
sk                993 drivers/scsi/cxgbi/cxgb3i/cxgb3i.c 	skb->sk = (struct sock *)csk;
sk                943 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 		skb->sk = (struct sock *)csk;
sk               1774 drivers/scsi/cxgbi/cxgb4i/cxgb4i.c 	skb->sk = (struct sock *)csk;
sk                921 drivers/scsi/cxgbi/libcxgbi.c 	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;
sk                111 drivers/scsi/iscsi_tcp.c static inline int iscsi_sw_sk_state_check(struct sock *sk)
sk                113 drivers/scsi/iscsi_tcp.c 	struct iscsi_conn *conn = sk->sk_user_data;
sk                115 drivers/scsi/iscsi_tcp.c 	if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
sk                117 drivers/scsi/iscsi_tcp.c 	    !atomic_read(&sk->sk_rmem_alloc)) {
sk                125 drivers/scsi/iscsi_tcp.c static void iscsi_sw_tcp_data_ready(struct sock *sk)
sk                131 drivers/scsi/iscsi_tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk                132 drivers/scsi/iscsi_tcp.c 	conn = sk->sk_user_data;
sk                134 drivers/scsi/iscsi_tcp.c 		read_unlock_bh(&sk->sk_callback_lock);
sk                147 drivers/scsi/iscsi_tcp.c 	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
sk                149 drivers/scsi/iscsi_tcp.c 	iscsi_sw_sk_state_check(sk);
sk                154 drivers/scsi/iscsi_tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                157 drivers/scsi/iscsi_tcp.c static void iscsi_sw_tcp_state_change(struct sock *sk)
sk                164 drivers/scsi/iscsi_tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk                165 drivers/scsi/iscsi_tcp.c 	conn = sk->sk_user_data;
sk                167 drivers/scsi/iscsi_tcp.c 		read_unlock_bh(&sk->sk_callback_lock);
sk                171 drivers/scsi/iscsi_tcp.c 	iscsi_sw_sk_state_check(sk);
sk                177 drivers/scsi/iscsi_tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                179 drivers/scsi/iscsi_tcp.c 	old_state_change(sk);
sk                186 drivers/scsi/iscsi_tcp.c static void iscsi_sw_tcp_write_space(struct sock *sk)
sk                193 drivers/scsi/iscsi_tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk                194 drivers/scsi/iscsi_tcp.c 	conn = sk->sk_user_data;
sk                196 drivers/scsi/iscsi_tcp.c 		read_unlock_bh(&sk->sk_callback_lock);
sk                203 drivers/scsi/iscsi_tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                205 drivers/scsi/iscsi_tcp.c 	old_write_space(sk);
sk                215 drivers/scsi/iscsi_tcp.c 	struct sock *sk = tcp_sw_conn->sock->sk;
sk                218 drivers/scsi/iscsi_tcp.c 	write_lock_bh(&sk->sk_callback_lock);
sk                219 drivers/scsi/iscsi_tcp.c 	sk->sk_user_data = conn;
sk                220 drivers/scsi/iscsi_tcp.c 	tcp_sw_conn->old_data_ready = sk->sk_data_ready;
sk                221 drivers/scsi/iscsi_tcp.c 	tcp_sw_conn->old_state_change = sk->sk_state_change;
sk                222 drivers/scsi/iscsi_tcp.c 	tcp_sw_conn->old_write_space = sk->sk_write_space;
sk                223 drivers/scsi/iscsi_tcp.c 	sk->sk_data_ready = iscsi_sw_tcp_data_ready;
sk                224 drivers/scsi/iscsi_tcp.c 	sk->sk_state_change = iscsi_sw_tcp_state_change;
sk                225 drivers/scsi/iscsi_tcp.c 	sk->sk_write_space = iscsi_sw_tcp_write_space;
sk                226 drivers/scsi/iscsi_tcp.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                234 drivers/scsi/iscsi_tcp.c 	struct sock *sk = tcp_sw_conn->sock->sk;
sk                237 drivers/scsi/iscsi_tcp.c 	write_lock_bh(&sk->sk_callback_lock);
sk                238 drivers/scsi/iscsi_tcp.c 	sk->sk_user_data    = NULL;
sk                239 drivers/scsi/iscsi_tcp.c 	sk->sk_data_ready   = tcp_sw_conn->old_data_ready;
sk                240 drivers/scsi/iscsi_tcp.c 	sk->sk_state_change = tcp_sw_conn->old_state_change;
sk                241 drivers/scsi/iscsi_tcp.c 	sk->sk_write_space  = tcp_sw_conn->old_write_space;
sk                242 drivers/scsi/iscsi_tcp.c 	sk->sk_no_check_tx = 0;
sk                243 drivers/scsi/iscsi_tcp.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                263 drivers/scsi/iscsi_tcp.c 	struct socket *sk = tcp_sw_conn->sock;
sk                283 drivers/scsi/iscsi_tcp.c 			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
sk                292 drivers/scsi/iscsi_tcp.c 			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
sk                603 drivers/scsi/iscsi_tcp.c 	sock_hold(sock->sk);
sk                605 drivers/scsi/iscsi_tcp.c 	sock_put(sock->sk);
sk                644 drivers/scsi/iscsi_tcp.c 	sock->sk->sk_err = EIO;
sk                645 drivers/scsi/iscsi_tcp.c 	wake_up_interruptible(sk_sleep(sock->sk));
sk                665 drivers/scsi/iscsi_tcp.c 	struct sock *sk;
sk                687 drivers/scsi/iscsi_tcp.c 	sk = sock->sk;
sk                688 drivers/scsi/iscsi_tcp.c 	sk->sk_reuse = SK_CAN_REUSE;
sk                689 drivers/scsi/iscsi_tcp.c 	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
sk                690 drivers/scsi/iscsi_tcp.c 	sk->sk_allocation = GFP_ATOMIC;
sk                691 drivers/scsi/iscsi_tcp.c 	sk_set_memalloc(sk);
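The iscsi_tcp.c hits above (and the ->read_sock() call in nvme/host/tcp.c earlier) drain the socket with tcp_read_sock(), which walks the receive queue and hands the data to a caller-supplied actor together with a read_descriptor_t cookie. A hedged sketch of that interface, with hypothetical my_conn/my_recv/my_read names:

#include <linux/fs.h>
#include <net/tcp.h>

struct my_conn {
	size_t	bytes_received;
};

/* Recv actor: invoked for queued data; returns how many bytes it
 * consumed so tcp_read_sock() can advance past them. */
static int my_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
		   unsigned int offset, size_t len)
{
	struct my_conn *conn = rd_desc->arg.data;

	/* parse up to 'len' bytes of 'skb' starting at 'offset' here */
	conn->bytes_received += len;
	return len;
}

/* Typically called from a hooked sk_data_ready or a receive work item. */
static void my_read(struct sock *sk, struct my_conn *conn)
{
	read_descriptor_t rd_desc = {
		.arg.data = conn,
		.count = 1,	/* non-zero keeps tcp_read_sock() reading */
	};

	tcp_read_sock(sk, &rd_desc, my_recv);
}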
sk                568 drivers/soc/qcom/qmi_interface.c static void qmi_data_ready(struct sock *sk)
sk                570 drivers/soc/qcom/qmi_interface.c 	struct qmi_handle *qmi = sk->sk_user_data;
sk                599 drivers/soc/qcom/qmi_interface.c 	sock->sk->sk_user_data = qmi;
sk                600 drivers/soc/qcom/qmi_interface.c 	sock->sk->sk_data_ready = qmi_data_ready;
sk                601 drivers/soc/qcom/qmi_interface.c 	sock->sk->sk_error_report = qmi_data_ready;
sk                685 drivers/soc/qcom/qmi_interface.c 	sock->sk->sk_user_data = NULL;
sk               2572 drivers/target/iscsi/iscsi_target.c 	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
sk               2573 drivers/target/iscsi/iscsi_target.c 	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
sk               2761 drivers/target/iscsi/iscsi_target.c 	if ((conn->sock->sk->sk_shutdown & SEND_SHUTDOWN) ||
sk               2762 drivers/target/iscsi/iscsi_target.c 	    (conn->sock->sk->sk_shutdown & RCV_SHUTDOWN)) {
sk                391 drivers/target/iscsi/iscsi_target_nego.c static void iscsi_target_sk_data_ready(struct sock *sk)
sk                393 drivers/target/iscsi/iscsi_target_nego.c 	struct iscsi_conn *conn = sk->sk_user_data;
sk                398 drivers/target/iscsi/iscsi_target_nego.c 	write_lock_bh(&sk->sk_callback_lock);
sk                399 drivers/target/iscsi/iscsi_target_nego.c 	if (!sk->sk_user_data) {
sk                400 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                404 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                409 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                414 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                418 drivers/target/iscsi/iscsi_target_nego.c 		conn->orig_data_ready(sk);
sk                427 drivers/target/iscsi/iscsi_target_nego.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                434 drivers/target/iscsi/iscsi_target_nego.c 	struct sock *sk;
sk                439 drivers/target/iscsi/iscsi_target_nego.c 	sk = conn->sock->sk;
sk                442 drivers/target/iscsi/iscsi_target_nego.c 	write_lock_bh(&sk->sk_callback_lock);
sk                443 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_user_data = conn;
sk                444 drivers/target/iscsi/iscsi_target_nego.c 	conn->orig_data_ready = sk->sk_data_ready;
sk                445 drivers/target/iscsi/iscsi_target_nego.c 	conn->orig_state_change = sk->sk_state_change;
sk                446 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_data_ready = iscsi_target_sk_data_ready;
sk                447 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_state_change = iscsi_target_sk_state_change;
sk                448 drivers/target/iscsi/iscsi_target_nego.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                450 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_sndtimeo = TA_LOGIN_TIMEOUT * HZ;
sk                451 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_rcvtimeo = TA_LOGIN_TIMEOUT * HZ;
sk                456 drivers/target/iscsi/iscsi_target_nego.c 	struct sock *sk;
sk                461 drivers/target/iscsi/iscsi_target_nego.c 	sk = conn->sock->sk;
sk                464 drivers/target/iscsi/iscsi_target_nego.c 	write_lock_bh(&sk->sk_callback_lock);
sk                465 drivers/target/iscsi/iscsi_target_nego.c 	if (!sk->sk_user_data) {
sk                466 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                469 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_user_data = NULL;
sk                470 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_data_ready = conn->orig_data_ready;
sk                471 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_state_change = conn->orig_state_change;
sk                472 drivers/target/iscsi/iscsi_target_nego.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                474 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk                475 drivers/target/iscsi/iscsi_target_nego.c 	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
sk                480 drivers/target/iscsi/iscsi_target_nego.c static bool __iscsi_target_sk_check_close(struct sock *sk)
sk                482 drivers/target/iscsi/iscsi_target_nego.c 	if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
sk                495 drivers/target/iscsi/iscsi_target_nego.c 		struct sock *sk = conn->sock->sk;
sk                497 drivers/target/iscsi/iscsi_target_nego.c 		read_lock_bh(&sk->sk_callback_lock);
sk                498 drivers/target/iscsi/iscsi_target_nego.c 		state = (__iscsi_target_sk_check_close(sk) ||
sk                500 drivers/target/iscsi/iscsi_target_nego.c 		read_unlock_bh(&sk->sk_callback_lock);
sk                510 drivers/target/iscsi/iscsi_target_nego.c 		struct sock *sk = conn->sock->sk;
sk                512 drivers/target/iscsi/iscsi_target_nego.c 		read_lock_bh(&sk->sk_callback_lock);
sk                514 drivers/target/iscsi/iscsi_target_nego.c 		read_unlock_bh(&sk->sk_callback_lock);
sk                524 drivers/target/iscsi/iscsi_target_nego.c 		struct sock *sk = conn->sock->sk;
sk                526 drivers/target/iscsi/iscsi_target_nego.c 		write_lock_bh(&sk->sk_callback_lock);
sk                527 drivers/target/iscsi/iscsi_target_nego.c 		state = (__iscsi_target_sk_check_close(sk) ||
sk                531 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                647 drivers/target/iscsi/iscsi_target_nego.c static void iscsi_target_sk_state_change(struct sock *sk)
sk                655 drivers/target/iscsi/iscsi_target_nego.c 	write_lock_bh(&sk->sk_callback_lock);
sk                656 drivers/target/iscsi/iscsi_target_nego.c 	conn = sk->sk_user_data;
sk                658 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                666 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                667 drivers/target/iscsi/iscsi_target_nego.c 		orig_state_change(sk);
sk                670 drivers/target/iscsi/iscsi_target_nego.c 	state = __iscsi_target_sk_check_close(sk);
sk                678 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                679 drivers/target/iscsi/iscsi_target_nego.c 		orig_state_change(sk);
sk                685 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                686 drivers/target/iscsi/iscsi_target_nego.c 		orig_state_change(sk);
sk                708 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                710 drivers/target/iscsi/iscsi_target_nego.c 		orig_state_change(sk);
sk                716 drivers/target/iscsi/iscsi_target_nego.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                718 drivers/target/iscsi/iscsi_target_nego.c 	orig_state_change(sk);
sk               1280 drivers/target/iscsi/iscsi_target_nego.c 		struct sock *sk = conn->sock->sk;
sk               1282 drivers/target/iscsi/iscsi_target_nego.c 		write_lock_bh(&sk->sk_callback_lock);
sk               1285 drivers/target/iscsi/iscsi_target_nego.c 		write_unlock_bh(&sk->sk_callback_lock);
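The iscsi_target_nego.c hits above detect the initiator dropping the connection mid-login by checking sk->sk_state for TCP_CLOSE_WAIT or TCP_CLOSE from the hooked state_change callback, then chaining to the saved original handler. A condensed sketch of that shape; my_conn, my_sk_closed and my_state_change are hypothetical, and the real code does considerably more login-state bookkeeping:

#include <net/sock.h>
#include <net/tcp_states.h>

struct my_conn {
	void	(*orig_state_change)(struct sock *sk);
	bool	peer_closed;
};

/* The peer has gone away once the socket reaches TCP_CLOSE_WAIT (FIN
 * received) or TCP_CLOSE. */
static bool my_sk_closed(struct sock *sk)
{
	return sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE;
}

static void my_state_change(struct sock *sk)
{
	void (*orig_state_change)(struct sock *sk);
	struct my_conn *conn;

	write_lock_bh(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (!conn) {
		write_unlock_bh(&sk->sk_callback_lock);
		return;
	}
	orig_state_change = conn->orig_state_change;

	if (my_sk_closed(sk))
		conn->peer_closed = true;	/* cleanup scheduled elsewhere */
	write_unlock_bh(&sk->sk_callback_lock);

	orig_state_change(sk);	/* always chain to the saved handler */
}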
sk                 78 drivers/usb/storage/sddr55.c #define set_sense_info(sk, asc, ascq)	\
sk                 80 drivers/usb/storage/sddr55.c 	info->sense_data[2] = sk;	\
sk                317 drivers/usb/usbip/usbip_common.c 		sock->sk->sk_allocation = GFP_NOIO;
sk                345 drivers/vhost/net.c 		sock_flag(sock->sk, SOCK_ZEROCOPY);
sk                350 drivers/vhost/net.c 	return sock_flag(sock->sk, SOCK_XDP);
sk                494 drivers/vhost/net.c 	return skb_queue_empty(&sock->sk->sk_receive_queue);
sk                773 drivers/vhost/net.c 	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
sk                974 drivers/vhost/net.c static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
sk                983 drivers/vhost/net.c 	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
sk                984 drivers/vhost/net.c 	head = skb_peek(&sk->sk_receive_queue);
sk                991 drivers/vhost/net.c 	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
sk                995 drivers/vhost/net.c static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
sk               1002 drivers/vhost/net.c 	int len = peek_head_len(rnvq, sk);
sk               1010 drivers/vhost/net.c 		len = peek_head_len(rnvq, sk);
sk               1142 drivers/vhost/net.c 		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
sk               1424 drivers/vhost/net.c 	if (sock->sk->sk_type != SOCK_RAW) {
sk               1429 drivers/vhost/net.c 	if (sock->sk->sk_family != AF_PACKET) {
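The drivers/vhost/net.c hits above peek at the first queued packet while holding sk_receive_queue.lock with interrupts disabled, so the head skb cannot be dequeued while its length is read. A sketch in the spirit of peek_head_len(); the helper name is hypothetical and the VLAN adjustment mirrors what the excerpt's driver accounts for:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <net/sock.h>

/* Return the length of the first packet queued on 'sk', or 0 if the
 * receive queue is empty. */
static int my_peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	unsigned long flags;
	int len = 0;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (head) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;	/* tag gets re-inserted on receive */
	}
	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);

	return len;
}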
sk                609 drivers/vhost/vsock.c static void vhost_vsock_reset_orphans(struct sock *sk)
sk                611 drivers/vhost/vsock.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                628 drivers/vhost/vsock.c 	sock_set_flag(sk, SOCK_DONE);
sk                630 drivers/vhost/vsock.c 	sk->sk_state = SS_UNCONNECTED;
sk                631 drivers/vhost/vsock.c 	sk->sk_err = ECONNRESET;
sk                632 drivers/vhost/vsock.c 	sk->sk_error_report(sk);
sk                 69 drivers/xen/pvcalls-back.c 	void (*saved_data_ready)(struct sock *sk);
sk                 82 drivers/xen/pvcalls-back.c 	void (*saved_data_ready)(struct sock *sk);
sk                115 drivers/xen/pvcalls-back.c 	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
sk                116 drivers/xen/pvcalls-back.c 	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
sk                118 drivers/xen/pvcalls-back.c 		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
sk                122 drivers/xen/pvcalls-back.c 	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
sk                147 drivers/xen/pvcalls-back.c 	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
sk                148 drivers/xen/pvcalls-back.c 	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
sk                150 drivers/xen/pvcalls-back.c 	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
sk                360 drivers/xen/pvcalls-back.c 	write_lock_bh(&map->sock->sk->sk_callback_lock);
sk                361 drivers/xen/pvcalls-back.c 	map->saved_data_ready = map->sock->sk->sk_data_ready;
sk                362 drivers/xen/pvcalls-back.c 	map->sock->sk->sk_user_data = map;
sk                363 drivers/xen/pvcalls-back.c 	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
sk                364 drivers/xen/pvcalls-back.c 	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
sk                365 drivers/xen/pvcalls-back.c 	write_unlock_bh(&map->sock->sk->sk_callback_lock);
sk                427 drivers/xen/pvcalls-back.c 	if (map->sock->sk != NULL) {
sk                428 drivers/xen/pvcalls-back.c 		write_lock_bh(&map->sock->sk->sk_callback_lock);
sk                429 drivers/xen/pvcalls-back.c 		map->sock->sk->sk_user_data = NULL;
sk                430 drivers/xen/pvcalls-back.c 		map->sock->sk->sk_data_ready = map->saved_data_ready;
sk                431 drivers/xen/pvcalls-back.c 		write_unlock_bh(&map->sock->sk->sk_callback_lock);
sk                451 drivers/xen/pvcalls-back.c 	if (mappass->sock->sk != NULL) {
sk                452 drivers/xen/pvcalls-back.c 		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
sk                453 drivers/xen/pvcalls-back.c 		mappass->sock->sk->sk_user_data = NULL;
sk                454 drivers/xen/pvcalls-back.c 		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
sk                455 drivers/xen/pvcalls-back.c 		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
sk                650 drivers/xen/pvcalls-back.c 	write_lock_bh(&map->sock->sk->sk_callback_lock);
sk                651 drivers/xen/pvcalls-back.c 	map->saved_data_ready = map->sock->sk->sk_data_ready;
sk                652 drivers/xen/pvcalls-back.c 	map->sock->sk->sk_user_data = map;
sk                653 drivers/xen/pvcalls-back.c 	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
sk                654 drivers/xen/pvcalls-back.c 	write_unlock_bh(&map->sock->sk->sk_callback_lock);
sk                776 drivers/xen/pvcalls-back.c 	icsk = inet_csk(mappass->sock->sk);
sk                893 drivers/xen/pvcalls-back.c 	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
sk                894 drivers/xen/pvcalls-back.c 		map->sock->sk->sk_user_data != map)
sk                107 drivers/xen/pvcalls-front.c 	map = (struct sock_mapping *)sock->sk->sk_send_head;
sk                120 drivers/xen/pvcalls-front.c 	map = (struct sock_mapping *)sock->sk->sk_send_head;
sk                306 drivers/xen/pvcalls-front.c 	sock->sk->sk_send_head = (void *)map;
sk                875 drivers/xen/pvcalls-front.c 	newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
sk                876 drivers/xen/pvcalls-front.c 	if (!newsock->sk) {
sk                885 drivers/xen/pvcalls-front.c 	newsock->sk->sk_send_head = (void *)map2;
sk               1005 drivers/xen/pvcalls-front.c 	if (sock->sk == NULL)
sk               1024 drivers/xen/pvcalls-front.c 	sock->sk->sk_send_head = NULL;
sk               1098 drivers/xen/pvcalls-front.c 		map->sock->sk->sk_send_head = NULL;
sk               1055 fs/afs/internal.h static inline struct afs_net *afs_sock2net(struct sock *sk)
sk               1057 fs/afs/internal.h 	return net_generic(sock_net(sk), afs_net_id);
sk                 49 fs/afs/rxrpc.c 	socket->sk->sk_allocation = GFP_NOFS;
sk                685 fs/afs/rxrpc.c static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall,
sk                697 fs/afs/rxrpc.c static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
sk                792 fs/afs/rxrpc.c static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
sk                795 fs/afs/rxrpc.c 	struct afs_net *net = afs_sock2net(sk);
sk               2007 fs/btrfs/ioctl.c 			      struct btrfs_ioctl_search_key *sk)
sk               2012 fs/btrfs/ioctl.c 	test.objectid = sk->min_objectid;
sk               2013 fs/btrfs/ioctl.c 	test.type = sk->min_type;
sk               2014 fs/btrfs/ioctl.c 	test.offset = sk->min_offset;
sk               2020 fs/btrfs/ioctl.c 	test.objectid = sk->max_objectid;
sk               2021 fs/btrfs/ioctl.c 	test.type = sk->max_type;
sk               2022 fs/btrfs/ioctl.c 	test.offset = sk->max_offset;
sk               2032 fs/btrfs/ioctl.c 			       struct btrfs_ioctl_search_key *sk,
sk               2053 fs/btrfs/ioctl.c 	if (btrfs_header_generation(leaf) > sk->max_transid) {
sk               2064 fs/btrfs/ioctl.c 		if (!key_in_sk(key, sk))
sk               2118 fs/btrfs/ioctl.c 		if (*num_found >= sk->nr_items) {
sk               2125 fs/btrfs/ioctl.c 	test.objectid = sk->max_objectid;
sk               2126 fs/btrfs/ioctl.c 	test.type = sk->max_type;
sk               2127 fs/btrfs/ioctl.c 	test.offset = sk->max_offset;
sk               2155 fs/btrfs/ioctl.c 				 struct btrfs_ioctl_search_key *sk,
sk               2176 fs/btrfs/ioctl.c 	if (sk->tree_id == 0) {
sk               2180 fs/btrfs/ioctl.c 		key.objectid = sk->tree_id;
sk               2190 fs/btrfs/ioctl.c 	key.objectid = sk->min_objectid;
sk               2191 fs/btrfs/ioctl.c 	key.type = sk->min_type;
sk               2192 fs/btrfs/ioctl.c 	key.offset = sk->min_offset;
sk               2195 fs/btrfs/ioctl.c 		ret = btrfs_search_forward(root, &key, path, sk->min_transid);
sk               2201 fs/btrfs/ioctl.c 		ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
sk               2211 fs/btrfs/ioctl.c 	sk->nr_items = num_found;
sk               2220 fs/btrfs/ioctl.c 	struct btrfs_ioctl_search_key sk;
sk               2230 fs/btrfs/ioctl.c 	if (copy_from_user(&sk, &uargs->key, sizeof(sk)))
sk               2236 fs/btrfs/ioctl.c 	ret = search_ioctl(inode, &sk, &buf_size, uargs->buf);
sk               2245 fs/btrfs/ioctl.c 	if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk)))
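Note that in the fs/btrfs/ioctl.c hits above "sk" is a struct btrfs_ioctl_search_key, not a socket: key_in_sk() checks whether a leaf key lies inside the [min, max] bounds of the search request. A hedged sketch of that bounds test, assuming btrfs's internal headers (fs/btrfs/ctree.h for struct btrfs_key, struct btrfs_ioctl_search_key and btrfs_comp_cpu_keys(), which compares keys memcmp-style):

static int my_key_in_sk(struct btrfs_key *key,
			struct btrfs_ioctl_search_key *sk)
{
	struct btrfs_key test;
	int ret;

	/* lower bound */
	test.objectid = sk->min_objectid;
	test.type = sk->min_type;
	test.offset = sk->min_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret < 0)
		return 0;	/* below the minimum key */

	/* upper bound */
	test.objectid = sk->max_objectid;
	test.type = sk->max_type;
	test.offset = sk->max_offset;

	ret = btrfs_comp_cpu_keys(key, &test);
	if (ret > 0)
		return 0;	/* above the maximum key */
	return 1;
}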
sk               3721 fs/cifs/connect.c 	struct sock *sk = sock->sk;
sk               3722 fs/cifs/connect.c 	BUG_ON(!sock_allow_reclassification(sk));
sk               3723 fs/cifs/connect.c 	sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS",
sk               3730 fs/cifs/connect.c 	struct sock *sk = sock->sk;
sk               3731 fs/cifs/connect.c 	BUG_ON(!sock_allow_reclassification(sk));
sk               3732 fs/cifs/connect.c 	sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS",
sk               3892 fs/cifs/connect.c 		socket->sk->sk_allocation = GFP_NOFS;
sk               3908 fs/cifs/connect.c 	socket->sk->sk_rcvtimeo = 7 * HZ;
sk               3909 fs/cifs/connect.c 	socket->sk->sk_sndtimeo = 5 * HZ;
sk               3913 fs/cifs/connect.c 		if (socket->sk->sk_sndbuf < (200 * 1024))
sk               3914 fs/cifs/connect.c 			socket->sk->sk_sndbuf = 200 * 1024;
sk               3915 fs/cifs/connect.c 		if (socket->sk->sk_rcvbuf < (140 * 1024))
sk               3916 fs/cifs/connect.c 			socket->sk->sk_rcvbuf = 140 * 1024;
sk               3929 fs/cifs/connect.c 		 socket->sk->sk_sndbuf,
sk               3930 fs/cifs/connect.c 		 socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo);
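The fs/cifs/connect.c hits above tune the kernel socket before use: GFP_NOFS allocations so socket memory pressure cannot recurse back into the filesystem, short send/receive timeouts, and floor values for the buffer sizes. A condensed sketch; the helper name is hypothetical and the constants simply mirror the excerpt:

#include <linux/net.h>
#include <net/sock.h>

static void my_tune_client_socket(struct socket *socket)
{
	struct sock *sk = socket->sk;

	sk->sk_allocation = GFP_NOFS;	/* avoid fs reclaim from socket allocs */
	sk->sk_rcvtimeo = 7 * HZ;
	sk->sk_sndtimeo = 5 * HZ;

	/* make sure the buffers are not pathologically small */
	if (sk->sk_sndbuf < 200 * 1024)
		sk->sk_sndbuf = 200 * 1024;
	if (sk->sk_rcvbuf < 140 * 1024)
		sk->sk_rcvbuf = 140 * 1024;
}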
sk                410 fs/dlm/lowcomms.c static void lowcomms_data_ready(struct sock *sk)
sk                414 fs/dlm/lowcomms.c 	read_lock_bh(&sk->sk_callback_lock);
sk                415 fs/dlm/lowcomms.c 	con = sock2con(sk);
sk                418 fs/dlm/lowcomms.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                421 fs/dlm/lowcomms.c static void lowcomms_write_space(struct sock *sk)
sk                425 fs/dlm/lowcomms.c 	read_lock_bh(&sk->sk_callback_lock);
sk                426 fs/dlm/lowcomms.c 	con = sock2con(sk);
sk                433 fs/dlm/lowcomms.c 		con->sock->sk->sk_write_pending--;
sk                439 fs/dlm/lowcomms.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                450 fs/dlm/lowcomms.c static void lowcomms_state_change(struct sock *sk)
sk                457 fs/dlm/lowcomms.c 	if (sk->sk_shutdown) {
sk                458 fs/dlm/lowcomms.c 		if (sk->sk_shutdown == RCV_SHUTDOWN)
sk                459 fs/dlm/lowcomms.c 			lowcomms_data_ready(sk);
sk                460 fs/dlm/lowcomms.c 	} else if (sk->sk_state == TCP_ESTABLISHED) {
sk                461 fs/dlm/lowcomms.c 		lowcomms_write_space(sk);
sk                479 fs/dlm/lowcomms.c static void lowcomms_error_report(struct sock *sk)
sk                485 fs/dlm/lowcomms.c 	read_lock_bh(&sk->sk_callback_lock);
sk                486 fs/dlm/lowcomms.c 	con = sock2con(sk);
sk                497 fs/dlm/lowcomms.c 				   sk->sk_err, sk->sk_err_soft);
sk                505 fs/dlm/lowcomms.c 				   dlm_config.ci_tcp_port, sk->sk_err,
sk                506 fs/dlm/lowcomms.c 				   sk->sk_err_soft);
sk                517 fs/dlm/lowcomms.c 				   dlm_config.ci_tcp_port, sk->sk_err,
sk                518 fs/dlm/lowcomms.c 				   sk->sk_err_soft);
sk                521 fs/dlm/lowcomms.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                523 fs/dlm/lowcomms.c 		orig_report(sk);
sk                529 fs/dlm/lowcomms.c 	struct sock *sk = sock->sk;
sk                531 fs/dlm/lowcomms.c 	listen_sock.sk_data_ready = sk->sk_data_ready;
sk                532 fs/dlm/lowcomms.c 	listen_sock.sk_state_change = sk->sk_state_change;
sk                533 fs/dlm/lowcomms.c 	listen_sock.sk_write_space = sk->sk_write_space;
sk                534 fs/dlm/lowcomms.c 	listen_sock.sk_error_report = sk->sk_error_report;
sk                539 fs/dlm/lowcomms.c 	struct sock *sk = sock->sk;
sk                541 fs/dlm/lowcomms.c 	write_lock_bh(&sk->sk_callback_lock);
sk                542 fs/dlm/lowcomms.c 	sk->sk_user_data = NULL;
sk                543 fs/dlm/lowcomms.c 	sk->sk_data_ready = listen_sock.sk_data_ready;
sk                544 fs/dlm/lowcomms.c 	sk->sk_state_change = listen_sock.sk_state_change;
sk                545 fs/dlm/lowcomms.c 	sk->sk_write_space = listen_sock.sk_write_space;
sk                546 fs/dlm/lowcomms.c 	sk->sk_error_report = listen_sock.sk_error_report;
sk                547 fs/dlm/lowcomms.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                553 fs/dlm/lowcomms.c 	struct sock *sk = sock->sk;
sk                555 fs/dlm/lowcomms.c 	write_lock_bh(&sk->sk_callback_lock);
sk                558 fs/dlm/lowcomms.c 	sk->sk_user_data = con;
sk                560 fs/dlm/lowcomms.c 	sk->sk_data_ready = lowcomms_data_ready;
sk                561 fs/dlm/lowcomms.c 	sk->sk_write_space = lowcomms_write_space;
sk                562 fs/dlm/lowcomms.c 	sk->sk_state_change = lowcomms_state_change;
sk                563 fs/dlm/lowcomms.c 	sk->sk_allocation = GFP_NOFS;
sk                564 fs/dlm/lowcomms.c 	sk->sk_error_report = lowcomms_error_report;
sk                565 fs/dlm/lowcomms.c 	write_unlock_bh(&sk->sk_callback_lock);
sk               1253 fs/dlm/lowcomms.c 	write_lock_bh(&sock->sk->sk_callback_lock);
sk               1254 fs/dlm/lowcomms.c 	sock->sk->sk_user_data = con;
sk               1258 fs/dlm/lowcomms.c 	write_unlock_bh(&sock->sk->sk_callback_lock);
sk               1337 fs/dlm/lowcomms.c 	write_lock_bh(&sock->sk->sk_callback_lock);
sk               1339 fs/dlm/lowcomms.c 	sock->sk->sk_user_data = con;
sk               1342 fs/dlm/lowcomms.c 	con->sock->sk->sk_data_ready = lowcomms_data_ready;
sk               1346 fs/dlm/lowcomms.c 	write_unlock_bh(&sock->sk->sk_callback_lock);
sk               1522 fs/dlm/lowcomms.c 					con->sock->sk->sk_write_pending++;
sk               1663 fs/dlm/lowcomms.c 	if (con->sock && con->sock->sk) {
sk               1664 fs/dlm/lowcomms.c 		write_lock_bh(&con->sock->sk->sk_callback_lock);
sk               1665 fs/dlm/lowcomms.c 		con->sock->sk->sk_user_data = NULL;
sk               1666 fs/dlm/lowcomms.c 		write_unlock_bh(&con->sock->sk->sk_callback_lock);
sk                420 fs/eventpoll.c 	struct sock *sk;
sk                430 fs/eventpoll.c 	sk = sock->sk;
sk                431 fs/eventpoll.c 	if (!sk)
sk                434 fs/eventpoll.c 	napi_id = READ_ONCE(sk->sk_napi_id);
sk                382 fs/io_uring.c  		return ctx->ring_sock->sk;
sk               3027 fs/io_uring.c  		struct sock *sock = ctx->ring_sock->sk;
sk               3085 fs/io_uring.c  	struct io_ring_ctx *ctx = skb->sk->sk_user_data;
sk               3102 fs/io_uring.c  	struct sock *sk = ctx->ring_sock->sk;
sk               3117 fs/io_uring.c  	skb->sk = sk;
sk               3128 fs/io_uring.c  	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
sk               3129 fs/io_uring.c  	skb_queue_head(&sk->sk_receive_queue, skb);
sk               3874 fs/io_uring.c  	ctx->ring_sock->sk->sk_user_data = ctx;
sk                307 fs/ocfs2/cluster/netdebug.c 		inet = inet_sk(sc->sc_sock->sk);
sk                127 fs/ocfs2/cluster/tcp.c static void o2net_listen_data_ready(struct sock *sk);
sk                585 fs/ocfs2/cluster/tcp.c static void o2net_data_ready(struct sock *sk)
sk                587 fs/ocfs2/cluster/tcp.c 	void (*ready)(struct sock *sk);
sk                590 fs/ocfs2/cluster/tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk                591 fs/ocfs2/cluster/tcp.c 	sc = sk->sk_user_data;
sk                598 fs/ocfs2/cluster/tcp.c 		ready = sk->sk_data_ready;
sk                600 fs/ocfs2/cluster/tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                602 fs/ocfs2/cluster/tcp.c 	ready(sk);
sk                606 fs/ocfs2/cluster/tcp.c static void o2net_state_change(struct sock *sk)
sk                608 fs/ocfs2/cluster/tcp.c 	void (*state_change)(struct sock *sk);
sk                611 fs/ocfs2/cluster/tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk                612 fs/ocfs2/cluster/tcp.c 	sc = sk->sk_user_data;
sk                614 fs/ocfs2/cluster/tcp.c 		state_change = sk->sk_state_change;
sk                618 fs/ocfs2/cluster/tcp.c 	sclog(sc, "state_change to %d\n", sk->sk_state);
sk                622 fs/ocfs2/cluster/tcp.c 	switch(sk->sk_state) {
sk                633 fs/ocfs2/cluster/tcp.c 			SC_NODEF_ARGS(sc), sk->sk_state);
sk                638 fs/ocfs2/cluster/tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                639 fs/ocfs2/cluster/tcp.c 	state_change(sk);
sk                647 fs/ocfs2/cluster/tcp.c static void o2net_register_callbacks(struct sock *sk,
sk                650 fs/ocfs2/cluster/tcp.c 	write_lock_bh(&sk->sk_callback_lock);
sk                653 fs/ocfs2/cluster/tcp.c 	if (sk->sk_data_ready == o2net_listen_data_ready) {
sk                654 fs/ocfs2/cluster/tcp.c 		sk->sk_data_ready = sk->sk_user_data;
sk                655 fs/ocfs2/cluster/tcp.c 		sk->sk_user_data = NULL;
sk                658 fs/ocfs2/cluster/tcp.c 	BUG_ON(sk->sk_user_data != NULL);
sk                659 fs/ocfs2/cluster/tcp.c 	sk->sk_user_data = sc;
sk                662 fs/ocfs2/cluster/tcp.c 	sc->sc_data_ready = sk->sk_data_ready;
sk                663 fs/ocfs2/cluster/tcp.c 	sc->sc_state_change = sk->sk_state_change;
sk                664 fs/ocfs2/cluster/tcp.c 	sk->sk_data_ready = o2net_data_ready;
sk                665 fs/ocfs2/cluster/tcp.c 	sk->sk_state_change = o2net_state_change;
sk                669 fs/ocfs2/cluster/tcp.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                672 fs/ocfs2/cluster/tcp.c static int o2net_unregister_callbacks(struct sock *sk,
sk                677 fs/ocfs2/cluster/tcp.c 	write_lock_bh(&sk->sk_callback_lock);
sk                678 fs/ocfs2/cluster/tcp.c 	if (sk->sk_user_data == sc) {
sk                680 fs/ocfs2/cluster/tcp.c 		sk->sk_user_data = NULL;
sk                681 fs/ocfs2/cluster/tcp.c 		sk->sk_data_ready = sc->sc_data_ready;
sk                682 fs/ocfs2/cluster/tcp.c 		sk->sk_state_change = sc->sc_state_change;
sk                684 fs/ocfs2/cluster/tcp.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                723 fs/ocfs2/cluster/tcp.c 	if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
sk               1627 fs/ocfs2/cluster/tcp.c 	sock->sk->sk_allocation = GFP_ATOMIC;
sk               1653 fs/ocfs2/cluster/tcp.c 	o2net_register_callbacks(sc->sc_sock->sk, sc);
sk               1825 fs/ocfs2/cluster/tcp.c 	ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
sk               1826 fs/ocfs2/cluster/tcp.c 			       sock->sk->sk_protocol, &new_sock);
sk               1837 fs/ocfs2/cluster/tcp.c 	new_sock->sk->sk_allocation = GFP_ATOMIC;
sk               1921 fs/ocfs2/cluster/tcp.c 	o2net_register_callbacks(sc->sc_sock->sk, sc);
sk               1973 fs/ocfs2/cluster/tcp.c static void o2net_listen_data_ready(struct sock *sk)
sk               1975 fs/ocfs2/cluster/tcp.c 	void (*ready)(struct sock *sk);
sk               1977 fs/ocfs2/cluster/tcp.c 	read_lock_bh(&sk->sk_callback_lock);
sk               1978 fs/ocfs2/cluster/tcp.c 	ready = sk->sk_user_data;
sk               1980 fs/ocfs2/cluster/tcp.c 		ready = sk->sk_data_ready;
sk               1997 fs/ocfs2/cluster/tcp.c 	if (sk->sk_state == TCP_LISTEN) {
sk               2004 fs/ocfs2/cluster/tcp.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               2006 fs/ocfs2/cluster/tcp.c 		ready(sk);
sk               2025 fs/ocfs2/cluster/tcp.c 	sock->sk->sk_allocation = GFP_ATOMIC;
sk               2027 fs/ocfs2/cluster/tcp.c 	write_lock_bh(&sock->sk->sk_callback_lock);
sk               2028 fs/ocfs2/cluster/tcp.c 	sock->sk->sk_user_data = sock->sk->sk_data_ready;
sk               2029 fs/ocfs2/cluster/tcp.c 	sock->sk->sk_data_ready = o2net_listen_data_ready;
sk               2030 fs/ocfs2/cluster/tcp.c 	write_unlock_bh(&sock->sk->sk_callback_lock);
sk               2035 fs/ocfs2/cluster/tcp.c 	sock->sk->sk_reuse = SK_CAN_REUSE;
sk               2100 fs/ocfs2/cluster/tcp.c 	write_lock_bh(&sock->sk->sk_callback_lock);
sk               2101 fs/ocfs2/cluster/tcp.c 	sock->sk->sk_data_ready = sock->sk->sk_user_data;
sk               2102 fs/ocfs2/cluster/tcp.c 	sock->sk->sk_user_data = NULL;
sk               2103 fs/ocfs2/cluster/tcp.c 	write_unlock_bh(&sock->sk->sk_callback_lock);
sk                 57 fs/ocfs2/cluster/tcp.h 		if (sock->sk->sk_state != TCP_ESTABLISHED &&
sk                 58 fs/ocfs2/cluster/tcp.h 	    	    sock->sk->sk_state != TCP_CLOSE_WAIT)
sk                153 fs/ocfs2/cluster/tcp_internal.h 	void			(*sc_state_change)(struct sock *sk);
sk                154 fs/ocfs2/cluster/tcp_internal.h 	void			(*sc_data_ready)(struct sock *sk);
sk                 28 include/crypto/if_alg.h 	struct sock sk;
sk                 49 include/crypto/if_alg.h 	int (*accept)(void *private, struct sock *sk);
sk                 50 include/crypto/if_alg.h 	int (*accept_nokey)(void *private, struct sock *sk);
sk                 97 include/crypto/if_alg.h 	struct sock *sk;
sk                162 include/crypto/if_alg.h void af_alg_release_parent(struct sock *sk);
sk                163 include/crypto/if_alg.h int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
sk                168 include/crypto/if_alg.h static inline struct alg_sock *alg_sk(struct sock *sk)
sk                170 include/crypto/if_alg.h 	return (struct alg_sock *)sk;
sk                179 include/crypto/if_alg.h static inline int af_alg_sndbuf(struct sock *sk)
sk                181 include/crypto/if_alg.h 	struct alg_sock *ask = alg_sk(sk);
sk                184 include/crypto/if_alg.h 	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
sk                194 include/crypto/if_alg.h static inline bool af_alg_writable(struct sock *sk)
sk                196 include/crypto/if_alg.h 	return PAGE_SIZE <= af_alg_sndbuf(sk);
sk                205 include/crypto/if_alg.h static inline int af_alg_rcvbuf(struct sock *sk)
sk                207 include/crypto/if_alg.h 	struct alg_sock *ask = alg_sk(sk);
sk                210 include/crypto/if_alg.h 	return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) -
sk                220 include/crypto/if_alg.h static inline bool af_alg_readable(struct sock *sk)
sk                222 include/crypto/if_alg.h 	return PAGE_SIZE <= af_alg_rcvbuf(sk);
sk                225 include/crypto/if_alg.h unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
sk                226 include/crypto/if_alg.h void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
sk                228 include/crypto/if_alg.h void af_alg_wmem_wakeup(struct sock *sk);
sk                229 include/crypto/if_alg.h int af_alg_wait_for_data(struct sock *sk, unsigned flags);
sk                238 include/crypto/if_alg.h struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
sk                240 include/crypto/if_alg.h int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
sk                 37 include/linux/atalk.h 	struct sock	sk;
sk                 46 include/linux/atalk.h static inline struct atalk_sock *at_sk(struct sock *sk)
sk                 48 include/linux/atalk.h 	return (struct atalk_sock *)sk;
sk                 94 include/linux/atmdev.h 	struct sock	sk;
sk                125 include/linux/atmdev.h static inline struct atm_vcc *atm_sk(struct sock *sk)
sk                127 include/linux/atmdev.h 	return (struct atm_vcc *)sk;
sk                132 include/linux/atmdev.h 	return atm_sk(sock->sk);
sk                241 include/linux/atmdev.h void vcc_insert_socket(struct sock *sk);
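The alg_sk(), at_sk() and atm_sk() casts in the include/crypto/if_alg.h, include/linux/atalk.h and include/linux/atmdev.h hits above all depend on the protocol-private socket structure embedding struct sock as its first member, with sk_alloc() sizing the object via the protocol's obj_size. A minimal sketch of that convention; struct my_sock, my_sk() and my_proto are hypothetical:

#include <linux/module.h>
#include <net/sock.h>

/* Protocol-private socket state: 'struct sock' must stay first so a
 * 'struct sock *' and a 'struct my_sock *' point at the same address. */
struct my_sock {
	struct sock	sk;	/* must be the first member */
	u32		private_state;
};

static inline struct my_sock *my_sk(struct sock *sk)
{
	return (struct my_sock *)sk;
}

/* sk_alloc() allocates obj_size bytes, so every socket of this protocol
 * is really a struct my_sock. */
static struct proto my_proto = {
	.name		= "MY_PROTO",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct my_sock),
};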
sk                102 include/linux/bpf-cgroup.h int __cgroup_bpf_run_filter_skb(struct sock *sk,
sk                106 include/linux/bpf-cgroup.h int __cgroup_bpf_run_filter_sk(struct sock *sk,
sk                109 include/linux/bpf-cgroup.h int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
sk                114 include/linux/bpf-cgroup.h int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
sk                130 include/linux/bpf-cgroup.h int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
sk                168 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
sk                172 include/linux/bpf-cgroup.h 		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
sk                178 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
sk                181 include/linux/bpf-cgroup.h 	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
sk                182 include/linux/bpf-cgroup.h 		typeof(sk) __sk = sk_to_full_sk(sk);			       \
sk                190 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
sk                194 include/linux/bpf-cgroup.h 		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
sk                199 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
sk                200 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
sk                202 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
sk                203 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
sk                205 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
sk                206 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
sk                208 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
sk                212 include/linux/bpf-cgroup.h 		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
sk                217 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
sk                221 include/linux/bpf-cgroup.h 		lock_sock(sk);						       \
sk                222 include/linux/bpf-cgroup.h 		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
sk                224 include/linux/bpf-cgroup.h 		release_sock(sk);					       \
sk                229 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
sk                230 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)
sk                232 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
sk                233 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
sk                235 include/linux/bpf-cgroup.h #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
sk                236 include/linux/bpf-cgroup.h 					    sk->sk_prot->pre_connect)
sk                238 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
sk                239 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
sk                241 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
sk                242 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
sk                244 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
sk                245 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
sk                247 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
sk                248 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
sk                250 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
sk                251 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
sk                253 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
sk                254 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
sk                256 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)			\
sk                257 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
sk                259 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)			\
sk                260 include/linux/bpf-cgroup.h 	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
sk                265 include/linux/bpf-cgroup.h 	if (cgroup_bpf_enabled && (sock_ops)->sk) {	       \
sk                266 include/linux/bpf-cgroup.h 		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
sk                381 include/linux/bpf-cgroup.h #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
sk                382 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
sk                383 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
sk                384 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
sk                385 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
sk                386 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
sk                387 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
sk                388 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
sk                389 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
sk                390 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
sk                391 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
sk                392 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
sk                393 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
sk                394 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
sk                395 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
sk                396 include/linux/bpf-cgroup.h #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
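include/linux/bpf-cgroup.h defines each BPF_CGROUP_RUN_* hook twice: a statement-expression wrapper that takes the socket lock and runs the attached program when CONFIG_CGROUP_BPF is enabled, and a ({ 0; }) stub otherwise, so call sites never need #ifdefs. A compressed illustration of that shape; MY_CGROUP_RUN_HOOK and my_run_filter are invented names standing in for the real hook and its __cgroup_bpf_run_filter_* helper:

#ifdef CONFIG_CGROUP_BPF
#define MY_CGROUP_RUN_HOOK(sk, uaddr)					\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled) {					\
		lock_sock(sk);		/* program runs under the socket lock */ \
		__ret = my_run_filter(sk, uaddr);			\
		release_sock(sk);					\
	}								\
	__ret;								\
})
#else
#define MY_CGROUP_RUN_HOOK(sk, uaddr) ({ 0; })	/* compiled-out: always "allow" */
#endif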
sk               1001 include/linux/bpf.h void bpf_sk_reuseport_detach(struct sock *sk);
sk               1007 include/linux/bpf.h static inline void bpf_sk_reuseport_detach(struct sock *sk)
sk                  9 include/linux/bpfilter.h int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
sk                 11 include/linux/bpfilter.h int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
sk                 17 include/linux/bpfilter.h 	int (*sockopt)(struct sock *sk, int optname,
sk                 60 include/linux/can/core.h 		    void *data, char *ident, struct sock *sk);
sk                 68 include/linux/can/core.h void can_sock_destruct(struct sock *sk);
sk                 50 include/linux/can/skb.h static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
sk                 52 include/linux/can/skb.h 	if (sk) {
sk                 53 include/linux/can/skb.h 		sock_hold(sk);
sk                 55 include/linux/can/skb.h 		skb->sk = sk;
sk                 68 include/linux/can/skb.h 			can_skb_set_owner(nskb, skb->sk);
sk                180 include/linux/dccp.h extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
sk                308 include/linux/dccp.h static inline struct dccp_sock *dccp_sk(const struct sock *sk)
sk                310 include/linux/dccp.h 	return (struct dccp_sock *)sk;
sk                313 include/linux/dccp.h static inline const char *dccp_role(const struct sock *sk)
sk                315 include/linux/dccp.h 	switch (dccp_sk(sk)->dccps_role) {
sk                797 include/linux/filter.h int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
sk                798 include/linux/filter.h static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
sk                800 include/linux/filter.h 	return sk_filter_trim_cap(sk, skb, 1);
sk                834 include/linux/filter.h int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
sk                835 include/linux/filter.h int sk_attach_bpf(u32 ufd, struct sock *sk);
sk                836 include/linux/filter.h int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
sk                837 include/linux/filter.h int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
sk                839 include/linux/filter.h int sk_detach_filter(struct sock *sk);
sk                840 include/linux/filter.h int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
sk                843 include/linux/filter.h bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
sk                844 include/linux/filter.h void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
sk                922 include/linux/filter.h struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
sk                927 include/linux/filter.h bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
sk               1181 include/linux/filter.h 	struct sock *sk;
sk               1192 include/linux/filter.h 	struct	sock *sk;
sk               1226 include/linux/filter.h 	struct sock	*sk;
sk                 43 include/linux/icmpv6.h extern void				icmpv6_flow_init(struct sock *sk,
sk                 46 include/linux/if_pppox.h 	struct sock sk;
sk                 60 include/linux/if_pppox.h static inline struct pppox_sock *pppox_sk(struct sock *sk)
sk                 62 include/linux/if_pppox.h 	return (struct pppox_sock *)sk;
sk                 81 include/linux/if_pppox.h extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
sk                 63 include/linux/if_tap.h 	struct sock sk;
sk                115 include/linux/igmp.h extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
sk                116 include/linux/igmp.h extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
sk                118 include/linux/igmp.h extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
sk                119 include/linux/igmp.h extern void ip_mc_drop_socket(struct sock *sk);
sk                120 include/linux/igmp.h extern int ip_mc_source(int add, int omode, struct sock *sk,
sk                122 include/linux/igmp.h extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex);
sk                123 include/linux/igmp.h extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
sk                125 include/linux/igmp.h extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
sk                127 include/linux/igmp.h extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt,
sk                 20 include/linux/inet_diag.h 	void		(*idiag_get_info)(struct sock *sk,
sk                 24 include/linux/inet_diag.h 	int		(*idiag_get_aux)(struct sock *sk,
sk                 28 include/linux/inet_diag.h 	size_t		(*idiag_get_aux_size)(struct sock *sk,
sk                 39 include/linux/inet_diag.h int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
sk                 56 include/linux/inet_diag.h int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
sk                 58 include/linux/inet_diag.h void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
sk                 71 include/linux/inet_diag.h int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
sk                327 include/linux/ipv6.h extern int inet6_sk_rebuild_header(struct sock *sk);
sk                341 include/linux/ipv6.h static inline struct raw6_sock *raw6_sk(const struct sock *sk)
sk                343 include/linux/ipv6.h 	return (struct raw6_sock *)sk;
sk                357 include/linux/ipv6.h #define __ipv6_only_sock(sk)	(sk->sk_ipv6only)
sk                358 include/linux/ipv6.h #define ipv6_only_sock(sk)	(__ipv6_only_sock(sk))
sk                359 include/linux/ipv6.h #define ipv6_sk_rxinfo(sk)	((sk)->sk_family == PF_INET6 && \
sk                360 include/linux/ipv6.h 				 inet6_sk(sk)->rxopt.bits.rxinfo)
sk                362 include/linux/ipv6.h static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
sk                364 include/linux/ipv6.h 	if (sk->sk_family == AF_INET6)
sk                365 include/linux/ipv6.h 		return &sk->sk_v6_rcv_saddr;
sk                369 include/linux/ipv6.h static inline int inet_v6_ipv6only(const struct sock *sk)
sk                372 include/linux/ipv6.h 	return ipv6_only_sock(sk);
sk                375 include/linux/ipv6.h #define __ipv6_only_sock(sk)	0
sk                376 include/linux/ipv6.h #define ipv6_only_sock(sk)	0
sk                377 include/linux/ipv6.h #define ipv6_sk_rxinfo(sk)	0
sk                395 include/linux/ipv6.h static inline struct raw6_sock *raw6_sk(const struct sock *sk)
sk                 43 include/linux/kobject_ns.h 	const void *(*netlink_ns)(struct sock *sk);
sk                 55 include/linux/kobject_ns.h const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
sk                 29 include/linux/lsm_audit.h 	struct sock *sk;
sk               1686 include/linux/lsm_hooks.h 	int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
sk               1726 include/linux/lsm_hooks.h 	int (*socket_sock_rcv_skb)(struct sock *sk, struct sk_buff *skb);
sk               1732 include/linux/lsm_hooks.h 	int (*sk_alloc_security)(struct sock *sk, int family, gfp_t priority);
sk               1733 include/linux/lsm_hooks.h 	void (*sk_free_security)(struct sock *sk);
sk               1734 include/linux/lsm_hooks.h 	void (*sk_clone_security)(const struct sock *sk, struct sock *newsk);
sk               1735 include/linux/lsm_hooks.h 	void (*sk_getsecid)(struct sock *sk, u32 *secid);
sk               1736 include/linux/lsm_hooks.h 	void (*sock_graft)(struct sock *sk, struct socket *parent);
sk               1737 include/linux/lsm_hooks.h 	int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
sk               1741 include/linux/lsm_hooks.h 	void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
sk               1751 include/linux/lsm_hooks.h 	int (*tun_dev_attach)(struct sock *sk, void *security);
sk               1755 include/linux/lsm_hooks.h 	int (*sctp_bind_connect)(struct sock *sk, int optname,
sk               1757 include/linux/lsm_hooks.h 	void (*sctp_sk_clone)(struct sctp_endpoint *ep, struct sock *sk,
sk                486 include/linux/mISDNif.h 	struct sock		sk;
sk               1341 include/linux/memcontrol.h void mem_cgroup_sk_alloc(struct sock *sk);
sk               1342 include/linux/memcontrol.h void mem_cgroup_sk_free(struct sock *sk);
sk               1360 include/linux/memcontrol.h static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
sk               1361 include/linux/memcontrol.h static inline void mem_cgroup_sk_free(struct sock *sk) { };
sk                 20 include/linux/mroute.h int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
sk                 21 include/linux/mroute.h int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
sk                 37 include/linux/mroute.h static inline int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
sk                 31 include/linux/mroute6.h extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
sk                 32 include/linux/mroute6.h extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
sk                 51 include/linux/mroute6.h int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
sk                102 include/linux/mroute6.h extern int ip6mr_sk_done(struct sock *sk);
sk                108 include/linux/mroute6.h static inline int ip6mr_sk_done(struct sock *sk)
sk                120 include/linux/net.h 	struct sock		*sk;
sk                192 include/linux/net.h 	int		(*set_peek_off)(struct sock *sk, int val);
sk                198 include/linux/net.h 	int		(*read_sock)(struct sock *sk, read_descriptor_t *desc,
sk                200 include/linux/net.h 	int		(*sendpage_locked)(struct sock *sk, struct page *page,
sk                202 include/linux/net.h 	int		(*sendmsg_locked)(struct sock *sk, struct msghdr *msg,
sk                204 include/linux/net.h 	int		(*set_rcvlowat)(struct sock *sk, int val);
sk                293 include/linux/net.h int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
sk                311 include/linux/net.h int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
sk                316 include/linux/net.h u32 kernel_sock_ip_overhead(struct sock *sk);
sk               2343 include/linux/netdevice.h 						  struct sock *sk,
sk               2352 include/linux/netdevice.h 	return cb(sk, head, skb);
sk               2367 include/linux/netdevice.h 					    struct sock *sk);
sk               2631 include/linux/netdevice.h int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
sk                 71 include/linux/netfilter.h 	struct sock *sk;
sk                143 include/linux/netfilter.h 				      struct sock *sk,
sk                151 include/linux/netfilter.h 	p->sk = sk;
sk                166 include/linux/netfilter.h 	int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
sk                168 include/linux/netfilter.h 	int (*compat_set)(struct sock *sk, int optval,
sk                173 include/linux/netfilter.h 	int (*get)(struct sock *sk, int optval, void __user *user, int *len);
sk                175 include/linux/netfilter.h 	int (*compat_get)(struct sock *sk, int optval,
sk                210 include/linux/netfilter.h 			  struct sock *sk, struct sk_buff *skb,
sk                258 include/linux/netfilter.h 				   sk, net, okfn);
sk                285 include/linux/netfilter.h NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
sk                293 include/linux/netfilter.h 	    ((ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn)) == 1))
sk                294 include/linux/netfilter.h 		ret = okfn(net, sk, skb);
sk                299 include/linux/netfilter.h NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
sk                303 include/linux/netfilter.h 	int ret = nf_hook(pf, hook, net, sk, skb, in, out, okfn);
sk                305 include/linux/netfilter.h 		ret = okfn(net, sk, skb);
sk                310 include/linux/netfilter.h NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
sk                320 include/linux/netfilter.h 		if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
sk                328 include/linux/netfilter.h int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
sk                330 include/linux/netfilter.h int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
sk                333 include/linux/netfilter.h int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
sk                335 include/linux/netfilter.h int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
sk                387 include/linux/netfilter.h NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
sk                392 include/linux/netfilter.h 	return okfn(net, sk, skb);
sk                396 include/linux/netfilter.h NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
sk                400 include/linux/netfilter.h 	return okfn(net, sk, skb);
sk                404 include/linux/netfilter.h NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
sk                412 include/linux/netfilter.h 			  struct sock *sk, struct sk_buff *skb,
sk                 17 include/linux/netfilter_bridge.h int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                 57 include/linux/netfilter_ipv6.h 	int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                 62 include/linux/netfilter_ipv6.h 	int (*br_fragment)(struct net *net, struct sock *sk,
sk                 65 include/linux/netfilter_ipv6.h 			   int (*output)(struct net *, struct sock *sk,
sk                137 include/linux/netfilter_ipv6.h int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                139 include/linux/netfilter_ipv6.h 		    int (*output)(struct net *, struct sock *sk,
sk                143 include/linux/netfilter_ipv6.h static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
sk                146 include/linux/netfilter_ipv6.h 				     int (*output)(struct net *, struct sock *sk,
sk                156 include/linux/netfilter_ipv6.h 	return v6_ops->br_fragment(net, sk, skb, data, output);
sk                158 include/linux/netfilter_ipv6.h 	return br_ip6_fragment(net, sk, skb, data, output);
sk                 28 include/linux/netlink.h 	struct sock		*sk;
sk                 51 include/linux/netlink.h 	bool		(*compare)(struct net *net, struct sock *sk);
sk                122 include/linux/netlink.h void netlink_kernel_release(struct sock *sk);
sk                123 include/linux/netlink.h int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
sk                124 include/linux/netlink.h int netlink_change_ngroups(struct sock *sk, unsigned int groups);
sk                125 include/linux/netlink.h void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
sk                128 include/linux/netlink.h int netlink_has_listeners(struct sock *sk, unsigned int group);
sk                144 include/linux/netlink.h int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
sk                146 include/linux/netlink.h void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
sk                147 include/linux/netlink.h int netlink_sendskb(struct sock *sk, struct sk_buff *skb);
sk                438 include/linux/security.h int security_netlink_send(struct sock *sk, struct sk_buff *skb);
sk               1229 include/linux/security.h static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
sk               1297 include/linux/security.h int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
sk               1301 include/linux/security.h int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
sk               1302 include/linux/security.h void security_sk_free(struct sock *sk);
sk               1303 include/linux/security.h void security_sk_clone(const struct sock *sk, struct sock *newsk);
sk               1304 include/linux/security.h void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
sk               1306 include/linux/security.h void security_sock_graft(struct sock*sk, struct socket *parent);
sk               1307 include/linux/security.h int security_inet_conn_request(struct sock *sk,
sk               1311 include/linux/security.h void security_inet_conn_established(struct sock *sk,
sk               1320 include/linux/security.h int security_tun_dev_attach(struct sock *sk, void *security);
sk               1323 include/linux/security.h int security_sctp_bind_connect(struct sock *sk, int optname,
sk               1325 include/linux/security.h void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
sk               1426 include/linux/security.h static inline int security_sock_rcv_skb(struct sock *sk,
sk               1443 include/linux/security.h static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
sk               1448 include/linux/security.h static inline void security_sk_free(struct sock *sk)
sk               1452 include/linux/security.h static inline void security_sk_clone(const struct sock *sk, struct sock *newsk)
sk               1456 include/linux/security.h static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
sk               1464 include/linux/security.h static inline void security_sock_graft(struct sock *sk, struct socket *parent)
sk               1468 include/linux/security.h static inline int security_inet_conn_request(struct sock *sk,
sk               1479 include/linux/security.h static inline void security_inet_conn_established(struct sock *sk,
sk               1516 include/linux/security.h static inline int security_tun_dev_attach(struct sock *sk, void *security)
sk               1532 include/linux/security.h static inline int security_sctp_bind_connect(struct sock *sk, int optname,
sk               1540 include/linux/security.h 					  struct sock *sk,
sk                488 include/linux/skbuff.h struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size);
sk                489 include/linux/skbuff.h struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
sk                503 include/linux/skbuff.h int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
sk                706 include/linux/skbuff.h 		struct sock		*sk;
sk               1079 include/linux/skbuff.h static inline bool skb_fclone_busy(const struct sock *sk,
sk               1088 include/linux/skbuff.h 	       fclones->skb2.sk == sk;
sk               2711 include/linux/skbuff.h 		skb->sk		= NULL;
sk               2713 include/linux/skbuff.h 		BUG_ON(skb->sk);
sk               3464 include/linux/skbuff.h int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
sk               3466 include/linux/skbuff.h struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
sk               3469 include/linux/skbuff.h 					  void (*destructor)(struct sock *sk,
sk               3473 include/linux/skbuff.h struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
sk               3474 include/linux/skbuff.h 					void (*destructor)(struct sock *sk,
sk               3478 include/linux/skbuff.h struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
sk               3479 include/linux/skbuff.h 				    void (*destructor)(struct sock *sk,
sk               3482 include/linux/skbuff.h struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
sk               3501 include/linux/skbuff.h void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
sk               3502 include/linux/skbuff.h void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len);
sk               3503 include/linux/skbuff.h static inline void skb_free_datagram_locked(struct sock *sk,
sk               3506 include/linux/skbuff.h 	__skb_free_datagram_locked(sk, skb, 0);
sk               3508 include/linux/skbuff.h int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
sk               3513 include/linux/skbuff.h int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
sk               3516 include/linux/skbuff.h int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
sk               3799 include/linux/skbuff.h 		     struct sock *sk, int tstype);
sk                 52 include/linux/skmsg.h 	struct sock			*sk;
sk                 75 include/linux/skmsg.h 	void (*saved_data_ready)(struct sock *sk);
sk                 85 include/linux/skmsg.h 	struct sock			*sk;
sk                 99 include/linux/skmsg.h 	void (*saved_unhash)(struct sock *sk);
sk                100 include/linux/skmsg.h 	void (*saved_close)(struct sock *sk, long timeout);
sk                101 include/linux/skmsg.h 	void (*saved_write_space)(struct sock *sk);
sk                111 include/linux/skmsg.h int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
sk                113 include/linux/skmsg.h int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
sk                115 include/linux/skmsg.h void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len);
sk                116 include/linux/skmsg.h int sk_msg_free(struct sock *sk, struct sk_msg *msg);
sk                117 include/linux/skmsg.h int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg);
sk                118 include/linux/skmsg.h void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes);
sk                119 include/linux/skmsg.h void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
sk                122 include/linux/skmsg.h void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes);
sk                123 include/linux/skmsg.h void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes);
sk                125 include/linux/skmsg.h int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
sk                127 include/linux/skmsg.h int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
sk                278 include/linux/skmsg.h static inline struct sk_psock *sk_psock(const struct sock *sk)
sk                280 include/linux/skmsg.h 	return rcu_dereference_sk_user_data(sk);
sk                296 include/linux/skmsg.h 	struct sock *sk = psock->sk;
sk                298 include/linux/skmsg.h 	sk->sk_err = err;
sk                299 include/linux/skmsg.h 	sk->sk_error_report(sk);
sk                302 include/linux/skmsg.h struct sk_psock *sk_psock_init(struct sock *sk, int node);
sk                304 include/linux/skmsg.h int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
sk                305 include/linux/skmsg.h void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
sk                306 include/linux/skmsg.h void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
sk                308 include/linux/skmsg.h int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
sk                324 include/linux/skmsg.h void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link);
sk                326 include/linux/skmsg.h static inline void sk_psock_unlink(struct sock *sk,
sk                337 include/linux/skmsg.h 		sk_msg_free(psock->sk, psock->cork);
sk                343 include/linux/skmsg.h static inline void sk_psock_update_proto(struct sock *sk,
sk                347 include/linux/skmsg.h 	psock->saved_unhash = sk->sk_prot->unhash;
sk                348 include/linux/skmsg.h 	psock->saved_close = sk->sk_prot->close;
sk                349 include/linux/skmsg.h 	psock->saved_write_space = sk->sk_write_space;
sk                351 include/linux/skmsg.h 	psock->sk_proto = sk->sk_prot;
sk                352 include/linux/skmsg.h 	sk->sk_prot = ops;
sk                355 include/linux/skmsg.h static inline void sk_psock_restore_proto(struct sock *sk,
sk                358 include/linux/skmsg.h 	sk->sk_prot->unhash = psock->saved_unhash;
sk                361 include/linux/skmsg.h 		struct inet_connection_sock *icsk = inet_csk(sk);
sk                365 include/linux/skmsg.h 			tcp_update_ulp(sk, psock->sk_proto,
sk                368 include/linux/skmsg.h 			sk->sk_prot = psock->sk_proto;
sk                369 include/linux/skmsg.h 			sk->sk_write_space = psock->saved_write_space;
sk                373 include/linux/skmsg.h 		sk->sk_write_space = psock->saved_write_space;
sk                395 include/linux/skmsg.h static inline struct sk_psock *sk_psock_get_checked(struct sock *sk)
sk                400 include/linux/skmsg.h 	psock = sk_psock(sk);
sk                402 include/linux/skmsg.h 		if (sk->sk_prot->recvmsg != tcp_bpf_recvmsg) {
sk                415 include/linux/skmsg.h static inline struct sk_psock *sk_psock_get(struct sock *sk)
sk                420 include/linux/skmsg.h 	psock = sk_psock(sk);
sk                427 include/linux/skmsg.h void sk_psock_stop(struct sock *sk, struct sk_psock *psock);
sk                429 include/linux/skmsg.h void sk_psock_drop(struct sock *sk, struct sk_psock *psock);
sk                431 include/linux/skmsg.h static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
sk                434 include/linux/skmsg.h 		sk_psock_drop(sk, psock);
sk                437 include/linux/skmsg.h static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
sk                440 include/linux/skmsg.h 		psock->parser.saved_data_ready(sk);
sk                442 include/linux/skmsg.h 		sk->sk_data_ready(sk);
sk                 18 include/linux/sock_diag.h 	int (*get_info)(struct sk_buff *skb, struct sock *sk);
sk                 28 include/linux/sock_diag.h u64 sock_gen_cookie(struct sock *sk);
sk                 29 include/linux/sock_diag.h int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
sk                 30 include/linux/sock_diag.h void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
sk                 32 include/linux/sock_diag.h int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
sk                 33 include/linux/sock_diag.h int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
sk                 37 include/linux/sock_diag.h enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
sk                 39 include/linux/sock_diag.h 	switch (sk->sk_family) {
sk                 41 include/linux/sock_diag.h 		if (sk->sk_type == SOCK_RAW)
sk                 44 include/linux/sock_diag.h 		switch (sk->sk_protocol) {
sk                 53 include/linux/sock_diag.h 		if (sk->sk_type == SOCK_RAW)
sk                 56 include/linux/sock_diag.h 		switch (sk->sk_protocol) {
sk                 70 include/linux/sock_diag.h bool sock_diag_has_destroy_listeners(const struct sock *sk)
sk                 72 include/linux/sock_diag.h 	const struct net *n = sock_net(sk);
sk                 73 include/linux/sock_diag.h 	const enum sknetlink_groups group = sock_diag_destroy_group(sk);
sk                 78 include/linux/sock_diag.h void sock_diag_broadcast_destroy(struct sock *sk);
sk                 80 include/linux/sock_diag.h int sock_diag_destroy(struct sock *sk, int err);
sk                420 include/linux/tcp.h static inline struct tcp_sock *tcp_sk(const struct sock *sk)
sk                422 include/linux/tcp.h 	return (struct tcp_sock *)sk;
sk                443 include/linux/tcp.h static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
sk                445 include/linux/tcp.h 	return (struct tcp_timewait_sock *)sk;
sk                448 include/linux/tcp.h static inline bool tcp_passive_fastopen(const struct sock *sk)
sk                450 include/linux/tcp.h 	return sk->sk_state == TCP_SYN_RECV &&
sk                451 include/linux/tcp.h 	       rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
sk                454 include/linux/tcp.h static inline void fastopen_queue_tune(struct sock *sk, int backlog)
sk                456 include/linux/tcp.h 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
sk                457 include/linux/tcp.h 	int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);
sk                475 include/linux/tcp.h struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
sk                 41 include/linux/udp.h #define udp_port_hash		inet.sk.__sk_common.skc_u16hashes[0]
sk                 42 include/linux/udp.h #define udp_portaddr_hash	inet.sk.__sk_common.skc_u16hashes[1]
sk                 43 include/linux/udp.h #define udp_portaddr_node	inet.sk.__sk_common.skc_portaddr_node
sk                 75 include/linux/udp.h 	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
sk                 76 include/linux/udp.h 	int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
sk                 77 include/linux/udp.h 	void (*encap_destroy)(struct sock *sk);
sk                 80 include/linux/udp.h 	struct sk_buff *	(*gro_receive)(struct sock *sk,
sk                 83 include/linux/udp.h 	int			(*gro_complete)(struct sock *sk,
sk                 96 include/linux/udp.h static inline struct udp_sock *udp_sk(const struct sock *sk)
sk                 98 include/linux/udp.h 	return (struct udp_sock *)sk;
sk                101 include/linux/udp.h static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
sk                103 include/linux/udp.h 	udp_sk(sk)->no_check6_tx = val;
sk                106 include/linux/udp.h static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
sk                108 include/linux/udp.h 	udp_sk(sk)->no_check6_rx = val;
sk                111 include/linux/udp.h static inline bool udp_get_no_check6_tx(struct sock *sk)
sk                113 include/linux/udp.h 	return udp_sk(sk)->no_check6_tx;
sk                116 include/linux/udp.h static inline bool udp_get_no_check6_rx(struct sock *sk)
sk                118 include/linux/udp.h 	return udp_sk(sk)->no_check6_rx;
sk                121 include/linux/udp.h static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
sk                132 include/linux/udp.h static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
sk                134 include/linux/udp.h 	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
sk                110 include/net/addrconf.h bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
sk                112 include/net/addrconf.h bool inet_rcv_saddr_any(const struct sock *sk);
sk                214 include/net/addrconf.h int ipv6_sock_mc_join(struct sock *sk, int ifindex,
sk                216 include/net/addrconf.h int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
sk                218 include/net/addrconf.h void __ipv6_sock_mc_close(struct sock *sk);
sk                219 include/net/addrconf.h void ipv6_sock_mc_close(struct sock *sk);
sk                220 include/net/addrconf.h bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
sk                272 include/net/addrconf.h int ipv6_sock_ac_join(struct sock *sk, int ifindex,
sk                274 include/net/addrconf.h int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
sk                276 include/net/addrconf.h void ipv6_sock_ac_close(struct sock *sk);
sk                 17 include/net/af_unix.h struct sock *unix_peer_get(struct sock *sk);
sk                 55 include/net/af_unix.h 	struct sock		sk;
sk                 70 include/net/af_unix.h static inline struct unix_sock *unix_sk(const struct sock *sk)
sk                 72 include/net/af_unix.h 	return (struct unix_sock *)sk;
sk                 77 include/net/af_unix.h long unix_inq_len(struct sock *sk);
sk                 78 include/net/af_unix.h long unix_outq_len(struct sock *sk);
sk                 25 include/net/af_vsock.h #define sk_vsock(__vsk)   (&(__vsk)->sk)
sk                 29 include/net/af_vsock.h 	struct sock sk;
sk                195 include/net/af_vsock.h void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
sk                260 include/net/ax25.h 	struct sock		*sk;		/* Backlink to socket */
sk                265 include/net/ax25.h 	struct sock		sk;
sk                269 include/net/ax25.h static inline struct ax25_sock *ax25_sk(const struct sock *sk)
sk                271 include/net/ax25.h 	return (struct ax25_sock *) sk;
sk                274 include/net/ax25.h static inline struct ax25_cb *sk_to_ax25(const struct sock *sk)
sk                276 include/net/ax25.h 	return ax25_sk(sk)->cb;
sk                246 include/net/bluetooth/bluetooth.h 	struct sock sk;
sk                276 include/net/bluetooth/bluetooth.h int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
sk                277 include/net/bluetooth/bluetooth.h int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
sk                279 include/net/bluetooth/bluetooth.h void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
sk                280 include/net/bluetooth/bluetooth.h void bt_accept_unlink(struct sock *sk);
sk                345 include/net/bluetooth/bluetooth.h static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
sk                350 include/net/bluetooth/bluetooth.h 	skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
sk                357 include/net/bluetooth/bluetooth.h 	*err = sock_error(sk);
sk                361 include/net/bluetooth/bluetooth.h 	if (sk->sk_shutdown) {
sk                375 include/net/bluetooth/bluetooth.h void hci_sock_set_flag(struct sock *sk, int nr);
sk                376 include/net/bluetooth/bluetooth.h void hci_sock_clear_flag(struct sock *sk, int nr);
sk                377 include/net/bluetooth/bluetooth.h int hci_sock_test_flag(struct sock *sk, int nr);
sk                378 include/net/bluetooth/bluetooth.h unsigned short hci_sock_get_channel(struct sock *sk);
sk                379 include/net/bluetooth/bluetooth.h u32 hci_sock_get_cookie(struct sock *sk);
sk                414 include/net/bluetooth/bluetooth.h void bt_sock_reclassify_lock(struct sock *sk, int proto);
sk               1485 include/net/bluetooth/hci_core.h 	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
sk               1496 include/net/bluetooth/hci_core.h 	void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
sk               1589 include/net/bluetooth/hci_core.h void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
sk               1591 include/net/bluetooth/hci_core.h void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
sk                679 include/net/bluetooth/l2cap.h #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
sk                302 include/net/bluetooth/rfcomm.h #define rfcomm_pi(sk) ((struct rfcomm_pinfo *) sk)
sk                362 include/net/bluetooth/rfcomm.h int  rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
sk                  8 include/net/bpf_sk_storage.h void bpf_sk_storage_free(struct sock *sk);
sk                 14 include/net/bpf_sk_storage.h int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk);
sk                 16 include/net/bpf_sk_storage.h static inline int bpf_sk_storage_clone(const struct sock *sk,
sk                 37 include/net/busy_poll.h static inline bool sk_can_busy_loop(const struct sock *sk)
sk                 39 include/net/busy_poll.h 	return sk->sk_ll_usec && !signal_pending(current);
sk                 54 include/net/busy_poll.h static inline bool sk_can_busy_loop(struct sock *sk)
sk                 86 include/net/busy_poll.h static inline bool sk_busy_loop_timeout(struct sock *sk,
sk                 90 include/net/busy_poll.h 	unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);
sk                102 include/net/busy_poll.h static inline void sk_busy_loop(struct sock *sk, int nonblock)
sk                105 include/net/busy_poll.h 	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
sk                108 include/net/busy_poll.h 		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
sk                122 include/net/busy_poll.h static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
sk                125 include/net/busy_poll.h 	WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
sk                127 include/net/busy_poll.h 	sk_rx_queue_set(sk, skb);
sk                131 include/net/busy_poll.h static inline void sk_mark_napi_id_once(struct sock *sk,
sk                135 include/net/busy_poll.h 	if (!READ_ONCE(sk->sk_napi_id))
sk                136 include/net/busy_poll.h 		WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
sk                196 include/net/cipso_ipv4.h int cipso_v4_sock_setattr(struct sock *sk,
sk                199 include/net/cipso_ipv4.h void cipso_v4_sock_delattr(struct sock *sk);
sk                200 include/net/cipso_ipv4.h int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr);
sk                227 include/net/cipso_ipv4.h static inline int cipso_v4_sock_setattr(struct sock *sk,
sk                234 include/net/cipso_ipv4.h static inline void cipso_v4_sock_delattr(struct sock *sk)
sk                238 include/net/cipso_ipv4.h static inline int cipso_v4_sock_getattr(struct sock *sk,
sk                 62 include/net/cls_cgroup.h 		struct sock *sk = skb_to_full_sk(skb);
sk                 65 include/net/cls_cgroup.h 		if (!sk || !sk_fullsock(sk))
sk                 68 include/net/cls_cgroup.h 		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
sk                122 include/net/dn.h 	int (*persist_fxn)(struct sock *sk);
sk                124 include/net/dn.h 	void (*keepalive_fxn)(struct sock *sk);
sk                128 include/net/dn.h static inline struct dn_scp *DN_SK(struct sock *sk)
sk                130 include/net/dn.h 	return (struct dn_scp *)(sk + 1);
sk                209 include/net/dn.h int dn_destroy_timer(struct sock *sk);
sk                216 include/net/dn.h void dn_start_slow_timer(struct sock *sk);
sk                217 include/net/dn.h void dn_stop_slow_timer(struct sock *sk);
sk                 22 include/net/dn_neigh.h int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                 23 include/net/dn_neigh.h int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                 26 include/net/dn_neigh.h int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                 10 include/net/dn_nsp.h void dn_nsp_send_data_ack(struct sock *sk);
sk                 11 include/net/dn_nsp.h void dn_nsp_send_oth_ack(struct sock *sk);
sk                 12 include/net/dn_nsp.h void dn_send_conn_ack(struct sock *sk);
sk                 13 include/net/dn_nsp.h void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
sk                 14 include/net/dn_nsp.h void dn_nsp_send_disc(struct sock *sk, unsigned char type,
sk                 18 include/net/dn_nsp.h void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
sk                 19 include/net/dn_nsp.h void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
sk                 21 include/net/dn_nsp.h void dn_nsp_output(struct sock *sk);
sk                 22 include/net/dn_nsp.h int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
sk                 24 include/net/dn_nsp.h void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
sk                 26 include/net/dn_nsp.h unsigned long dn_nsp_persist(struct sock *sk);
sk                 27 include/net/dn_nsp.h int dn_nsp_xmit_timeout(struct sock *sk);
sk                 30 include/net/dn_nsp.h int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
sk                 32 include/net/dn_nsp.h struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
sk                 33 include/net/dn_nsp.h struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
sk                188 include/net/dn_nsp.h static __inline__ int dn_congested(struct sock *sk)
sk                190 include/net/dn_nsp.h         return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
sk                 10 include/net/dn_route.h struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
sk                 12 include/net/dn_route.h 			 struct sock *sk, int flags);
sk                 35 include/net/dst.h 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                378 include/net/dst.h int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                381 include/net/dst.h 	return dst_discard_out(&init_net, skb->sk, skb);
sk                434 include/net/dst.h static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                436 include/net/dst.h 	return skb_dst(skb)->output(net, sk, skb);
sk                464 include/net/dst.h 					    const struct sock *sk,
sk                472 include/net/dst.h 		      const struct flowi *fl, const struct sock *sk,
sk                481 include/net/dst.h 						  const struct sock *sk,
sk                494 include/net/dst.h 			      const struct flowi *fl, const struct sock *sk,
sk                500 include/net/dst.h 					const struct sock *sk, int flags,
sk                504 include/net/dst.h 				    const struct flowi *fl, const struct sock *sk,
sk                 29 include/net/dst_ops.h 	void			(*update_pmtu)(struct dst_entry *dst, struct sock *sk,
sk                 32 include/net/dst_ops.h 	void			(*redirect)(struct dst_entry *dst, struct sock *sk,
sk                 34 include/net/dst_ops.h 	int			(*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                 19 include/net/inet6_connection_sock.h struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
sk                 22 include/net/inet6_connection_sock.h void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
sk                 24 include/net/inet6_connection_sock.h int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
sk                 26 include/net/inet6_connection_sock.h struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
sk                 70 include/net/inet6_hashtables.h 	struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr,
sk                 74 include/net/inet6_hashtables.h 	if (sk)
sk                 75 include/net/inet6_hashtables.h 		return sk;
sk                 88 include/net/inet6_hashtables.h 	struct sock *sk = skb_steal_sock(skb);
sk                 91 include/net/inet6_hashtables.h 	if (sk)
sk                 92 include/net/inet6_hashtables.h 		return sk;
sk                106 include/net/inet6_hashtables.h int inet6_hash(struct sock *sk);
sk                 28 include/net/inet_common.h int inet_send_prepare(struct sock *sk);
sk                 36 include/net/inet_common.h void inet_sock_destruct(struct sock *sk);
sk                 38 include/net/inet_common.h int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
sk                 43 include/net/inet_common.h int inet_ctl_sock_create(struct sock **sk, unsigned short family,
sk                 46 include/net/inet_common.h int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
sk                 54 include/net/inet_common.h static inline void inet_ctl_sock_destroy(struct sock *sk)
sk                 56 include/net/inet_common.h 	if (sk)
sk                 57 include/net/inet_common.h 		sock_release(sk->sk_socket);
sk                 34 include/net/inet_connection_sock.h 	int	    (*queue_xmit)(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
sk                 35 include/net/inet_connection_sock.h 	void	    (*send_check)(struct sock *sk, struct sk_buff *skb);
sk                 36 include/net/inet_connection_sock.h 	int	    (*rebuild_header)(struct sock *sk);
sk                 37 include/net/inet_connection_sock.h 	void	    (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb);
sk                 38 include/net/inet_connection_sock.h 	int	    (*conn_request)(struct sock *sk, struct sk_buff *skb);
sk                 39 include/net/inet_connection_sock.h 	struct sock *(*syn_recv_sock)(const struct sock *sk, struct sk_buff *skb,
sk                 47 include/net/inet_connection_sock.h 	int	    (*setsockopt)(struct sock *sk, int level, int optname,
sk                 49 include/net/inet_connection_sock.h 	int	    (*getsockopt)(struct sock *sk, int level, int optname,
sk                 52 include/net/inet_connection_sock.h 	int	    (*compat_setsockopt)(struct sock *sk,
sk                 55 include/net/inet_connection_sock.h 	int	    (*compat_getsockopt)(struct sock *sk,
sk                 59 include/net/inet_connection_sock.h 	void	    (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
sk                 60 include/net/inet_connection_sock.h 	void	    (*mtu_reduced)(struct sock *sk);
sk                101 include/net/inet_connection_sock.h 	void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
sk                103 include/net/inet_connection_sock.h 	unsigned int		  (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
sk                149 include/net/inet_connection_sock.h static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
sk                151 include/net/inet_connection_sock.h 	return (struct inet_connection_sock *)sk;
sk                154 include/net/inet_connection_sock.h static inline void *inet_csk_ca(const struct sock *sk)
sk                156 include/net/inet_connection_sock.h 	return (void *)inet_csk(sk)->icsk_ca_priv;
sk                159 include/net/inet_connection_sock.h struct sock *inet_csk_clone_lock(const struct sock *sk,
sk                171 include/net/inet_connection_sock.h void inet_csk_init_xmit_timers(struct sock *sk,
sk                175 include/net/inet_connection_sock.h void inet_csk_clear_xmit_timers(struct sock *sk);
sk                177 include/net/inet_connection_sock.h static inline void inet_csk_schedule_ack(struct sock *sk)
sk                179 include/net/inet_connection_sock.h 	inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;
sk                182 include/net/inet_connection_sock.h static inline int inet_csk_ack_scheduled(const struct sock *sk)
sk                184 include/net/inet_connection_sock.h 	return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED;
sk                187 include/net/inet_connection_sock.h static inline void inet_csk_delack_init(struct sock *sk)
sk                189 include/net/inet_connection_sock.h 	memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
sk                192 include/net/inet_connection_sock.h void inet_csk_delete_keepalive_timer(struct sock *sk);
sk                193 include/net/inet_connection_sock.h void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
sk                195 include/net/inet_connection_sock.h static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
sk                197 include/net/inet_connection_sock.h 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                202 include/net/inet_connection_sock.h 		sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
sk                207 include/net/inet_connection_sock.h 		sk_stop_timer(sk, &icsk->icsk_delack_timer);
sk                217 include/net/inet_connection_sock.h static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
sk                221 include/net/inet_connection_sock.h 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                225 include/net/inet_connection_sock.h 			 sk, what, when, (void *)_THIS_IP_);
sk                234 include/net/inet_connection_sock.h 		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
sk                238 include/net/inet_connection_sock.h 		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
sk                253 include/net/inet_connection_sock.h struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern);
sk                255 include/net/inet_connection_sock.h int inet_csk_get_port(struct sock *sk, unsigned short snum);
sk                257 include/net/inet_connection_sock.h struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
sk                259 include/net/inet_connection_sock.h struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
sk                263 include/net/inet_connection_sock.h struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
sk                266 include/net/inet_connection_sock.h void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
sk                268 include/net/inet_connection_sock.h struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
sk                272 include/net/inet_connection_sock.h static inline void inet_csk_reqsk_queue_added(struct sock *sk)
sk                274 include/net/inet_connection_sock.h 	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
sk                277 include/net/inet_connection_sock.h static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
sk                279 include/net/inet_connection_sock.h 	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
sk                282 include/net/inet_connection_sock.h static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
sk                284 include/net/inet_connection_sock.h 	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
sk                287 include/net/inet_connection_sock.h void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
sk                288 include/net/inet_connection_sock.h void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
sk                290 include/net/inet_connection_sock.h void inet_csk_destroy_sock(struct sock *sk);
sk                291 include/net/inet_connection_sock.h void inet_csk_prepare_forced_close(struct sock *sk);
sk                296 include/net/inet_connection_sock.h static inline __poll_t inet_csk_listen_poll(const struct sock *sk)
sk                298 include/net/inet_connection_sock.h 	return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ?
sk                302 include/net/inet_connection_sock.h int inet_csk_listen_start(struct sock *sk, int backlog);
sk                303 include/net/inet_connection_sock.h void inet_csk_listen_stop(struct sock *sk);
sk                305 include/net/inet_connection_sock.h void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
sk                307 include/net/inet_connection_sock.h int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
sk                309 include/net/inet_connection_sock.h int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
sk                312 include/net/inet_connection_sock.h struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
sk                316 include/net/inet_connection_sock.h static inline void inet_csk_enter_pingpong_mode(struct sock *sk)
sk                318 include/net/inet_connection_sock.h 	inet_csk(sk)->icsk_ack.pingpong = TCP_PINGPONG_THRESH;
sk                321 include/net/inet_connection_sock.h static inline void inet_csk_exit_pingpong_mode(struct sock *sk)
sk                323 include/net/inet_connection_sock.h 	inet_csk(sk)->icsk_ack.pingpong = 0;
sk                326 include/net/inet_connection_sock.h static inline bool inet_csk_in_pingpong_mode(struct sock *sk)
sk                328 include/net/inet_connection_sock.h 	return inet_csk(sk)->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
sk                331 include/net/inet_connection_sock.h static inline void inet_csk_inc_pingpong_cnt(struct sock *sk)
sk                333 include/net/inet_connection_sock.h 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                 52 include/net/inet_ecn.h static inline void INET_ECN_xmit(struct sock *sk)
sk                 54 include/net/inet_ecn.h 	inet_sk(sk)->tos |= INET_ECN_ECT_0;
sk                 55 include/net/inet_ecn.h 	if (inet6_sk(sk) != NULL)
sk                 56 include/net/inet_ecn.h 		inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
sk                 59 include/net/inet_ecn.h static inline void INET_ECN_dontxmit(struct sock *sk)
sk                 61 include/net/inet_ecn.h 	inet_sk(sk)->tos &= ~INET_ECN_MASK;
sk                 62 include/net/inet_ecn.h 	if (inet6_sk(sk) != NULL)
sk                 63 include/net/inet_ecn.h 		inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
sk                 70 include/net/inet_ecn.h #define	IP6_ECN_flow_xmit(sk, label) do {				\
sk                 71 include/net/inet_ecn.h 	if (INET_ECN_is_capable(inet6_sk(sk)->tclass))			\
sk                224 include/net/inet_hashtables.h void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
sk                233 include/net/inet_hashtables.h static inline int inet_sk_listen_hashfn(const struct sock *sk)
sk                235 include/net/inet_hashtables.h 	return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
sk                239 include/net/inet_hashtables.h int __inet_inherit_port(const struct sock *sk, struct sock *child);
sk                241 include/net/inet_hashtables.h void inet_put_port(struct sock *sk);
sk                250 include/net/inet_hashtables.h bool inet_ehash_insert(struct sock *sk, struct sock *osk);
sk                251 include/net/inet_hashtables.h bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
sk                252 include/net/inet_hashtables.h int __inet_hash(struct sock *sk, struct sock *osk);
sk                253 include/net/inet_hashtables.h int inet_hash(struct sock *sk);
sk                254 include/net/inet_hashtables.h void inet_unhash(struct sock *sk);
sk                350 include/net/inet_hashtables.h 	struct sock *sk;
sk                352 include/net/inet_hashtables.h 	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
sk                355 include/net/inet_hashtables.h 	if (sk)
sk                356 include/net/inet_hashtables.h 		return sk;
sk                369 include/net/inet_hashtables.h 	struct sock *sk;
sk                372 include/net/inet_hashtables.h 	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
sk                375 include/net/inet_hashtables.h 	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
sk                376 include/net/inet_hashtables.h 		sk = NULL;
sk                377 include/net/inet_hashtables.h 	return sk;
sk                388 include/net/inet_hashtables.h 	struct sock *sk = skb_steal_sock(skb);
sk                392 include/net/inet_hashtables.h 	if (sk)
sk                393 include/net/inet_hashtables.h 		return sk;
sk                405 include/net/inet_hashtables.h static inline void sk_daddr_set(struct sock *sk, __be32 addr)
sk                407 include/net/inet_hashtables.h 	sk->sk_daddr = addr; /* alias of inet_daddr */
sk                409 include/net/inet_hashtables.h 	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
sk                413 include/net/inet_hashtables.h static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
sk                415 include/net/inet_hashtables.h 	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
sk                417 include/net/inet_hashtables.h 	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
sk                422 include/net/inet_hashtables.h 			struct sock *sk, u32 port_offset,
sk                428 include/net/inet_hashtables.h 		      struct sock *sk);
sk                103 include/net/inet_sock.h static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
sk                105 include/net/inet_sock.h 	return (struct inet_request_sock *)sk;
sk                108 include/net/inet_sock.h static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb)
sk                110 include/net/inet_sock.h 	if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)
sk                113 include/net/inet_sock.h 	return sk->sk_mark;
sk                116 include/net/inet_sock.h static inline int inet_request_bound_dev_if(const struct sock *sk,
sk                120 include/net/inet_sock.h 	struct net *net = sock_net(sk);
sk                122 include/net/inet_sock.h 	if (!sk->sk_bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept)
sk                126 include/net/inet_sock.h 	return sk->sk_bound_dev_if;
sk                129 include/net/inet_sock.h static inline int inet_sk_bound_l3mdev(const struct sock *sk)
sk                132 include/net/inet_sock.h 	struct net *net = sock_net(sk);
sk                136 include/net/inet_sock.h 						      sk->sk_bound_dev_if);
sk                197 include/net/inet_sock.h 	struct sock		sk;
sk                202 include/net/inet_sock.h #define inet_daddr		sk.__sk_common.skc_daddr
sk                203 include/net/inet_sock.h #define inet_rcv_saddr		sk.__sk_common.skc_rcv_saddr
sk                204 include/net/inet_sock.h #define inet_dport		sk.__sk_common.skc_dport
sk                205 include/net/inet_sock.h #define inet_num		sk.__sk_common.skc_num
sk                262 include/net/inet_sock.h static inline struct sock *sk_to_full_sk(struct sock *sk)
sk                265 include/net/inet_sock.h 	if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
sk                266 include/net/inet_sock.h 		sk = inet_reqsk(sk)->rsk_listener;
sk                268 include/net/inet_sock.h 	return sk;
sk                272 include/net/inet_sock.h static inline const struct sock *sk_const_to_full_sk(const struct sock *sk)
sk                275 include/net/inet_sock.h 	if (sk && sk->sk_state == TCP_NEW_SYN_RECV)
sk                276 include/net/inet_sock.h 		sk = ((const struct request_sock *)sk)->rsk_listener;
sk                278 include/net/inet_sock.h 	return sk;
sk                283 include/net/inet_sock.h 	return sk_to_full_sk(skb->sk);
sk                286 include/net/inet_sock.h static inline struct inet_sock *inet_sk(const struct sock *sk)
sk                288 include/net/inet_sock.h 	return (struct inet_sock *)sk;
sk                306 include/net/inet_sock.h int inet_sk_rebuild_header(struct sock *sk);
sk                315 include/net/inet_sock.h static inline int inet_sk_state_load(const struct sock *sk)
sk                318 include/net/inet_sock.h 	return smp_load_acquire(&sk->sk_state);
sk                329 include/net/inet_sock.h void inet_sk_state_store(struct sock *sk, int newstate);
sk                331 include/net/inet_sock.h void inet_sk_set_state(struct sock *sk, int state);
sk                349 include/net/inet_sock.h static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
sk                353 include/net/inet_sock.h 	if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
sk                358 include/net/inet_sock.h static inline void inet_inc_convert_csum(struct sock *sk)
sk                360 include/net/inet_sock.h 	inet_sk(sk)->convert_csum++;
sk                363 include/net/inet_sock.h static inline void inet_dec_convert_csum(struct sock *sk)
sk                365 include/net/inet_sock.h 	if (inet_sk(sk)->convert_csum > 0)
sk                366 include/net/inet_sock.h 		inet_sk(sk)->convert_csum--;
sk                369 include/net/inet_sock.h static inline bool inet_get_convert_csum(struct sock *sk)
sk                371 include/net/inet_sock.h 	return !!inet_sk(sk)->convert_csum;
sk                 80 include/net/inet_timewait_sock.h static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
sk                 82 include/net/inet_timewait_sock.h 	return (struct inet_timewait_sock *)sk;
sk                 91 include/net/inet_timewait_sock.h struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
sk                 95 include/net/inet_timewait_sock.h void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
sk                 91 include/net/ip.h 	ipcm->sockc.mark = inet->sk.sk_mark;
sk                 92 include/net/ip.h 	ipcm->sockc.tsflags = inet->sk.sk_tsflags;
sk                 93 include/net/ip.h 	ipcm->oif = inet->sk.sk_bound_dev_if;
sk                123 include/net/ip.h 	struct sock		*sk;
sk                151 include/net/ip.h int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
sk                161 include/net/ip.h int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                162 include/net/ip.h int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                163 include/net/ip.h int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                204 include/net/ip.h int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                205 include/net/ip.h int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                207 include/net/ip.h int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
sk                210 include/net/ip.h int ip_append_data(struct sock *sk, struct flowi4 *fl4,
sk                219 include/net/ip.h ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
sk                221 include/net/ip.h struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
sk                225 include/net/ip.h int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
sk                226 include/net/ip.h void ip_flush_pending_frames(struct sock *sk);
sk                227 include/net/ip.h struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
sk                234 include/net/ip.h static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
sk                237 include/net/ip.h 	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
sk                240 include/net/ip.h static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
sk                242 include/net/ip.h 	return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
sk                250 include/net/ip.h static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
sk                252 include/net/ip.h 	return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
sk                256 include/net/ip.h int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
sk                257 include/net/ip.h int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
sk                259 include/net/ip.h void ip4_datagram_release_cb(struct sock *sk);
sk                279 include/net/ip.h void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
sk                412 include/net/ip.h int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
sk                414 include/net/ip.h 	u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);
sk                421 include/net/ip.h static inline bool ip_sk_accept_pmtu(const struct sock *sk)
sk                423 include/net/ip.h 	return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
sk                424 include/net/ip.h 	       inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
sk                427 include/net/ip.h static inline bool ip_sk_use_pmtu(const struct sock *sk)
sk                429 include/net/ip.h 	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
sk                432 include/net/ip.h static inline bool ip_sk_ignore_df(const struct sock *sk)
sk                434 include/net/ip.h 	return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
sk                435 include/net/ip.h 	       inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
sk                451 include/net/ip.h static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
sk                454 include/net/ip.h 	if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
sk                498 include/net/ip.h 					struct sock *sk, int segs)
sk                508 include/net/ip.h 		if (sk && inet_sk(sk)->inet_daddr) {
sk                509 include/net/ip.h 			iph->id = htons(inet_sk(sk)->inet_id);
sk                510 include/net/ip.h 			inet_sk(sk)->inet_id += segs;
sk                520 include/net/ip.h 				   struct sock *sk)
sk                522 include/net/ip.h 	ip_select_ident_segs(net, skb, sk, 1);
sk                618 include/net/ip.h static __inline__ void inet_reset_saddr(struct sock *sk)
sk                620 include/net/ip.h 	inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
sk                622 include/net/ip.h 	if (sk->sk_family == PF_INET6) {
sk                623 include/net/ip.h 		struct ipv6_pinfo *np = inet6_sk(sk);
sk                626 include/net/ip.h 		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
sk                725 include/net/ip.h void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
sk                726 include/net/ip.h void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
sk                728 include/net/ip.h int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
sk                730 include/net/ip.h int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
sk                732 include/net/ip.h int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
sk                734 include/net/ip.h int compat_ip_setsockopt(struct sock *sk, int level, int optname,
sk                736 include/net/ip.h int compat_ip_getsockopt(struct sock *sk, int level, int optname,
sk                738 include/net/ip.h int ip_ra_control(struct sock *sk, unsigned char on,
sk                741 include/net/ip.h int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
sk                742 include/net/ip.h void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
sk                744 include/net/ip.h void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
sk                749 include/net/ip.h 	ip_cmsg_recv_offset(msg, skb->sk, skb, 0, 0);
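
The include/net/ip.h entries above cover the IPv4 output path together with its path-MTU helpers. As a minimal, hedged sketch (example_set_df is a hypothetical helper, not part of this listing), a sender could combine ip_dont_fragment() and ip_sk_ignore_df() to decide the DF bit:

	#include <net/ip.h>

	/* Illustrative only: choose the IPv4 DF bit from the socket's
	 * pmtudisc setting, using the helpers declared above. */
	static void example_set_df(const struct sock *sk,
				   const struct dst_entry *dst,
				   struct iphdr *iph)
	{
		if (ip_dont_fragment(sk, dst) && !ip_sk_ignore_df(sk))
			iph->frag_off = htons(IP_DF);
		else
			iph->frag_off = 0;
	}
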
sk                 80 include/net/ip6_checksum.h static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
sk                 82 include/net/ip6_checksum.h 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                 84 include/net/ip6_checksum.h 	__tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
sk                 88 include/net/ip6_route.h 					       const struct sock *sk,
sk                 91 include/net/ip6_route.h struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
sk                 95 include/net/ip6_route.h 						 const struct sock *sk,
sk                 98 include/net/ip6_route.h 	return ip6_route_output_flags(net, sk, fl6, 0);
sk                185 include/net/ip6_route.h void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
sk                189 include/net/ip6_route.h void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
sk                226 include/net/ip6_route.h static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
sk                230 include/net/ip6_route.h 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                233 include/net/ip6_route.h 	sk_setup_caps(sk, dst);
sk                240 include/net/ip6_route.h void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
sk                261 include/net/ip6_route.h int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                266 include/net/ip6_route.h 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
sk                267 include/net/ip6_route.h 				inet6_sk(skb->sk) : NULL;
sk                273 include/net/ip6_route.h static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
sk                275 include/net/ip6_route.h 	return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE &&
sk                276 include/net/ip6_route.h 	       inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
sk                279 include/net/ip6_route.h static inline bool ip6_sk_ignore_df(const struct sock *sk)
sk                281 include/net/ip6_route.h 	return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
sk                282 include/net/ip6_route.h 	       inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
sk                153 include/net/ip6_tunnel.h static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
sk                160 include/net/ip6_tunnel.h 	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
sk                414 include/net/ip_tunnels.h void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
sk                274 include/net/ipv6.h 	struct sock		*sk;
sk                393 include/net/ipv6.h struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
sk                396 include/net/ipv6.h static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
sk                400 include/net/ipv6.h 		return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
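
As the inline body above shows, fl6_sock_lookup() reports a missing flow label as ERR_PTR(-ENOENT) rather than NULL, so a caller needs an IS_ERR() check. A hedged sketch (the wrapping function and variable names are illustrative):

	#include <linux/err.h>
	#include <net/ipv6.h>

	/* Illustrative only: look up the flow label attached to sk for the
	 * label carried in fl6, treating ERR_PTR as "not found". */
	static int example_check_flowlabel(struct sock *sk, struct flowi6 *fl6)
	{
		struct ip6_flowlabel *flowlabel;

		flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
		if (IS_ERR(flowlabel))
			return -EINVAL;
		fl6_sock_release(flowlabel);	/* drop the reference taken by the lookup */
		return 0;
	}
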
sk                408 include/net/ipv6.h void fl6_free_socklist(struct sock *sk);
sk                409 include/net/ipv6.h int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
sk                410 include/net/ipv6.h int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
sk                424 include/net/ipv6.h void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
sk                427 include/net/ipv6.h int ip6_ra_control(struct sock *sk, int sel);
sk                431 include/net/ipv6.h struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
sk                433 include/net/ipv6.h struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
sk                440 include/net/ipv6.h bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
sk                442 include/net/ipv6.h struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
sk                906 include/net/ipv6.h static inline void ip6_set_txhash(struct sock *sk) { }
sk                978 include/net/ipv6.h int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                983 include/net/ipv6.h int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
sk                988 include/net/ipv6.h int ip6_append_data(struct sock *sk,
sk                995 include/net/ipv6.h int ip6_push_pending_frames(struct sock *sk);
sk                997 include/net/ipv6.h void ip6_flush_pending_frames(struct sock *sk);
sk               1001 include/net/ipv6.h struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue,
sk               1004 include/net/ipv6.h struct sk_buff *ip6_make_skb(struct sock *sk,
sk               1012 include/net/ipv6.h static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
sk               1014 include/net/ipv6.h 	return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork,
sk               1015 include/net/ipv6.h 			      &inet6_sk(sk)->cork);
sk               1018 include/net/ipv6.h int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
sk               1020 include/net/ipv6.h struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
sk               1022 include/net/ipv6.h struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
sk               1032 include/net/ipv6.h int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
sk               1039 include/net/ipv6.h int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
sk               1040 include/net/ipv6.h int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
sk               1077 include/net/ipv6.h int ipv6_setsockopt(struct sock *sk, int level, int optname,
sk               1079 include/net/ipv6.h int ipv6_getsockopt(struct sock *sk, int level, int optname,
sk               1081 include/net/ipv6.h int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
sk               1083 include/net/ipv6.h int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
sk               1086 include/net/ipv6.h int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
sk               1088 include/net/ipv6.h int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
sk               1089 include/net/ipv6.h int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr,
sk               1091 include/net/ipv6.h int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr);
sk               1092 include/net/ipv6.h void ip6_datagram_release_cb(struct sock *sk);
sk               1094 include/net/ipv6.h int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
sk               1096 include/net/ipv6.h int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
sk               1098 include/net/ipv6.h void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
sk               1100 include/net/ipv6.h void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
sk               1101 include/net/ipv6.h void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
sk               1110 include/net/ipv6.h 			      struct sock *sk);
sk               1122 include/net/ipv6.h int ip6_mc_source(int add, int omode, struct sock *sk,
sk               1124 include/net/ipv6.h int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
sk               1125 include/net/ipv6.h int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
sk               1158 include/net/ipv6.h int ipv6_sock_mc_join(struct sock *sk, int ifindex,
sk               1160 include/net/ipv6.h int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
sk               1162 include/net/ipv6.h int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
sk                 23 include/net/ipv6_stubs.h 	int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
sk                 25 include/net/ipv6_stubs.h 	int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
sk                 28 include/net/ipv6_stubs.h 						  const struct sock *sk,
sk                 65 include/net/ipv6_stubs.h 	int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
sk                 97 include/net/ipx.h 	struct sock		sk;
sk                112 include/net/ipx.h static inline struct ipx_sock *ipx_sk(struct sock *sk)
sk                114 include/net/ipx.h 	return (struct ipx_sock *)sk;
sk                149 include/net/ipx.h int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
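
ipx_sk() above follows the pattern used throughout this listing (nr_sk, rose_sk, llc_sk, pn_sk, pep_sk, raw_sk, sctp_sk, ...): the protocol-private socket embeds struct sock as its first member, so a plain cast converts between the two views. A hedged sketch with a hypothetical protocol struct:

	#include <net/sock.h>

	/* Illustrative only: struct example_sock is hypothetical; real
	 * protocol structs differ, but all keep struct sock first so the
	 * cast below is layout-safe. */
	struct example_sock {
		struct sock	sk;		/* must remain the first member */
		u32		example_state;	/* protocol-private data follows */
	};

	static inline struct example_sock *example_sk(const struct sock *sk)
	{
		return (struct example_sock *)sk;
	}
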
sk                114 include/net/iucv/af_iucv.h 	struct sock		sk;
sk                165 include/net/iucv/af_iucv.h void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
sk                166 include/net/iucv/af_iucv.h void iucv_accept_unlink(struct sock *sk);
sk                 59 include/net/kcm.h 	struct sock sk;
sk                 90 include/net/kcm.h 	struct sock *sk;
sk                 99 include/net/kcm.h 	void (*save_state_change)(struct sock *sk);
sk                100 include/net/kcm.h 	void (*save_data_ready)(struct sock *sk);
sk                101 include/net/kcm.h 	void (*save_write_space)(struct sock *sk);
sk                 30 include/net/l3mdev.h 					  struct sock *sk, struct sk_buff *skb,
sk                175 include/net/l3mdev.h struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto)
sk                184 include/net/l3mdev.h 			skb = master->l3mdev_ops->l3mdev_l3_out(master, sk,
sk                192 include/net/l3mdev.h struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
sk                194 include/net/l3mdev.h 	return l3mdev_l3_out(sk, skb, AF_INET);
sk                198 include/net/l3mdev.h struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
sk                200 include/net/l3mdev.h 	return l3mdev_l3_out(sk, skb, AF_INET6);
sk                272 include/net/l3mdev.h struct sk_buff *l3mdev_ip_out(struct sock *sk, struct sk_buff *skb)
sk                278 include/net/l3mdev.h struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
sk                 90 include/net/llc_c_ac.h typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
sk                 92 include/net/llc_c_ac.h int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
sk                 93 include/net/llc_c_ac.h int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
sk                 94 include/net/llc_c_ac.h int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
sk                 95 include/net/llc_c_ac.h int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
sk                 96 include/net/llc_c_ac.h int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
sk                 97 include/net/llc_c_ac.h int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
sk                 98 include/net/llc_c_ac.h int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
sk                 99 include/net/llc_c_ac.h int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
sk                101 include/net/llc_c_ac.h int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
sk                103 include/net/llc_c_ac.h int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
sk                104 include/net/llc_c_ac.h int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
sk                105 include/net/llc_c_ac.h int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
sk                106 include/net/llc_c_ac.h int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb);
sk                107 include/net/llc_c_ac.h int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb);
sk                108 include/net/llc_c_ac.h int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
sk                109 include/net/llc_c_ac.h int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
sk                110 include/net/llc_c_ac.h int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
sk                111 include/net/llc_c_ac.h int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
sk                112 include/net/llc_c_ac.h int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk,
sk                114 include/net/llc_c_ac.h int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
sk                115 include/net/llc_c_ac.h int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
sk                116 include/net/llc_c_ac.h int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
sk                117 include/net/llc_c_ac.h int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
sk                118 include/net/llc_c_ac.h int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
sk                119 include/net/llc_c_ac.h int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
sk                120 include/net/llc_c_ac.h int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
sk                121 include/net/llc_c_ac.h int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb);
sk                122 include/net/llc_c_ac.h int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
sk                123 include/net/llc_c_ac.h int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
sk                124 include/net/llc_c_ac.h int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
sk                125 include/net/llc_c_ac.h int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
sk                126 include/net/llc_c_ac.h int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
sk                127 include/net/llc_c_ac.h int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
sk                128 include/net/llc_c_ac.h int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
sk                129 include/net/llc_c_ac.h int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
sk                130 include/net/llc_c_ac.h int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb);
sk                131 include/net/llc_c_ac.h int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb);
sk                132 include/net/llc_c_ac.h int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb);
sk                133 include/net/llc_c_ac.h int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb);
sk                134 include/net/llc_c_ac.h int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb);
sk                135 include/net/llc_c_ac.h int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
sk                137 include/net/llc_c_ac.h int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb);
sk                138 include/net/llc_c_ac.h int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb);
sk                139 include/net/llc_c_ac.h int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb);
sk                140 include/net/llc_c_ac.h int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb);
sk                141 include/net/llc_c_ac.h int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb);
sk                142 include/net/llc_c_ac.h int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb);
sk                143 include/net/llc_c_ac.h int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb);
sk                144 include/net/llc_c_ac.h int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb);
sk                145 include/net/llc_c_ac.h int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb);
sk                146 include/net/llc_c_ac.h int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb);
sk                147 include/net/llc_c_ac.h int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb);
sk                148 include/net/llc_c_ac.h int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb);
sk                149 include/net/llc_c_ac.h int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk,
sk                151 include/net/llc_c_ac.h int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb);
sk                152 include/net/llc_c_ac.h int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb);
sk                153 include/net/llc_c_ac.h int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb);
sk                154 include/net/llc_c_ac.h int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb);
sk                155 include/net/llc_c_ac.h int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb);
sk                156 include/net/llc_c_ac.h int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb);
sk                157 include/net/llc_c_ac.h int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb);
sk                158 include/net/llc_c_ac.h int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb);
sk                159 include/net/llc_c_ac.h int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb);
sk                160 include/net/llc_c_ac.h int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb);
sk                161 include/net/llc_c_ac.h int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
sk                162 include/net/llc_c_ac.h int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
sk                163 include/net/llc_c_ac.h int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
sk                164 include/net/llc_c_ac.h int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
sk                165 include/net/llc_c_ac.h int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb);
sk                167 include/net/llc_c_ac.h int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
sk                168 include/net/llc_c_ac.h int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
sk                169 include/net/llc_c_ac.h int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
sk                170 include/net/llc_c_ac.h int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
sk                171 include/net/llc_c_ac.h int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
sk                172 include/net/llc_c_ac.h int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
sk                179 include/net/llc_c_ac.h void llc_conn_set_p_flag(struct sock *sk, u8 value);
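
The llc_conn_action_t handlers above are not called directly; the LLC connection state machine walks per-transition lists of them. A hedged sketch of such an action list (the grouping below is illustrative, not a real transition from net/llc):

	#include <net/llc_c_ac.h>

	/* Illustrative only: a hypothetical transition's action list in the
	 * style of the LLC connection state tables, terminated by NULL. */
	static const llc_conn_action_t example_actions[] = {
		llc_conn_ac_upd_nr_received,
		llc_conn_ac_upd_p_flag,
		llc_conn_ac_clear_remote_busy,
		NULL,				/* end of list */
	};
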
sk                128 include/net/llc_c_ev.h typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
sk                129 include/net/llc_c_ev.h typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
sk                131 include/net/llc_c_ev.h int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
sk                132 include/net/llc_c_ev.h int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
sk                133 include/net/llc_c_ev.h int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
sk                134 include/net/llc_c_ev.h int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
sk                135 include/net/llc_c_ev.h int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
sk                136 include/net/llc_c_ev.h int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
sk                137 include/net/llc_c_ev.h int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
sk                138 include/net/llc_c_ev.h int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
sk                139 include/net/llc_c_ev.h int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
sk                140 include/net/llc_c_ev.h int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
sk                141 include/net/llc_c_ev.h int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
sk                143 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
sk                144 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
sk                146 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
sk                148 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
sk                149 include/net/llc_c_ev.h int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
sk                150 include/net/llc_c_ev.h int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
sk                151 include/net/llc_c_ev.h int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
sk                152 include/net/llc_c_ev.h int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
sk                153 include/net/llc_c_ev.h int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
sk                155 include/net/llc_c_ev.h int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
sk                157 include/net/llc_c_ev.h int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
sk                158 include/net/llc_c_ev.h int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
sk                159 include/net/llc_c_ev.h int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
sk                160 include/net/llc_c_ev.h int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
sk                161 include/net/llc_c_ev.h int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
sk                163 include/net/llc_c_ev.h int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                164 include/net/llc_c_ev.h int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                165 include/net/llc_c_ev.h int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
sk                167 include/net/llc_c_ev.h int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
sk                169 include/net/llc_c_ev.h int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
sk                170 include/net/llc_c_ev.h int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                171 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
sk                173 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
sk                175 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
sk                176 include/net/llc_c_ev.h int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                177 include/net/llc_c_ev.h int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
sk                178 include/net/llc_c_ev.h int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                179 include/net/llc_c_ev.h int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
sk                180 include/net/llc_c_ev.h int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                181 include/net/llc_c_ev.h int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
sk                182 include/net/llc_c_ev.h int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                183 include/net/llc_c_ev.h int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
sk                184 include/net/llc_c_ev.h int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                185 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
sk                186 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                187 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
sk                188 include/net/llc_c_ev.h int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
sk                189 include/net/llc_c_ev.h int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
sk                190 include/net/llc_c_ev.h int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
sk                191 include/net/llc_c_ev.h int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
sk                194 include/net/llc_c_ev.h int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb);
sk                195 include/net/llc_c_ev.h int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb);
sk                196 include/net/llc_c_ev.h int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb);
sk                197 include/net/llc_c_ev.h int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
sk                198 include/net/llc_c_ev.h int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb);
sk                199 include/net/llc_c_ev.h int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb);
sk                200 include/net/llc_c_ev.h int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
sk                201 include/net/llc_c_ev.h int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
sk                202 include/net/llc_c_ev.h int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb);
sk                203 include/net/llc_c_ev.h int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb);
sk                204 include/net/llc_c_ev.h int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb);
sk                205 include/net/llc_c_ev.h int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb);
sk                206 include/net/llc_c_ev.h int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
sk                207 include/net/llc_c_ev.h int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
sk                208 include/net/llc_c_ev.h int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb);
sk                209 include/net/llc_c_ev.h int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb);
sk                210 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb);
sk                211 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb);
sk                212 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb);
sk                213 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
sk                215 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb);
sk                216 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb);
sk                217 include/net/llc_c_ev.h int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb);
sk                219 include/net/llc_c_ev.h static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
sk                221 include/net/llc_c_ev.h 	return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
sk                222 include/net/llc_c_ev.h 	       (unsigned int)sk->sk_rcvbuf;
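
llc_conn_space() above is a receive-buffer admission check: a frame is acceptable only while sk_rmem_alloc plus the skb's truesize stays under sk_rcvbuf. A hedged sketch of how a receive path might use it (example_can_queue is hypothetical):

	#include <net/sock.h>
	#include <net/llc_c_ev.h>

	/* Illustrative only: accept the frame only if the receive budget
	 * allows it; skb_set_owner_r() then charges the skb's truesize to
	 * sk_rmem_alloc. */
	static int example_can_queue(struct sock *sk, struct sk_buff *skb)
	{
		if (!llc_conn_space(sk, skb))
			return -ENOBUFS;
		skb_set_owner_r(skb, sk);
		return 0;
	}
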
sk                 34 include/net/llc_conn.h 	struct sock	    sk;
sk                 83 include/net/llc_conn.h static inline struct llc_sock *llc_sk(const struct sock *sk)
sk                 85 include/net/llc_conn.h 	return (struct llc_sock *)sk;
sk                100 include/net/llc_conn.h void llc_sk_stop_all_timers(struct sock *sk, bool sync);
sk                101 include/net/llc_conn.h void llc_sk_free(struct sock *sk);
sk                103 include/net/llc_conn.h void llc_sk_reset(struct sock *sk);
sk                106 include/net/llc_conn.h int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
sk                107 include/net/llc_conn.h void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
sk                108 include/net/llc_conn.h void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
sk                109 include/net/llc_conn.h void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
sk                110 include/net/llc_conn.h void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
sk                114 include/net/llc_conn.h void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
sk                115 include/net/llc_conn.h void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
sk                 65 include/net/llc_if.h int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
sk                 66 include/net/llc_if.h int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
sk                 67 include/net/llc_if.h int llc_send_disc(struct sock *sk);
sk                 23 include/net/llc_sap.h void llc_save_primitive(struct sock *sk, struct sk_buff *skb,
sk                 25 include/net/llc_sap.h struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
sk                 30 include/net/lwtunnel.h 	int		(*orig_output)(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                 42 include/net/lwtunnel.h 	int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                126 include/net/lwtunnel.h int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                244 include/net/lwtunnel.h static inline int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 25 include/net/netfilter/br_netfilter.h int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
sk                 41 include/net/netfilter/br_netfilter.h int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                110 include/net/netfilter/nf_log.h 			    struct sock *sk);
sk                 11 include/net/netfilter/nf_tproxy.h static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
sk                 13 include/net/netfilter/nf_tproxy.h 	if (inet_sk_transparent(sk))
sk                 16 include/net/netfilter/nf_tproxy.h 	sock_gen_put(sk);
sk                 21 include/net/netfilter/nf_tproxy.h static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
sk                 24 include/net/netfilter/nf_tproxy.h 	skb->sk = sk;
sk                 49 include/net/netfilter/nf_tproxy.h 			    __be32 laddr, __be16 lport, struct sock *sk);
sk                111 include/net/netfilter/nf_tproxy.h 			    struct sock *sk);
sk                243 include/net/netlabel.h 	int (*sock_getattr)(struct sock *sk,
sk                245 include/net/netlabel.h 	int (*sock_setattr)(struct sock *sk,
sk                248 include/net/netlabel.h 	void (*sock_delattr)(struct sock *sk);
sk                472 include/net/netlabel.h int netlbl_sock_setattr(struct sock *sk,
sk                475 include/net/netlabel.h void netlbl_sock_delattr(struct sock *sk);
sk                476 include/net/netlabel.h int netlbl_sock_getattr(struct sock *sk,
sk                478 include/net/netlabel.h int netlbl_conn_setattr(struct sock *sk,
sk                616 include/net/netlabel.h static inline int netlbl_sock_setattr(struct sock *sk,
sk                622 include/net/netlabel.h static inline void netlbl_sock_delattr(struct sock *sk)
sk                625 include/net/netlabel.h static inline int netlbl_sock_getattr(struct sock *sk,
sk                630 include/net/netlabel.h static inline int netlbl_conn_setattr(struct sock *sk,
sk                431 include/net/netlink.h int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
sk                961 include/net/netlink.h static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
sk                968 include/net/netlink.h 	err = netlink_broadcast(sk, skb, portid, group, flags);
sk                981 include/net/netlink.h static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 portid)
sk                985 include/net/netlink.h 	err = netlink_unicast(sk, skb, portid, MSG_DONTWAIT);
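
nlmsg_multicast() and nlmsg_unicast() above are thin wrappers over netlink_broadcast()/netlink_unicast() that normalize the result to 0 or a negative errno. A hedged sketch of a unicast reply (example_send_reply and its arguments are illustrative):

	#include <net/netlink.h>

	/* Illustrative only: send a fully built netlink message back to the
	 * requesting socket identified by portid; nlmsg_unicast() consumes
	 * the skb whether or not delivery succeeds. */
	static int example_send_reply(struct sock *nlsk, struct sk_buff *reply,
				      u32 portid)
	{
		return nlmsg_unicast(nlsk, reply, portid);
	}
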
sk                 86 include/net/netrom.h #define nr_sk(sk) ((struct nr_sock *)(sk))
sk                255 include/net/netrom.h void nr_init_timers(struct sock *sk);
sk                263 include/net/nfc/nfc.h struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
sk                 18 include/net/phonet/gprs.h int pep_writeable(struct sock *sk);
sk                 19 include/net/phonet/gprs.h int pep_write(struct sock *sk, struct sk_buff *skb);
sk                 20 include/net/phonet/gprs.h struct sk_buff *pep_read(struct sock *sk);
sk                 22 include/net/phonet/gprs.h int gprs_attach(struct sock *sk);
sk                 23 include/net/phonet/gprs.h void gprs_detach(struct sock *sk);
sk                 36 include/net/phonet/pep.h static inline struct pep_sock *pep_sk(struct sock *sk)
sk                 38 include/net/phonet/pep.h 	return (struct pep_sock *)sk;
sk                 24 include/net/phonet/phonet.h 	struct sock	sk;
sk                 30 include/net/phonet/phonet.h static inline struct pn_sock *pn_sk(struct sock *sk)
sk                 32 include/net/phonet/phonet.h 	return (struct pn_sock *)sk;
sk                 41 include/net/phonet/phonet.h int pn_sock_hash(struct sock *sk);
sk                 42 include/net/phonet/phonet.h void pn_sock_unhash(struct sock *sk);
sk                 43 include/net/phonet/phonet.h int pn_sock_get_port(struct sock *sk, unsigned short sport);
sk                 47 include/net/phonet/phonet.h int pn_sock_unbind_res(struct sock *sk, u8 res);
sk                 48 include/net/phonet/phonet.h void pn_sock_unbind_all_res(struct sock *sk);
sk                 50 include/net/phonet/phonet.h int pn_skb_send(struct sock *sk, struct sk_buff *skb,
sk                 30 include/net/ping.h 	int (*ipv6_recv_error)(struct sock *sk, struct msghdr *msg, int len,
sk                 32 include/net/ping.h 	void (*ip6_datagram_recv_common_ctl)(struct sock *sk,
sk                 35 include/net/ping.h 	void (*ip6_datagram_recv_specific_ctl)(struct sock *sk,
sk                 39 include/net/ping.h 	void (*ipv6_icmp_error)(struct sock *sk, struct sk_buff *skb, int err,
sk                 63 include/net/ping.h int  ping_get_port(struct sock *sk, unsigned short ident);
sk                 64 include/net/ping.h int ping_hash(struct sock *sk);
sk                 65 include/net/ping.h void ping_unhash(struct sock *sk);
sk                 67 include/net/ping.h int  ping_init_sock(struct sock *sk);
sk                 68 include/net/ping.h void ping_close(struct sock *sk, long timeout);
sk                 69 include/net/ping.h int  ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len);
sk                 74 include/net/ping.h int  ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
sk                 78 include/net/ping.h int  ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
sk                 23 include/net/raw.h struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
sk                 27 include/net/raw.h int raw_abort(struct sock *sk, int err);
sk                 58 include/net/raw.h int raw_hash_sk(struct sock *sk);
sk                 59 include/net/raw.h void raw_unhash_sk(struct sock *sk);
sk                 69 include/net/raw.h static inline struct raw_sock *raw_sk(const struct sock *sk)
sk                 71 include/net/raw.h 	return (struct raw_sock *)sk;
sk                  8 include/net/rawv6.h struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
sk                 12 include/net/rawv6.h int raw_abort(struct sock *sk, int err);
sk                 18 include/net/rawv6.h int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
sk                 32 include/net/request_sock.h 	int		(*rtx_syn_ack)(const struct sock *sk,
sk                 34 include/net/request_sock.h 	void		(*send_ack)(const struct sock *sk, struct sk_buff *skb,
sk                 36 include/net/request_sock.h 	void		(*send_reset)(const struct sock *sk,
sk                 62 include/net/request_sock.h 	struct sock			*sk;
sk                 68 include/net/request_sock.h static inline struct request_sock *inet_reqsk(const struct sock *sk)
sk                 70 include/net/request_sock.h 	return (struct request_sock *)sk;
sk                102 include/net/request_sock.h 	req->sk = NULL;
sk                183 include/net/request_sock.h void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
sk                150 include/net/rose.h #define rose_sk(sk) ((struct rose_sock *)(sk))
sk                 43 include/net/route.h #define RT_CONN_FLAGS(sk)   (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
sk                 44 include/net/route.h #define RT_CONN_FLAGS_TOS(sk,tos)   (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
sk                130 include/net/route.h 				    const struct sock *sk);
sk                152 include/net/route.h 						   struct sock *sk,
sk                157 include/net/route.h 	flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
sk                159 include/net/route.h 			   sk ? inet_sk_flowi_flags(sk) : 0,
sk                160 include/net/route.h 			   daddr, saddr, dport, sport, sock_net_uid(net, sk));
sk                161 include/net/route.h 	if (sk)
sk                162 include/net/route.h 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
sk                163 include/net/route.h 	return ip_route_output_flow(net, fl4, sk);
sk                207 include/net/route.h void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
sk                209 include/net/route.h void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
sk                284 include/net/route.h 					 struct sock *sk)
sk                288 include/net/route.h 	if (inet_sk(sk)->transparent)
sk                291 include/net/route.h 	flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
sk                293 include/net/route.h 			   sk->sk_uid);
sk                300 include/net/route.h 					      struct sock *sk)
sk                302 include/net/route.h 	struct net *net = sock_net(sk);
sk                306 include/net/route.h 			      sport, dport, sk);
sk                315 include/net/route.h 	security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
sk                316 include/net/route.h 	return ip_route_output_flow(net, fl4, sk);
sk                322 include/net/route.h 					       struct sock *sk)
sk                328 include/net/route.h 		flowi4_update_output(fl4, sk->sk_bound_dev_if,
sk                329 include/net/route.h 				     RT_CONN_FLAGS(sk), fl4->daddr,
sk                331 include/net/route.h 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
sk                332 include/net/route.h 		return ip_route_output_flow(sock_net(sk), fl4, sk);
sk                169 include/net/rtnetlink.h struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid);
sk                 93 include/net/sctp/sctp.h int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
sk                 95 include/net/sctp/sctp.h void sctp_write_space(struct sock *sk);
sk                 96 include/net/sctp/sctp.h void sctp_data_ready(struct sock *sk);
sk                100 include/net/sctp/sctp.h void sctp_copy_sock(struct sock *newsk, struct sock *sk,
sk                120 include/net/sctp/sctp.h int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
sk                150 include/net/sctp/sctp.h void sctp_icmp_proto_unreachable(struct sock *sk,
sk                367 include/net/sctp/sctp.h struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id);
sk                369 include/net/sctp/sctp.h int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp);
sk                398 include/net/sctp/sctp.h static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk                403 include/net/sctp/sctp.h 	skb->sk = sk;
sk                405 include/net/sctp/sctp.h 	atomic_add(event->rmem_len, &sk->sk_rmem_alloc);
sk                409 include/net/sctp/sctp.h 	sk_mem_charge(sk, event->rmem_len);
sk                463 include/net/sctp/sctp.h void sctp_put_port(struct sock *sk);
sk                513 include/net/sctp/sctp.h #define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style))
sk                514 include/net/sctp/sctp.h static inline int __sctp_style(const struct sock *sk,
sk                517 include/net/sctp/sctp.h 	return sctp_sk(sk)->type == style;
sk                529 include/net/sctp/sctp.h #define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state))
sk                530 include/net/sctp/sctp.h static inline int __sctp_sstate(const struct sock *sk,
sk                533 include/net/sctp/sctp.h 	return sk->sk_state == state;
sk                613 include/net/sctp/sctp.h static inline bool sctp_newsk_ready(const struct sock *sk)
sk                615 include/net/sctp/sctp.h 	return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
sk                237 include/net/sctp/structs.h static inline struct sctp_sock *sctp_sk(const struct sock *sk)
sk                239 include/net/sctp/structs.h        return (struct sctp_sock *)sk;
sk                429 include/net/sctp/structs.h 	int		(*setsockopt)	(struct sock *sk,
sk                434 include/net/sctp/structs.h 	int		(*getsockopt)	(struct sock *sk,
sk                439 include/net/sctp/structs.h 	int		(*compat_setsockopt)	(struct sock *sk,
sk                444 include/net/sctp/structs.h 	int		(*compat_getsockopt)	(struct sock *sk,
sk                452 include/net/sctp/structs.h 					 struct sock *sk);
sk                453 include/net/sctp/structs.h 	void		(*get_saddr)	(struct sctp_sock *sk,
sk                466 include/net/sctp/structs.h 					 struct sock *sk);
sk                480 include/net/sctp/structs.h 	int		(*skb_iif)	(const struct sk_buff *sk);
sk                481 include/net/sctp/structs.h 	int		(*is_ce)	(const struct sk_buff *sk);
sk                484 include/net/sctp/structs.h 	void		(*ecn_capable)(struct sock *sk);
sk                487 include/net/sctp/structs.h 	int		(*ip_options_len)(struct sock *sk);
sk                506 include/net/sctp/structs.h 	struct sock *(*create_accept_sk) (struct sock *sk,
sk                509 include/net/sctp/structs.h 	int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
sk                510 include/net/sctp/structs.h 	void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
sk                511 include/net/sctp/structs.h 	void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
sk                512 include/net/sctp/structs.h 	void (*copy_ip_options)(struct sock *sk, struct sock *newsk);
sk                990 include/net/sctp/structs.h void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
sk               1198 include/net/sctp/structs.h int sctp_is_any(struct sock *sk, const union sctp_addr *addr);
sk               1199 include/net/sctp/structs.h int sctp_is_ep_boundall(struct sock *sk);
sk               1240 include/net/sctp/structs.h 	struct sock *sk;
sk               2103 include/net/sctp/structs.h sctp_association_new(const struct sctp_endpoint *ep, const struct sock *sk,
sk                148 include/net/sctp/ulpevent.h 				struct msghdr *, struct sock *sk);
sk                 59 include/net/sctp/ulpqueue.h int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc);
sk                 21 include/net/smc.h int smc_hash_sk(struct sock *sk);
sk                 22 include/net/smc.h void smc_unhash_sk(struct sock *sk);
sk                 81 include/net/sock.h #define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
sk                 86 include/net/sock.h void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
sk                497 include/net/sock.h 	void			(*sk_state_change)(struct sock *sk);
sk                498 include/net/sock.h 	void			(*sk_data_ready)(struct sock *sk);
sk                499 include/net/sock.h 	void			(*sk_write_space)(struct sock *sk);
sk                500 include/net/sock.h 	void			(*sk_error_report)(struct sock *sk);
sk                501 include/net/sock.h 	int			(*sk_backlog_rcv)(struct sock *sk,
sk                504 include/net/sock.h 	struct sk_buff*		(*sk_validate_xmit_skb)(struct sock *sk,
sk                508 include/net/sock.h 	void                    (*sk_destruct)(struct sock *sk);
sk                522 include/net/sock.h #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))
sk                524 include/net/sock.h #define rcu_dereference_sk_user_data(sk)	rcu_dereference(__sk_user_data((sk)))
sk                525 include/net/sock.h #define rcu_assign_sk_user_data(sk, ptr)	rcu_assign_pointer(__sk_user_data((sk)), ptr)
sk                538 include/net/sock.h int sk_set_peek_off(struct sock *sk, int val);
sk                540 include/net/sock.h static inline int sk_peek_offset(struct sock *sk, int flags)
sk                543 include/net/sock.h 		return READ_ONCE(sk->sk_peek_off);
sk                549 include/net/sock.h static inline void sk_peek_offset_bwd(struct sock *sk, int val)
sk                551 include/net/sock.h 	s32 off = READ_ONCE(sk->sk_peek_off);
sk                555 include/net/sock.h 		WRITE_ONCE(sk->sk_peek_off, off);
sk                559 include/net/sock.h static inline void sk_peek_offset_fwd(struct sock *sk, int val)
sk                561 include/net/sock.h 	sk_peek_offset_bwd(sk, -val);
sk                592 include/net/sock.h static inline struct sock *sk_next(const struct sock *sk)
sk                594 include/net/sock.h 	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
sk                597 include/net/sock.h static inline struct sock *sk_nulls_next(const struct sock *sk)
sk                599 include/net/sock.h 	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
sk                600 include/net/sock.h 		hlist_nulls_entry(sk->sk_nulls_node.next,
sk                605 include/net/sock.h static inline bool sk_unhashed(const struct sock *sk)
sk                607 include/net/sock.h 	return hlist_unhashed(&sk->sk_node);
sk                610 include/net/sock.h static inline bool sk_hashed(const struct sock *sk)
sk                612 include/net/sock.h 	return !sk_unhashed(sk);
sk                625 include/net/sock.h static inline void __sk_del_node(struct sock *sk)
sk                627 include/net/sock.h 	__hlist_del(&sk->sk_node);
sk                631 include/net/sock.h static inline bool __sk_del_node_init(struct sock *sk)
sk                633 include/net/sock.h 	if (sk_hashed(sk)) {
sk                634 include/net/sock.h 		__sk_del_node(sk);
sk                635 include/net/sock.h 		sk_node_init(&sk->sk_node);
sk                647 include/net/sock.h static __always_inline void sock_hold(struct sock *sk)
sk                649 include/net/sock.h 	refcount_inc(&sk->sk_refcnt);
sk                655 include/net/sock.h static __always_inline void __sock_put(struct sock *sk)
sk                657 include/net/sock.h 	refcount_dec(&sk->sk_refcnt);
sk                660 include/net/sock.h static inline bool sk_del_node_init(struct sock *sk)
sk                662 include/net/sock.h 	bool rc = __sk_del_node_init(sk);
sk                666 include/net/sock.h 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
sk                667 include/net/sock.h 		__sock_put(sk);
sk                671 include/net/sock.h #define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)
sk                673 include/net/sock.h static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
sk                675 include/net/sock.h 	if (sk_hashed(sk)) {
sk                676 include/net/sock.h 		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
sk                682 include/net/sock.h static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
sk                684 include/net/sock.h 	bool rc = __sk_nulls_del_node_init_rcu(sk);
sk                688 include/net/sock.h 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
sk                689 include/net/sock.h 		__sock_put(sk);
sk                694 include/net/sock.h static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
sk                696 include/net/sock.h 	hlist_add_head(&sk->sk_node, list);
sk                699 include/net/sock.h static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
sk                701 include/net/sock.h 	sock_hold(sk);
sk                702 include/net/sock.h 	__sk_add_node(sk, list);
sk                705 include/net/sock.h static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
sk                707 include/net/sock.h 	sock_hold(sk);
sk                708 include/net/sock.h 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk                709 include/net/sock.h 	    sk->sk_family == AF_INET6)
sk                710 include/net/sock.h 		hlist_add_tail_rcu(&sk->sk_node, list);
sk                712 include/net/sock.h 		hlist_add_head_rcu(&sk->sk_node, list);
sk                715 include/net/sock.h static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
sk                717 include/net/sock.h 	sock_hold(sk);
sk                718 include/net/sock.h 	hlist_add_tail_rcu(&sk->sk_node, list);
sk                721 include/net/sock.h static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
sk                723 include/net/sock.h 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
sk                726 include/net/sock.h static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
sk                728 include/net/sock.h 	hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
sk                731 include/net/sock.h static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
sk                733 include/net/sock.h 	sock_hold(sk);
sk                734 include/net/sock.h 	__sk_nulls_add_node_rcu(sk, list);
sk                737 include/net/sock.h static inline void __sk_del_bind_node(struct sock *sk)
sk                739 include/net/sock.h 	__hlist_del(&sk->sk_bind_node);
sk                742 include/net/sock.h static inline void sk_add_bind_node(struct sock *sk,
sk                745 include/net/sock.h 	hlist_add_head(&sk->sk_bind_node, list);
sk                780 include/net/sock.h static inline struct user_namespace *sk_user_ns(struct sock *sk)
sk                786 include/net/sock.h 	return sk->sk_socket->file->f_cred->user_ns;
sk                831 include/net/sock.h static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
sk                833 include/net/sock.h 	__set_bit(flag, &sk->sk_flags);
sk                836 include/net/sock.h static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
sk                838 include/net/sock.h 	__clear_bit(flag, &sk->sk_flags);
sk                841 include/net/sock.h static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
sk                843 include/net/sock.h 	return test_bit(flag, &sk->sk_flags);
sk                861 include/net/sock.h static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
sk                863 include/net/sock.h 	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
sk                866 include/net/sock.h static inline void sk_acceptq_removed(struct sock *sk)
sk                868 include/net/sock.h 	sk->sk_ack_backlog--;
sk                871 include/net/sock.h static inline void sk_acceptq_added(struct sock *sk)
sk                873 include/net/sock.h 	sk->sk_ack_backlog++;
sk                876 include/net/sock.h static inline bool sk_acceptq_is_full(const struct sock *sk)
sk                878 include/net/sock.h 	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
sk                884 include/net/sock.h static inline int sk_stream_min_wspace(const struct sock *sk)
sk                886 include/net/sock.h 	return READ_ONCE(sk->sk_wmem_queued) >> 1;
sk                889 include/net/sock.h static inline int sk_stream_wspace(const struct sock *sk)
sk                891 include/net/sock.h 	return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
sk                894 include/net/sock.h static inline void sk_wmem_queued_add(struct sock *sk, int val)
sk                896 include/net/sock.h 	WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
sk                899 include/net/sock.h void sk_stream_write_space(struct sock *sk);
sk                902 include/net/sock.h static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
sk                907 include/net/sock.h 	if (!sk->sk_backlog.tail)
sk                908 include/net/sock.h 		sk->sk_backlog.head = skb;
sk                910 include/net/sock.h 		sk->sk_backlog.tail->next = skb;
sk                912 include/net/sock.h 	sk->sk_backlog.tail = skb;
sk                921 include/net/sock.h static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
sk                923 include/net/sock.h 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
sk                929 include/net/sock.h static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
sk                932 include/net/sock.h 	if (sk_rcvqueues_full(sk, limit))
sk                940 include/net/sock.h 	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
sk                943 include/net/sock.h 	__sk_add_backlog(sk, skb);
sk                944 include/net/sock.h 	sk->sk_backlog.len += skb->truesize;
sk                948 include/net/sock.h int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
sk                950 include/net/sock.h static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk                953 include/net/sock.h 		return __sk_backlog_rcv(sk, skb);
sk                955 include/net/sock.h 	return sk->sk_backlog_rcv(sk, skb);
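
The backlog helpers above queue packets that arrive while the socket is owned by a process: __sk_add_backlog() appends to a head/tail list, sk_rcvqueues_full() charges the backlog length plus receive-memory usage against a limit, and sk_add_backlog() rejects the packet when that limit is exceeded. A minimal single-threaded sketch (the real helpers run under the socket spinlock, also reject pfmemalloc skbs, and use skb->truesize; names here are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct skb_model { struct skb_model *next; unsigned int truesize; };

struct sock_model {
	struct { struct skb_model *head, *tail; unsigned int len; } sk_backlog;
	unsigned int sk_rmem_alloc;
};

/* __sk_add_backlog(): FIFO append onto the backlog list. */
static void __sk_add_backlog_m(struct sock_model *sk, struct skb_model *skb)
{
	skb->next = NULL;
	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;
	sk->sk_backlog.tail = skb;
}

/* sk_rcvqueues_full(): backlog plus receive queue against one limit. */
static bool sk_rcvqueues_full_m(const struct sock_model *sk, unsigned int limit)
{
	return sk->sk_backlog.len + sk->sk_rmem_alloc > limit;
}

static int sk_add_backlog_m(struct sock_model *sk, struct skb_model *skb,
			    unsigned int limit)
{
	if (sk_rcvqueues_full_m(sk, limit))
		return -ENOBUFS;
	__sk_add_backlog_m(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

int main(void)
{
	struct sock_model sk = { 0 };
	struct skb_model skb = { .truesize = 2048 };

	printf("queued=%d backlog_len=%u\n",
	       sk_add_backlog_m(&sk, &skb, 212992), sk.sk_backlog.len);
	return 0;
}
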
sk                958 include/net/sock.h static inline void sk_incoming_cpu_update(struct sock *sk)
sk                962 include/net/sock.h 	if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
sk                963 include/net/sock.h 		WRITE_ONCE(sk->sk_incoming_cpu, cpu);
sk                978 include/net/sock.h static inline void sock_rps_record_flow(const struct sock *sk)
sk                992 include/net/sock.h 		if (sk->sk_state == TCP_ESTABLISHED)
sk                993 include/net/sock.h 			sock_rps_record_flow_hash(sk->sk_rxhash);
sk                998 include/net/sock.h static inline void sock_rps_save_rxhash(struct sock *sk,
sk               1002 include/net/sock.h 	if (unlikely(sk->sk_rxhash != skb->hash))
sk               1003 include/net/sock.h 		sk->sk_rxhash = skb->hash;
sk               1007 include/net/sock.h static inline void sock_rps_reset_rxhash(struct sock *sk)
sk               1010 include/net/sock.h 	sk->sk_rxhash = 0;
sk               1029 include/net/sock.h int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
sk               1030 include/net/sock.h int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
sk               1031 include/net/sock.h void sk_stream_wait_close(struct sock *sk, long timeo_p);
sk               1032 include/net/sock.h int sk_stream_error(struct sock *sk, int flags, int err);
sk               1033 include/net/sock.h void sk_stream_kill_queues(struct sock *sk);
sk               1034 include/net/sock.h void sk_set_memalloc(struct sock *sk);
sk               1035 include/net/sock.h void sk_clear_memalloc(struct sock *sk);
sk               1037 include/net/sock.h void __sk_flush_backlog(struct sock *sk);
sk               1039 include/net/sock.h static inline bool sk_flush_backlog(struct sock *sk)
sk               1041 include/net/sock.h 	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
sk               1042 include/net/sock.h 		__sk_flush_backlog(sk);
sk               1048 include/net/sock.h int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
sk               1061 include/net/sock.h static inline void sk_prot_clear_nulls(struct sock *sk, int size)
sk               1064 include/net/sock.h 		memset(sk, 0, offsetof(struct sock, sk_node.next));
sk               1065 include/net/sock.h 	memset(&sk->sk_node.pprev, 0,
sk               1073 include/net/sock.h 	void			(*close)(struct sock *sk,
sk               1075 include/net/sock.h 	int			(*pre_connect)(struct sock *sk,
sk               1078 include/net/sock.h 	int			(*connect)(struct sock *sk,
sk               1081 include/net/sock.h 	int			(*disconnect)(struct sock *sk, int flags);
sk               1083 include/net/sock.h 	struct sock *		(*accept)(struct sock *sk, int flags, int *err,
sk               1086 include/net/sock.h 	int			(*ioctl)(struct sock *sk, int cmd,
sk               1088 include/net/sock.h 	int			(*init)(struct sock *sk);
sk               1089 include/net/sock.h 	void			(*destroy)(struct sock *sk);
sk               1090 include/net/sock.h 	void			(*shutdown)(struct sock *sk, int how);
sk               1091 include/net/sock.h 	int			(*setsockopt)(struct sock *sk, int level,
sk               1094 include/net/sock.h 	int			(*getsockopt)(struct sock *sk, int level,
sk               1097 include/net/sock.h 	void			(*keepalive)(struct sock *sk, int valbool);
sk               1099 include/net/sock.h 	int			(*compat_setsockopt)(struct sock *sk,
sk               1103 include/net/sock.h 	int			(*compat_getsockopt)(struct sock *sk,
sk               1107 include/net/sock.h 	int			(*compat_ioctl)(struct sock *sk,
sk               1110 include/net/sock.h 	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
sk               1112 include/net/sock.h 	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
sk               1115 include/net/sock.h 	int			(*sendpage)(struct sock *sk, struct page *page,
sk               1117 include/net/sock.h 	int			(*bind)(struct sock *sk,
sk               1120 include/net/sock.h 	int			(*backlog_rcv) (struct sock *sk,
sk               1123 include/net/sock.h 	void		(*release_cb)(struct sock *sk);
sk               1126 include/net/sock.h 	int			(*hash)(struct sock *sk);
sk               1127 include/net/sock.h 	void			(*unhash)(struct sock *sk);
sk               1128 include/net/sock.h 	void			(*rehash)(struct sock *sk);
sk               1129 include/net/sock.h 	int			(*get_port)(struct sock *sk, unsigned short snum);
sk               1136 include/net/sock.h 	bool			(*stream_memory_free)(const struct sock *sk, int wake);
sk               1137 include/net/sock.h 	bool			(*stream_memory_read)(const struct sock *sk);
sk               1139 include/net/sock.h 	void			(*enter_memory_pressure)(struct sock *sk);
sk               1140 include/net/sock.h 	void			(*leave_memory_pressure)(struct sock *sk);
sk               1186 include/net/sock.h 	int			(*diag_destroy)(struct sock *sk, int err);
sk               1194 include/net/sock.h static inline void sk_refcnt_debug_inc(struct sock *sk)
sk               1196 include/net/sock.h 	atomic_inc(&sk->sk_prot->socks);
sk               1199 include/net/sock.h static inline void sk_refcnt_debug_dec(struct sock *sk)
sk               1201 include/net/sock.h 	atomic_dec(&sk->sk_prot->socks);
sk               1203 include/net/sock.h 	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
sk               1206 include/net/sock.h static inline void sk_refcnt_debug_release(const struct sock *sk)
sk               1208 include/net/sock.h 	if (refcount_read(&sk->sk_refcnt) != 1)
sk               1210 include/net/sock.h 		       sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
sk               1213 include/net/sock.h #define sk_refcnt_debug_inc(sk) do { } while (0)
sk               1214 include/net/sock.h #define sk_refcnt_debug_dec(sk) do { } while (0)
sk               1215 include/net/sock.h #define sk_refcnt_debug_release(sk) do { } while (0)
sk               1218 include/net/sock.h static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
sk               1220 include/net/sock.h 	if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
sk               1223 include/net/sock.h 	return sk->sk_prot->stream_memory_free ?
sk               1224 include/net/sock.h 		sk->sk_prot->stream_memory_free(sk, wake) : true;
sk               1227 include/net/sock.h static inline bool sk_stream_memory_free(const struct sock *sk)
sk               1229 include/net/sock.h 	return __sk_stream_memory_free(sk, 0);
sk               1232 include/net/sock.h static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
sk               1234 include/net/sock.h 	return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
sk               1235 include/net/sock.h 	       __sk_stream_memory_free(sk, wake);
sk               1238 include/net/sock.h static inline bool sk_stream_is_writeable(const struct sock *sk)
sk               1240 include/net/sock.h 	return __sk_stream_is_writeable(sk, 0);
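
__sk_stream_is_writeable() above combines the two conditions just listed: free space must cover at least half of the already-queued bytes, and the memory-accounting check must agree. A compact model, reducing __sk_stream_memory_free() to the sk_wmem_queued < sk_sndbuf comparison visible above (the optional per-protocol stream_memory_free hook is omitted):

#include <stdbool.h>
#include <stdio.h>

struct sock_model { int sk_sndbuf; int sk_wmem_queued; };

static bool sk_stream_memory_free_m(const struct sock_model *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

static bool sk_stream_is_writeable_m(const struct sock_model *sk)
{
	int wspace = sk->sk_sndbuf - sk->sk_wmem_queued;
	int min_wspace = sk->sk_wmem_queued >> 1;

	return wspace >= min_wspace && sk_stream_memory_free_m(sk);
}

int main(void)
{
	struct sock_model sk = { .sk_sndbuf = 65536, .sk_wmem_queued = 60000 };

	printf("writeable=%d\n", sk_stream_is_writeable_m(&sk));
	return 0;
}
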
sk               1243 include/net/sock.h static inline int sk_under_cgroup_hierarchy(struct sock *sk,
sk               1247 include/net/sock.h 	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data),
sk               1254 include/net/sock.h static inline bool sk_has_memory_pressure(const struct sock *sk)
sk               1256 include/net/sock.h 	return sk->sk_prot->memory_pressure != NULL;
sk               1259 include/net/sock.h static inline bool sk_under_memory_pressure(const struct sock *sk)
sk               1261 include/net/sock.h 	if (!sk->sk_prot->memory_pressure)
sk               1264 include/net/sock.h 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
sk               1265 include/net/sock.h 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
sk               1268 include/net/sock.h 	return !!*sk->sk_prot->memory_pressure;
sk               1272 include/net/sock.h sk_memory_allocated(const struct sock *sk)
sk               1274 include/net/sock.h 	return atomic_long_read(sk->sk_prot->memory_allocated);
sk               1278 include/net/sock.h sk_memory_allocated_add(struct sock *sk, int amt)
sk               1280 include/net/sock.h 	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
sk               1284 include/net/sock.h sk_memory_allocated_sub(struct sock *sk, int amt)
sk               1286 include/net/sock.h 	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
sk               1289 include/net/sock.h static inline void sk_sockets_allocated_dec(struct sock *sk)
sk               1291 include/net/sock.h 	percpu_counter_dec(sk->sk_prot->sockets_allocated);
sk               1294 include/net/sock.h static inline void sk_sockets_allocated_inc(struct sock *sk)
sk               1296 include/net/sock.h 	percpu_counter_inc(sk->sk_prot->sockets_allocated);
sk               1300 include/net/sock.h sk_sockets_allocated_read_positive(struct sock *sk)
sk               1302 include/net/sock.h 	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
sk               1342 include/net/sock.h static inline int __sk_prot_rehash(struct sock *sk)
sk               1344 include/net/sock.h 	sk->sk_prot->unhash(sk);
sk               1345 include/net/sock.h 	return sk->sk_prot->hash(sk);
sk               1381 include/net/sock.h int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind);
sk               1382 include/net/sock.h int __sk_mem_schedule(struct sock *sk, int size, int kind);
sk               1383 include/net/sock.h void __sk_mem_reduce_allocated(struct sock *sk, int amount);
sk               1384 include/net/sock.h void __sk_mem_reclaim(struct sock *sk, int amount);
sk               1395 include/net/sock.h static inline long sk_prot_mem_limits(const struct sock *sk, int index)
sk               1397 include/net/sock.h 	long val = sk->sk_prot->sysctl_mem[index];
sk               1412 include/net/sock.h static inline bool sk_has_account(struct sock *sk)
sk               1415 include/net/sock.h 	return !!sk->sk_prot->memory_allocated;
sk               1418 include/net/sock.h static inline bool sk_wmem_schedule(struct sock *sk, int size)
sk               1420 include/net/sock.h 	if (!sk_has_account(sk))
sk               1422 include/net/sock.h 	return size <= sk->sk_forward_alloc ||
sk               1423 include/net/sock.h 		__sk_mem_schedule(sk, size, SK_MEM_SEND);
sk               1427 include/net/sock.h sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
sk               1429 include/net/sock.h 	if (!sk_has_account(sk))
sk               1431 include/net/sock.h 	return size <= sk->sk_forward_alloc ||
sk               1432 include/net/sock.h 		__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
sk               1436 include/net/sock.h static inline void sk_mem_reclaim(struct sock *sk)
sk               1438 include/net/sock.h 	if (!sk_has_account(sk))
sk               1440 include/net/sock.h 	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
sk               1441 include/net/sock.h 		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
sk               1444 include/net/sock.h static inline void sk_mem_reclaim_partial(struct sock *sk)
sk               1446 include/net/sock.h 	if (!sk_has_account(sk))
sk               1448 include/net/sock.h 	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
sk               1449 include/net/sock.h 		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
sk               1452 include/net/sock.h static inline void sk_mem_charge(struct sock *sk, int size)
sk               1454 include/net/sock.h 	if (!sk_has_account(sk))
sk               1456 include/net/sock.h 	sk->sk_forward_alloc -= size;
sk               1459 include/net/sock.h static inline void sk_mem_uncharge(struct sock *sk, int size)
sk               1461 include/net/sock.h 	if (!sk_has_account(sk))
sk               1463 include/net/sock.h 	sk->sk_forward_alloc += size;
sk               1472 include/net/sock.h 	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
sk               1473 include/net/sock.h 		__sk_mem_reclaim(sk, 1 << 20);
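
sk_mem_charge()/sk_mem_uncharge() above trade bytes against the per-socket sk_forward_alloc pre-allocation, and sk_mem_reclaim() hands whole quanta back once enough has accumulated (with the 1<<21 / 1<<20 safety valve shown above for very large balances). A simplified model: SK_MEM_QUANTUM is assumed to be one 4 KiB page, the limit checks in the real __sk_mem_schedule()/__sk_mem_reclaim() are omitted, and all *_m names are illustrative.

#include <stdio.h>

#define SK_MEM_QUANTUM_M 4096		/* assumption: one 4 KiB page */

static long proto_memory_allocated;	/* pages charged to the protocol */

struct sock_model { int sk_forward_alloc; };

/* Simplified __sk_mem_schedule(): pull whole quanta from the protocol-wide
 * counter into the per-socket pre-allocation. */
static int sk_mem_schedule_m(struct sock_model *sk, int size)
{
	int pages = (size + SK_MEM_QUANTUM_M - 1) / SK_MEM_QUANTUM_M;

	proto_memory_allocated += pages;
	sk->sk_forward_alloc += pages * SK_MEM_QUANTUM_M;
	return 1;
}

/* sk_mem_charge()/sk_mem_uncharge(): plain byte accounting, as above. */
static void sk_mem_charge_m(struct sock_model *sk, int size)
{
	sk->sk_forward_alloc -= size;
}

static void sk_mem_uncharge_m(struct sock_model *sk, int size)
{
	sk->sk_forward_alloc += size;
}

/* sk_mem_reclaim(): once at least one full quantum sits unused, return the
 * whole quanta to the protocol counter and keep only the remainder. */
static void sk_mem_reclaim_m(struct sock_model *sk)
{
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM_M) {
		int pages = sk->sk_forward_alloc / SK_MEM_QUANTUM_M;

		proto_memory_allocated -= pages;
		sk->sk_forward_alloc -= pages * SK_MEM_QUANTUM_M;
	}
}

int main(void)
{
	struct sock_model sk = { 0 };

	sk_mem_schedule_m(&sk, 1500);
	sk_mem_charge_m(&sk, 1500);
	sk_mem_uncharge_m(&sk, 1500);
	sk_mem_reclaim_m(&sk);
	printf("forward_alloc=%d proto_pages=%ld\n",
	       sk.sk_forward_alloc, proto_memory_allocated);
	return 0;
}
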
sk               1477 include/net/sock.h static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
sk               1479 include/net/sock.h 	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
sk               1480 include/net/sock.h 	sk_wmem_queued_add(sk, -skb->truesize);
sk               1481 include/net/sock.h 	sk_mem_uncharge(sk, skb->truesize);
sk               1483 include/net/sock.h 	    !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
sk               1485 include/net/sock.h 		sk->sk_tx_skb_cache = skb;
sk               1491 include/net/sock.h static inline void sock_release_ownership(struct sock *sk)
sk               1493 include/net/sock.h 	if (sk->sk_lock.owned) {
sk               1494 include/net/sock.h 		sk->sk_lock.owned = 0;
sk               1497 include/net/sock.h 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
sk               1508 include/net/sock.h #define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
sk               1510 include/net/sock.h 	sk->sk_lock.owned = 0;						\
sk               1511 include/net/sock.h 	init_waitqueue_head(&sk->sk_lock.wq);				\
sk               1512 include/net/sock.h 	spin_lock_init(&(sk)->sk_lock.slock);				\
sk               1513 include/net/sock.h 	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
sk               1514 include/net/sock.h 			sizeof((sk)->sk_lock));				\
sk               1515 include/net/sock.h 	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
sk               1517 include/net/sock.h 	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
sk               1521 include/net/sock.h static inline bool lockdep_sock_is_held(const struct sock *sk)
sk               1523 include/net/sock.h 	return lockdep_is_held(&sk->sk_lock) ||
sk               1524 include/net/sock.h 	       lockdep_is_held(&sk->sk_lock.slock);
sk               1528 include/net/sock.h void lock_sock_nested(struct sock *sk, int subclass);
sk               1530 include/net/sock.h static inline void lock_sock(struct sock *sk)
sk               1532 include/net/sock.h 	lock_sock_nested(sk, 0);
sk               1535 include/net/sock.h void __release_sock(struct sock *sk);
sk               1536 include/net/sock.h void release_sock(struct sock *sk);
sk               1545 include/net/sock.h bool lock_sock_fast(struct sock *sk);
sk               1554 include/net/sock.h static inline void unlock_sock_fast(struct sock *sk, bool slow)
sk               1557 include/net/sock.h 		release_sock(sk);
sk               1559 include/net/sock.h 		spin_unlock_bh(&sk->sk_lock.slock);
sk               1576 include/net/sock.h static inline void sock_owned_by_me(const struct sock *sk)
sk               1579 include/net/sock.h 	WARN_ON_ONCE(!lockdep_sock_is_held(sk) && debug_locks);
sk               1583 include/net/sock.h static inline bool sock_owned_by_user(const struct sock *sk)
sk               1585 include/net/sock.h 	sock_owned_by_me(sk);
sk               1586 include/net/sock.h 	return sk->sk_lock.owned;
sk               1589 include/net/sock.h static inline bool sock_owned_by_user_nocheck(const struct sock *sk)
sk               1591 include/net/sock.h 	return sk->sk_lock.owned;
sk               1597 include/net/sock.h 	struct sock *sk = (struct sock *)csk;
sk               1599 include/net/sock.h 	return !sk->sk_lock.owned && !spin_is_locked(&sk->sk_lock.slock);
sk               1604 include/net/sock.h void sk_free(struct sock *sk);
sk               1605 include/net/sock.h void sk_destruct(struct sock *sk);
sk               1606 include/net/sock.h struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
sk               1607 include/net/sock.h void sk_free_unlock_clone(struct sock *sk);
sk               1609 include/net/sock.h struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
sk               1613 include/net/sock.h struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
sk               1631 include/net/sock.h struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
sk               1633 include/net/sock.h struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
sk               1636 include/net/sock.h void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
sk               1637 include/net/sock.h void sock_kfree_s(struct sock *sk, void *mem, int size);
sk               1638 include/net/sock.h void sock_kzfree_s(struct sock *sk, void *mem, int size);
sk               1639 include/net/sock.h void sk_send_sigurg(struct sock *sk);
sk               1648 include/net/sock.h 			       const struct sock *sk)
sk               1650 include/net/sock.h 	*sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
sk               1653 include/net/sock.h int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
sk               1655 include/net/sock.h int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
sk               1673 include/net/sock.h int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
sk               1679 include/net/sock.h ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
sk               1697 include/net/sock.h void sk_common_release(struct sock *sk);
sk               1704 include/net/sock.h void sock_init_data(struct socket *sock, struct sock *sk);
sk               1732 include/net/sock.h static inline void sock_put(struct sock *sk)
sk               1734 include/net/sock.h 	if (refcount_dec_and_test(&sk->sk_refcnt))
sk               1735 include/net/sock.h 		sk_free(sk);
sk               1740 include/net/sock.h void sock_gen_put(struct sock *sk);
sk               1742 include/net/sock.h int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
sk               1744 include/net/sock.h static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
sk               1747 include/net/sock.h 	return __sk_receive_skb(sk, skb, nested, 1, true);
sk               1750 include/net/sock.h static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
sk               1755 include/net/sock.h 	sk->sk_tx_queue_mapping = tx_queue;
sk               1760 include/net/sock.h static inline void sk_tx_queue_clear(struct sock *sk)
sk               1762 include/net/sock.h 	sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
sk               1765 include/net/sock.h static inline int sk_tx_queue_get(const struct sock *sk)
sk               1767 include/net/sock.h 	if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
sk               1768 include/net/sock.h 		return sk->sk_tx_queue_mapping;
sk               1773 include/net/sock.h static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
sk               1782 include/net/sock.h 		sk->sk_rx_queue_mapping = rx_queue;
sk               1787 include/net/sock.h static inline void sk_rx_queue_clear(struct sock *sk)
sk               1790 include/net/sock.h 	sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
sk               1795 include/net/sock.h static inline int sk_rx_queue_get(const struct sock *sk)
sk               1797 include/net/sock.h 	if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
sk               1798 include/net/sock.h 		return sk->sk_rx_queue_mapping;
sk               1804 include/net/sock.h static inline void sk_set_socket(struct sock *sk, struct socket *sock)
sk               1806 include/net/sock.h 	sk_tx_queue_clear(sk);
sk               1807 include/net/sock.h 	sk->sk_socket = sock;
sk               1810 include/net/sock.h static inline wait_queue_head_t *sk_sleep(struct sock *sk)
sk               1813 include/net/sock.h 	return &rcu_dereference_raw(sk->sk_wq)->wait;
sk               1822 include/net/sock.h static inline void sock_orphan(struct sock *sk)
sk               1824 include/net/sock.h 	write_lock_bh(&sk->sk_callback_lock);
sk               1825 include/net/sock.h 	sock_set_flag(sk, SOCK_DEAD);
sk               1826 include/net/sock.h 	sk_set_socket(sk, NULL);
sk               1827 include/net/sock.h 	sk->sk_wq  = NULL;
sk               1828 include/net/sock.h 	write_unlock_bh(&sk->sk_callback_lock);
sk               1831 include/net/sock.h static inline void sock_graft(struct sock *sk, struct socket *parent)
sk               1833 include/net/sock.h 	WARN_ON(parent->sk);
sk               1834 include/net/sock.h 	write_lock_bh(&sk->sk_callback_lock);
sk               1835 include/net/sock.h 	rcu_assign_pointer(sk->sk_wq, &parent->wq);
sk               1836 include/net/sock.h 	parent->sk = sk;
sk               1837 include/net/sock.h 	sk_set_socket(sk, parent);
sk               1838 include/net/sock.h 	sk->sk_uid = SOCK_INODE(parent)->i_uid;
sk               1839 include/net/sock.h 	security_sock_graft(sk, parent);
sk               1840 include/net/sock.h 	write_unlock_bh(&sk->sk_callback_lock);
sk               1843 include/net/sock.h kuid_t sock_i_uid(struct sock *sk);
sk               1844 include/net/sock.h unsigned long sock_i_ino(struct sock *sk);
sk               1846 include/net/sock.h static inline kuid_t sock_net_uid(const struct net *net, const struct sock *sk)
sk               1848 include/net/sock.h 	return sk ? sk->sk_uid : make_kuid(net->user_ns, 0);
sk               1858 include/net/sock.h static inline void sk_set_txhash(struct sock *sk)
sk               1860 include/net/sock.h 	sk->sk_txhash = net_tx_rndhash();
sk               1863 include/net/sock.h static inline void sk_rethink_txhash(struct sock *sk)
sk               1865 include/net/sock.h 	if (sk->sk_txhash)
sk               1866 include/net/sock.h 		sk_set_txhash(sk);
sk               1870 include/net/sock.h __sk_dst_get(struct sock *sk)
sk               1872 include/net/sock.h 	return rcu_dereference_check(sk->sk_dst_cache,
sk               1873 include/net/sock.h 				     lockdep_sock_is_held(sk));
sk               1877 include/net/sock.h sk_dst_get(struct sock *sk)
sk               1882 include/net/sock.h 	dst = rcu_dereference(sk->sk_dst_cache);
sk               1889 include/net/sock.h static inline void dst_negative_advice(struct sock *sk)
sk               1891 include/net/sock.h 	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
sk               1893 include/net/sock.h 	sk_rethink_txhash(sk);
sk               1899 include/net/sock.h 			rcu_assign_pointer(sk->sk_dst_cache, ndst);
sk               1900 include/net/sock.h 			sk_tx_queue_clear(sk);
sk               1901 include/net/sock.h 			sk->sk_dst_pending_confirm = 0;
sk               1907 include/net/sock.h __sk_dst_set(struct sock *sk, struct dst_entry *dst)
sk               1911 include/net/sock.h 	sk_tx_queue_clear(sk);
sk               1912 include/net/sock.h 	sk->sk_dst_pending_confirm = 0;
sk               1913 include/net/sock.h 	old_dst = rcu_dereference_protected(sk->sk_dst_cache,
sk               1914 include/net/sock.h 					    lockdep_sock_is_held(sk));
sk               1915 include/net/sock.h 	rcu_assign_pointer(sk->sk_dst_cache, dst);
sk               1920 include/net/sock.h sk_dst_set(struct sock *sk, struct dst_entry *dst)
sk               1924 include/net/sock.h 	sk_tx_queue_clear(sk);
sk               1925 include/net/sock.h 	sk->sk_dst_pending_confirm = 0;
sk               1926 include/net/sock.h 	old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
sk               1931 include/net/sock.h __sk_dst_reset(struct sock *sk)
sk               1933 include/net/sock.h 	__sk_dst_set(sk, NULL);
sk               1937 include/net/sock.h sk_dst_reset(struct sock *sk)
sk               1939 include/net/sock.h 	sk_dst_set(sk, NULL);
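
sk_dst_set() above publishes a new cached route with xchg() so a concurrent update can never leak the previous entry; the returned old pointer is then dropped (via dst_release(), which is not among the lines above). A C11 sketch of the same publish-and-swap pattern; struct dst_model and the *_m names are illustrative, and free() stands in for the kernel's reference drop:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dst_model { int refcnt; };

struct sock_model {
	_Atomic(struct dst_model *) sk_dst_cache;
};

/* Sketch of sk_dst_set(): atomically install the new cached route and
 * release whatever was cached before. */
static void sk_dst_set_m(struct sock_model *sk, struct dst_model *dst)
{
	struct dst_model *old_dst;

	old_dst = atomic_exchange(&sk->sk_dst_cache, dst);
	free(old_dst);			/* stands in for dst_release(old_dst) */
}

/* Sketch of sk_dst_reset(): same swap with a NULL replacement. */
static void sk_dst_reset_m(struct sock_model *sk)
{
	sk_dst_set_m(sk, NULL);
}

int main(void)
{
	struct sock_model sk = { NULL };

	sk_dst_set_m(&sk, calloc(1, sizeof(struct dst_model)));
	sk_dst_reset_m(&sk);
	printf("cached=%p\n", (void *)atomic_load(&sk.sk_dst_cache));
	return 0;
}
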
sk               1942 include/net/sock.h struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
sk               1944 include/net/sock.h struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
sk               1946 include/net/sock.h static inline void sk_dst_confirm(struct sock *sk)
sk               1948 include/net/sock.h 	if (!READ_ONCE(sk->sk_dst_pending_confirm))
sk               1949 include/net/sock.h 		WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
sk               1955 include/net/sock.h 		struct sock *sk = skb->sk;
sk               1961 include/net/sock.h 		if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
sk               1962 include/net/sock.h 			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
sk               1966 include/net/sock.h bool sk_mc_loop(struct sock *sk);
sk               1968 include/net/sock.h static inline bool sk_can_gso(const struct sock *sk)
sk               1970 include/net/sock.h 	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
sk               1973 include/net/sock.h void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
sk               1975 include/net/sock.h static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
sk               1977 include/net/sock.h 	sk->sk_route_nocaps |= flags;
sk               1978 include/net/sock.h 	sk->sk_route_caps &= ~flags;
sk               1981 include/net/sock.h static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
sk               1990 include/net/sock.h 	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
sk               1999 include/net/sock.h static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
sk               2004 include/net/sock.h 	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
sk               2012 include/net/sock.h static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
sk               2019 include/net/sock.h 	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
sk               2027 include/net/sock.h 	sk_wmem_queued_add(sk, copy);
sk               2028 include/net/sock.h 	sk_mem_charge(sk, copy);
sk               2038 include/net/sock.h static inline int sk_wmem_alloc_get(const struct sock *sk)
sk               2040 include/net/sock.h 	return refcount_read(&sk->sk_wmem_alloc) - 1;
sk               2049 include/net/sock.h static inline int sk_rmem_alloc_get(const struct sock *sk)
sk               2051 include/net/sock.h 	return atomic_read(&sk->sk_rmem_alloc);
sk               2060 include/net/sock.h static inline bool sk_has_allocations(const struct sock *sk)
sk               2062 include/net/sock.h 	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
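
sk_wmem_alloc_get() above subtracts one because sk_wmem_alloc is biased: it starts at 1 and that base reference is only dropped when the socket is torn down, so real transmit-queue usage is the counter minus the bias; sk_rmem_alloc carries no such bias. A short model with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* sk_wmem_alloc starts at 1; only the amount above that bias is real
 * transmit-queue usage. */
struct sock_model { unsigned int sk_wmem_alloc; unsigned int sk_rmem_alloc; };

static int sk_wmem_alloc_get_m(const struct sock_model *sk)
{
	return sk->sk_wmem_alloc - 1;
}

static int sk_rmem_alloc_get_m(const struct sock_model *sk)
{
	return sk->sk_rmem_alloc;
}

static bool sk_has_allocations_m(const struct sock_model *sk)
{
	return sk_wmem_alloc_get_m(sk) || sk_rmem_alloc_get_m(sk);
}

int main(void)
{
	struct sock_model sk = { .sk_wmem_alloc = 1, .sk_rmem_alloc = 0 };

	printf("has_allocations=%d\n", sk_has_allocations_m(&sk));
	return 0;
}
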
sk               2123 include/net/sock.h static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
sk               2125 include/net/sock.h 	if (sk->sk_txhash) {
sk               2127 include/net/sock.h 		skb->hash = sk->sk_txhash;
sk               2131 include/net/sock.h void skb_set_owner_w(struct sk_buff *skb, struct sock *sk);
sk               2141 include/net/sock.h static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk               2144 include/net/sock.h 	skb->sk = sk;
sk               2146 include/net/sock.h 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk               2147 include/net/sock.h 	sk_mem_charge(sk, skb->truesize);
sk               2150 include/net/sock.h void sk_reset_timer(struct sock *sk, struct timer_list *timer,
sk               2153 include/net/sock.h void sk_stop_timer(struct sock *sk, struct timer_list *timer);
sk               2155 include/net/sock.h int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
sk               2157 include/net/sock.h 			void (*destructor)(struct sock *sk,
sk               2159 include/net/sock.h int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
sk               2160 include/net/sock.h int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
sk               2162 include/net/sock.h int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
sk               2163 include/net/sock.h struct sk_buff *sock_dequeue_err_skb(struct sock *sk);
sk               2169 include/net/sock.h static inline int sock_error(struct sock *sk)
sk               2172 include/net/sock.h 	if (likely(!sk->sk_err))
sk               2174 include/net/sock.h 	err = xchg(&sk->sk_err, 0);
sk               2178 include/net/sock.h static inline unsigned long sock_wspace(struct sock *sk)
sk               2182 include/net/sock.h 	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
sk               2183 include/net/sock.h 		amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
sk               2194 include/net/sock.h static inline void sk_set_bit(int nr, struct sock *sk)
sk               2197 include/net/sock.h 	    !sock_flag(sk, SOCK_FASYNC))
sk               2200 include/net/sock.h 	set_bit(nr, &sk->sk_wq_raw->flags);
sk               2203 include/net/sock.h static inline void sk_clear_bit(int nr, struct sock *sk)
sk               2206 include/net/sock.h 	    !sock_flag(sk, SOCK_FASYNC))
sk               2209 include/net/sock.h 	clear_bit(nr, &sk->sk_wq_raw->flags);
sk               2212 include/net/sock.h static inline void sk_wake_async(const struct sock *sk, int how, int band)
sk               2214 include/net/sock.h 	if (sock_flag(sk, SOCK_FASYNC)) {
sk               2216 include/net/sock.h 		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
sk               2231 include/net/sock.h static inline void sk_stream_moderate_sndbuf(struct sock *sk)
sk               2235 include/net/sock.h 	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
sk               2238 include/net/sock.h 	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
sk               2240 include/net/sock.h 	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
sk               2243 include/net/sock.h struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
sk               2258 include/net/sock.h static inline struct page_frag *sk_page_frag(struct sock *sk)
sk               2260 include/net/sock.h 	if (gfpflags_normal_context(sk->sk_allocation))
sk               2263 include/net/sock.h 	return &sk->sk_frag;
sk               2266 include/net/sock.h bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
sk               2271 include/net/sock.h static inline bool sock_writeable(const struct sock *sk)
sk               2273 include/net/sock.h 	return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
sk               2281 include/net/sock.h static inline long sock_rcvtimeo(const struct sock *sk, bool noblock)
sk               2283 include/net/sock.h 	return noblock ? 0 : sk->sk_rcvtimeo;
sk               2286 include/net/sock.h static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
sk               2288 include/net/sock.h 	return noblock ? 0 : sk->sk_sndtimeo;
sk               2291 include/net/sock.h static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
sk               2293 include/net/sock.h 	int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
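
The helpers just listed express three simple policies: sock_writeable() reports a writable socket while less than half the send buffer is in flight, sock_rcvtimeo()/sock_sndtimeo() collapse to zero for non-blocking callers, and sock_rcvlowat() clamps the low-water mark to the request size unless MSG_WAITALL asks for everything. A compact model (the kernel additionally returns at least 1 from sock_rcvlowat(), which is not among the lines above; names are illustrative):

#include <stdio.h>

struct sock_model {
	unsigned int sk_wmem_alloc;
	int sk_sndbuf;
	int sk_rcvlowat;
	long sk_rcvtimeo;
};

/* sock_writeable(): writable while under half the send buffer. */
static int sock_writeable_m(const struct sock_model *sk)
{
	return sk->sk_wmem_alloc < (unsigned int)(sk->sk_sndbuf >> 1);
}

/* sock_rcvtimeo(): non-blocking callers never wait. */
static long sock_rcvtimeo_m(const struct sock_model *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

/* sock_rcvlowat(): full request for waitall, otherwise min(lowat, len),
 * never less than one byte. */
static int sock_rcvlowat_m(const struct sock_model *sk, int waitall, int len)
{
	int v = waitall ? len : (sk->sk_rcvlowat < len ? sk->sk_rcvlowat : len);

	return v ? v : 1;
}

int main(void)
{
	struct sock_model sk = {
		.sk_wmem_alloc = 4096, .sk_sndbuf = 65536,
		.sk_rcvlowat = 1, .sk_rcvtimeo = 0,
	};

	printf("writeable=%d rcvlowat=%d timeo=%ld\n",
	       sock_writeable_m(&sk), sock_rcvlowat_m(&sk, 0, 100),
	       sock_rcvtimeo_m(&sk, 1));
	return 0;
}
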
sk               2324 include/net/sock.h sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
sk               2326 include/net/sock.h 	SOCK_SKB_CB(skb)->dropcount = sock_flag(sk, SOCK_RXQ_OVFL) ?
sk               2327 include/net/sock.h 						atomic_read(&sk->sk_drops) : 0;
sk               2330 include/net/sock.h static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
sk               2334 include/net/sock.h 	atomic_add(segs, &sk->sk_drops);
sk               2337 include/net/sock.h static inline ktime_t sock_read_timestamp(struct sock *sk)
sk               2344 include/net/sock.h 		seq = read_seqbegin(&sk->sk_stamp_seq);
sk               2345 include/net/sock.h 		kt = sk->sk_stamp;
sk               2346 include/net/sock.h 	} while (read_seqretry(&sk->sk_stamp_seq, seq));
sk               2350 include/net/sock.h 	return READ_ONCE(sk->sk_stamp);
sk               2354 include/net/sock.h static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
sk               2357 include/net/sock.h 	write_seqlock(&sk->sk_stamp_seq);
sk               2358 include/net/sock.h 	sk->sk_stamp = kt;
sk               2359 include/net/sock.h 	write_sequnlock(&sk->sk_stamp_seq);
sk               2361 include/net/sock.h 	WRITE_ONCE(sk->sk_stamp, kt);
sk               2365 include/net/sock.h void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
sk               2367 include/net/sock.h void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
sk               2371 include/net/sock.h sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
sk               2382 include/net/sock.h 	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
sk               2383 include/net/sock.h 	    (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
sk               2384 include/net/sock.h 	    (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
sk               2386 include/net/sock.h 	     (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
sk               2387 include/net/sock.h 		__sock_recv_timestamp(msg, sk, skb);
sk               2389 include/net/sock.h 		sock_write_timestamp(sk, kt);
sk               2391 include/net/sock.h 	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
sk               2392 include/net/sock.h 		__sock_recv_wifi_status(msg, sk, skb);
sk               2395 include/net/sock.h void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
sk               2399 include/net/sock.h static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
sk               2407 include/net/sock.h 	if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
sk               2408 include/net/sock.h 		__sock_recv_ts_and_drops(msg, sk, skb);
sk               2409 include/net/sock.h 	else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sk               2410 include/net/sock.h 		sock_write_timestamp(sk, skb->tstamp);
sk               2411 include/net/sock.h 	else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
sk               2412 include/net/sock.h 		sock_write_timestamp(sk, 0);
sk               2426 include/net/sock.h static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
sk               2433 include/net/sock.h 			*tskey = sk->sk_tskey++;
sk               2435 include/net/sock.h 	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
sk               2439 include/net/sock.h static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
sk               2442 include/net/sock.h 	_sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
sk               2447 include/net/sock.h 	_sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
sk               2460 include/net/sock.h static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
sk               2462 include/net/sock.h 	__skb_unlink(skb, &sk->sk_receive_queue);
sk               2464 include/net/sock.h 	    !sk->sk_rx_skb_cache) {
sk               2465 include/net/sock.h 		sk->sk_rx_skb_cache = skb;
sk               2473 include/net/sock.h struct net *sock_net(const struct sock *sk)
sk               2475 include/net/sock.h 	return read_pnet(&sk->sk_net);
sk               2479 include/net/sock.h void sock_net_set(struct sock *sk, struct net *net)
sk               2481 include/net/sock.h 	write_pnet(&sk->sk_net, net);
sk               2486 include/net/sock.h 	if (skb->sk) {
sk               2487 include/net/sock.h 		struct sock *sk = skb->sk;
sk               2490 include/net/sock.h 		skb->sk = NULL;
sk               2491 include/net/sock.h 		return sk;
sk               2499 include/net/sock.h static inline bool sk_fullsock(const struct sock *sk)
sk               2501 include/net/sock.h 	return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
sk               2512 include/net/sock.h 	struct sock *sk = skb->sk;
sk               2514 include/net/sock.h 	if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) {
sk               2515 include/net/sock.h 		skb = sk->sk_validate_xmit_skb(sk, dev, skb);
sk               2531 include/net/sock.h static inline bool sk_listener(const struct sock *sk)
sk               2533 include/net/sock.h 	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
sk               2536 include/net/sock.h void sock_enable_timestamp(struct sock *sk, int flag);
sk               2537 include/net/sock.h int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
sk               2540 include/net/sock.h bool sk_ns_capable(const struct sock *sk,
sk               2542 include/net/sock.h bool sk_capable(const struct sock *sk, int cap);
sk               2543 include/net/sock.h bool sk_net_capable(const struct sock *sk, int cap);
sk               2545 include/net/sock.h void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
sk               2568 include/net/sock.h static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
sk               2572 include/net/sock.h 		return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
sk               2577 include/net/sock.h static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
sk               2581 include/net/sock.h 		return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
sk               2590 include/net/sock.h static inline void sk_pacing_shift_update(struct sock *sk, int val)
sk               2592 include/net/sock.h 	if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
sk               2594 include/net/sock.h 	WRITE_ONCE(sk->sk_pacing_shift, val);
sk               2602 include/net/sock.h static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
sk               2606 include/net/sock.h 	if (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif)
sk               2609 include/net/sock.h 	mdif = l3mdev_master_ifindex_by_index(sock_net(sk), dif);
sk               2610 include/net/sock.h 	if (mdif && mdif == sk->sk_bound_dev_if)
sk                 30 include/net/sock_reuseport.h extern int reuseport_alloc(struct sock *sk, bool bind_inany);
sk                 31 include/net/sock_reuseport.h extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
sk                 33 include/net/sock_reuseport.h extern void reuseport_detach_sock(struct sock *sk);
sk                 34 include/net/sock_reuseport.h extern struct sock *reuseport_select_sock(struct sock *sk,
sk                 38 include/net/sock_reuseport.h extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
sk                 39 include/net/sock_reuseport.h extern int reuseport_detach_prog(struct sock *sk);
sk                 41 include/net/sock_reuseport.h static inline bool reuseport_has_conns(struct sock *sk, bool set)
sk                 47 include/net/sock_reuseport.h 	reuse = rcu_dereference(sk->sk_reuseport_cb);
sk                 65 include/net/strparser.h 	struct sock *sk;
sk                138 include/net/strparser.h int strp_init(struct strparser *strp, struct sock *sk,
sk                 51 include/net/tcp.h void tcp_time_wait(struct sock *sk, int state, int timeo);
sk                255 include/net/tcp.h static inline bool tcp_under_memory_pressure(const struct sock *sk)
sk                257 include/net/tcp.h 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
sk                258 include/net/tcp.h 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
sk                280 include/net/tcp.h static inline bool tcp_out_of_memory(struct sock *sk)
sk                282 include/net/tcp.h 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
sk                283 include/net/tcp.h 	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
sk                288 include/net/tcp.h void sk_forced_mem_schedule(struct sock *sk, int size);
sk                290 include/net/tcp.h static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
sk                292 include/net/tcp.h 	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
sk                303 include/net/tcp.h bool tcp_check_oom(struct sock *sk, int shift);
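
tcp_out_of_memory() above only blames a socket for global memory exhaustion when it is itself holding a non-trivial amount of send data and the protocol as a whole has exceeded its hard tcp_mem limit. A small model; SOCK_MIN_SNDBUF is assumed here to be roughly twice a minimal skb truesize, the limits are in pages, and the names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define SOCK_MIN_SNDBUF_M 4608	/* assumption: ~2x minimal skb truesize */

struct sock_model {
	int sk_wmem_queued;
	long memory_allocated;	/* protocol-wide pages in use */
	long sysctl_mem[3];	/* min / pressure / max, in pages */
};

/* Sketch of tcp_out_of_memory(): hold the socket responsible only if it
 * queues real data while the protocol is over its hard limit. */
static bool tcp_out_of_memory_m(const struct sock_model *sk)
{
	return sk->sk_wmem_queued > SOCK_MIN_SNDBUF_M &&
	       sk->memory_allocated > sk->sysctl_mem[2];
}

int main(void)
{
	struct sock_model sk = {
		.sk_wmem_queued = 8192,
		.memory_allocated = 200000,
		.sysctl_mem = { 90000, 120000, 180000 },
	};

	printf("oom=%d\n", tcp_out_of_memory_m(&sk));
	return 0;
}
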
sk                317 include/net/tcp.h void tcp_shutdown(struct sock *sk, int how);
sk                323 include/net/tcp.h int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
sk                324 include/net/tcp.h int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
sk                325 include/net/tcp.h int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
sk                327 include/net/tcp.h int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
sk                329 include/net/tcp.h ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
sk                331 include/net/tcp.h void tcp_release_cb(struct sock *sk);
sk                333 include/net/tcp.h void tcp_write_timer_handler(struct sock *sk);
sk                334 include/net/tcp.h void tcp_delack_timer_handler(struct sock *sk);
sk                335 include/net/tcp.h int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
sk                336 include/net/tcp.h int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
sk                337 include/net/tcp.h void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
sk                338 include/net/tcp.h void tcp_rcv_space_adjust(struct sock *sk);
sk                339 include/net/tcp.h int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
sk                340 include/net/tcp.h void tcp_twsk_destructor(struct sock *sk);
sk                341 include/net/tcp.h ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
sk                345 include/net/tcp.h void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
sk                346 include/net/tcp.h static inline void tcp_dec_quickack_mode(struct sock *sk,
sk                349 include/net/tcp.h 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                377 include/net/tcp.h struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
sk                382 include/net/tcp.h void tcp_enter_loss(struct sock *sk);
sk                383 include/net/tcp.h void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
sk                385 include/net/tcp.h void tcp_update_metrics(struct sock *sk);
sk                386 include/net/tcp.h void tcp_init_metrics(struct sock *sk);
sk                389 include/net/tcp.h void tcp_close(struct sock *sk, long timeout);
sk                390 include/net/tcp.h void tcp_init_sock(struct sock *sk);
sk                391 include/net/tcp.h void tcp_init_transfer(struct sock *sk, int bpf_op);
sk                394 include/net/tcp.h int tcp_getsockopt(struct sock *sk, int level, int optname,
sk                396 include/net/tcp.h int tcp_setsockopt(struct sock *sk, int level, int optname,
sk                398 include/net/tcp.h int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
sk                400 include/net/tcp.h int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
sk                402 include/net/tcp.h void tcp_set_keepalive(struct sock *sk, int val);
sk                404 include/net/tcp.h int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
sk                406 include/net/tcp.h int tcp_set_rcvlowat(struct sock *sk, int val);
sk                407 include/net/tcp.h void tcp_data_ready(struct sock *sk);
sk                420 include/net/tcp.h u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
sk                422 include/net/tcp.h u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
sk                426 include/net/tcp.h 			  struct sock *sk, struct tcphdr *th);
sk                431 include/net/tcp.h void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
sk                432 include/net/tcp.h void tcp_v4_mtu_reduced(struct sock *sk);
sk                433 include/net/tcp.h void tcp_req_err(struct sock *sk, u32 seq, bool abort);
sk                434 include/net/tcp.h int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
sk                435 include/net/tcp.h struct sock *tcp_create_openreq_child(const struct sock *sk,
sk                438 include/net/tcp.h void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
sk                439 include/net/tcp.h struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
sk                444 include/net/tcp.h int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
sk                445 include/net/tcp.h int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
sk                446 include/net/tcp.h int tcp_connect(struct sock *sk);
sk                452 include/net/tcp.h struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
sk                456 include/net/tcp.h int tcp_disconnect(struct sock *sk, int flags);
sk                458 include/net/tcp.h void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
sk                459 include/net/tcp.h int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
sk                460 include/net/tcp.h void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
sk                463 include/net/tcp.h struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
sk                468 include/net/tcp.h struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
sk                486 include/net/tcp.h static inline void tcp_synq_overflow(const struct sock *sk)
sk                491 include/net/tcp.h 	if (sk->sk_reuseport) {
sk                494 include/net/tcp.h 		reuse = rcu_dereference(sk->sk_reuseport_cb);
sk                504 include/net/tcp.h 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
sk                506 include/net/tcp.h 		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
sk                510 include/net/tcp.h static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
sk                515 include/net/tcp.h 	if (sk->sk_reuseport) {
sk                518 include/net/tcp.h 		reuse = rcu_dereference(sk->sk_reuseport_cb);
sk                527 include/net/tcp.h 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
sk                560 include/net/tcp.h struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
sk                568 include/net/tcp.h void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
sk                570 include/net/tcp.h int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
sk                571 include/net/tcp.h int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
sk                572 include/net/tcp.h void tcp_retransmit_timer(struct sock *sk);
sk                575 include/net/tcp.h void tcp_enter_recovery(struct sock *sk, bool ece_ack);
sk                581 include/net/tcp.h int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
sk                588 include/net/tcp.h void tcp_send_fin(struct sock *sk);
sk                589 include/net/tcp.h void tcp_send_active_reset(struct sock *sk, gfp_t priority);
sk                592 include/net/tcp.h void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
sk                593 include/net/tcp.h void tcp_send_ack(struct sock *sk);
sk                594 include/net/tcp.h void tcp_send_delayed_ack(struct sock *sk);
sk                595 include/net/tcp.h void tcp_send_loss_probe(struct sock *sk);
sk                596 include/net/tcp.h bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
sk                601 include/net/tcp.h void tcp_rearm_rto(struct sock *sk);
sk                602 include/net/tcp.h void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
sk                603 include/net/tcp.h void tcp_reset(struct sock *sk);
sk                605 include/net/tcp.h void tcp_fin(struct sock *sk);
sk                609 include/net/tcp.h static inline void tcp_clear_xmit_timers(struct sock *sk)
sk                611 include/net/tcp.h 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
sk                612 include/net/tcp.h 		__sock_put(sk);
sk                614 include/net/tcp.h 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
sk                615 include/net/tcp.h 		__sock_put(sk);
sk                617 include/net/tcp.h 	inet_csk_clear_xmit_timers(sk);
sk                620 include/net/tcp.h unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
sk                621 include/net/tcp.h unsigned int tcp_current_mss(struct sock *sk);
sk                650 include/net/tcp.h int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk                653 include/net/tcp.h void tcp_initialize_rcv_mss(struct sock *sk);
sk                655 include/net/tcp.h int tcp_mtu_to_mss(struct sock *sk, int pmtu);
sk                656 include/net/tcp.h int tcp_mss_to_mtu(struct sock *sk, int mss);
sk                657 include/net/tcp.h void tcp_mtup_init(struct sock *sk);
sk                658 include/net/tcp.h void tcp_init_buffer_space(struct sock *sk);
sk                660 include/net/tcp.h static inline void tcp_bound_rto(const struct sock *sk)
sk                662 include/net/tcp.h 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
sk                663 include/net/tcp.h 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
sk                683 include/net/tcp.h static inline void tcp_fast_path_check(struct sock *sk)
sk                685 include/net/tcp.h 	struct tcp_sock *tp = tcp_sk(sk);
sk                689 include/net/tcp.h 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
sk                695 include/net/tcp.h static inline u32 tcp_rto_min(struct sock *sk)
sk                697 include/net/tcp.h 	const struct dst_entry *dst = __sk_dst_get(sk);
sk                705 include/net/tcp.h static inline u32 tcp_rto_min_us(struct sock *sk)
sk                707 include/net/tcp.h 	return jiffies_to_usecs(tcp_rto_min(sk));
sk                738 include/net/tcp.h u32 __tcp_select_window(struct sock *sk);
sk                740 include/net/tcp.h void tcp_send_window_probe(struct sock *sk);
sk               1043 include/net/tcp.h 	void (*init)(struct sock *sk);
sk               1045 include/net/tcp.h 	void (*release)(struct sock *sk);
sk               1048 include/net/tcp.h 	u32 (*ssthresh)(struct sock *sk);
sk               1050 include/net/tcp.h 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
sk               1052 include/net/tcp.h 	void (*set_state)(struct sock *sk, u8 new_state);
sk               1054 include/net/tcp.h 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
sk               1056 include/net/tcp.h 	void (*in_ack_event)(struct sock *sk, u32 flags);
sk               1058 include/net/tcp.h 	u32  (*undo_cwnd)(struct sock *sk);
sk               1060 include/net/tcp.h 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
sk               1062 include/net/tcp.h 	u32 (*min_tso_segs)(struct sock *sk);
sk               1064 include/net/tcp.h 	u32 (*sndbuf_expand)(struct sock *sk);
sk               1068 include/net/tcp.h 	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
sk               1070 include/net/tcp.h 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
sk               1080 include/net/tcp.h void tcp_assign_congestion_control(struct sock *sk);
sk               1081 include/net/tcp.h void tcp_init_congestion_control(struct sock *sk);
sk               1082 include/net/tcp.h void tcp_cleanup_congestion_control(struct sock *sk);
sk               1088 include/net/tcp.h int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
sk               1093 include/net/tcp.h u32 tcp_reno_ssthresh(struct sock *sk);
sk               1094 include/net/tcp.h u32 tcp_reno_undo_cwnd(struct sock *sk);
sk               1095 include/net/tcp.h void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
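
tcp_reno_ssthresh(), tcp_reno_undo_cwnd() and tcp_reno_cong_avoid() declared above are the classic NewReno defaults behind struct tcp_congestion_ops. A freestanding, simplified sketch of their behaviour (the real helpers operate on struct tcp_sock, cap slow start at ssthresh, and handle leftover ACKed segments; cwnd here counts segments and all names are illustrative):

#include <stdio.h>

struct tcp_model {
	unsigned int snd_cwnd;		/* congestion window, in segments */
	unsigned int snd_ssthresh;	/* slow-start threshold */
	unsigned int snd_cwnd_cnt;	/* fractional window growth */
};

/* tcp_reno_ssthresh(): halve the window on loss, never below 2. */
static unsigned int tcp_reno_ssthresh_m(const struct tcp_model *tp)
{
	unsigned int half = tp->snd_cwnd >> 1;

	return half > 2 ? half : 2;
}

/* tcp_reno_cong_avoid(): exponential growth below ssthresh, then roughly
 * one extra segment per window of ACKed data. */
static void tcp_reno_cong_avoid_m(struct tcp_model *tp, unsigned int acked)
{
	if (tp->snd_cwnd < tp->snd_ssthresh) {
		tp->snd_cwnd += acked;			/* slow start */
		return;
	}
	tp->snd_cwnd_cnt += acked;			/* congestion avoidance */
	if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		tp->snd_cwnd++;
	}
}

int main(void)
{
	struct tcp_model tp = { .snd_cwnd = 10, .snd_ssthresh = 16 };

	for (int i = 0; i < 20; i++)
		tcp_reno_cong_avoid_m(&tp, 2);
	printf("cwnd=%u ssthresh_after_loss=%u\n",
	       tp.snd_cwnd, tcp_reno_ssthresh_m(&tp));
	return 0;
}
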
sk               1109 include/net/tcp.h static inline bool tcp_ca_needs_ecn(const struct sock *sk)
sk               1111 include/net/tcp.h 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1116 include/net/tcp.h static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
sk               1118 include/net/tcp.h 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               1121 include/net/tcp.h 		icsk->icsk_ca_ops->set_state(sk, ca_state);
sk               1125 include/net/tcp.h static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
sk               1127 include/net/tcp.h 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1130 include/net/tcp.h 		icsk->icsk_ca_ops->cwnd_event(sk, event);
sk               1134 include/net/tcp.h void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
sk               1135 include/net/tcp.h void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
sk               1137 include/net/tcp.h void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
sk               1139 include/net/tcp.h void tcp_rate_check_app_limited(struct sock *sk);
sk               1194 include/net/tcp.h static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
sk               1197 include/net/tcp.h 	       (1 << inet_csk(sk)->icsk_ca_state);
sk               1204 include/net/tcp.h static inline __u32 tcp_current_ssthresh(const struct sock *sk)
sk               1206 include/net/tcp.h 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1208 include/net/tcp.h 	if (tcp_in_cwnd_reduction(sk))
sk               1219 include/net/tcp.h void tcp_enter_cwr(struct sock *sk);
sk               1249 include/net/tcp.h static inline bool tcp_is_cwnd_limited(const struct sock *sk)
sk               1251 include/net/tcp.h 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1266 include/net/tcp.h static inline bool tcp_needs_internal_pacing(const struct sock *sk)
sk               1268 include/net/tcp.h 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
sk               1274 include/net/tcp.h static inline unsigned long tcp_pacing_delay(const struct sock *sk,
sk               1277 include/net/tcp.h 	s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns;
sk               1279 include/net/tcp.h 	pacing_delay -= tcp_sk(sk)->tcp_clock_cache;
sk               1284 include/net/tcp.h static inline void tcp_reset_xmit_timer(struct sock *sk,
sk               1290 include/net/tcp.h 	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb),
sk               1300 include/net/tcp.h static inline unsigned long tcp_probe0_base(const struct sock *sk)
sk               1302 include/net/tcp.h 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
sk               1306 include/net/tcp.h static inline unsigned long tcp_probe0_when(const struct sock *sk,
sk               1309 include/net/tcp.h 	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
sk               1314 include/net/tcp.h static inline void tcp_check_probe_timer(struct sock *sk)
sk               1316 include/net/tcp.h 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
sk               1317 include/net/tcp.h 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
sk               1318 include/net/tcp.h 				     tcp_probe0_base(sk), TCP_RTO_MAX,
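
The probe0 helpers above back off the zero-window probe timer exponentially: tcp_probe0_base() never lets the interval fall below the minimum RTO, and tcp_probe0_when() left-shifts that base by icsk_backoff before clamping it. A sketch in milliseconds (the kernel works in jiffies; the RTO_MIN/RTO_MAX values and names below are assumptions for illustration):

#include <stdio.h>

#define TCP_RTO_MIN_M	  200	/* assumption: ~200 ms */
#define TCP_RTO_MAX_M	120000	/* assumption: 120 s */

struct sock_model { unsigned long icsk_rto; unsigned int icsk_backoff; };

/* tcp_probe0_base(): probe interval never below the minimum RTO. */
static unsigned long tcp_probe0_base_m(const struct sock_model *sk)
{
	return sk->icsk_rto > TCP_RTO_MIN_M ? sk->icsk_rto : TCP_RTO_MIN_M;
}

/* tcp_probe0_when(): exponential backoff, clamped to an upper bound. */
static unsigned long tcp_probe0_when_m(const struct sock_model *sk,
				       unsigned long max_when)
{
	unsigned long long when =
		(unsigned long long)tcp_probe0_base_m(sk) << sk->icsk_backoff;

	return when > max_when ? max_when : (unsigned long)when;
}

int main(void)
{
	struct sock_model sk = { .icsk_rto = 50, .icsk_backoff = 6 };

	printf("probe0 fires in %lu ms\n",
	       tcp_probe0_when_m(&sk, TCP_RTO_MAX_M));
	return 0;
}
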
sk               1347 include/net/tcp.h bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
sk               1348 include/net/tcp.h int tcp_filter(struct sock *sk, struct sk_buff *skb);
sk               1349 include/net/tcp.h void tcp_set_state(struct sock *sk, int state);
sk               1350 include/net/tcp.h void tcp_done(struct sock *sk);
sk               1351 include/net/tcp.h int tcp_abort(struct sock *sk, int err);
sk               1360 include/net/tcp.h void tcp_cwnd_restart(struct sock *sk, s32 delta);
sk               1362 include/net/tcp.h static inline void tcp_slow_start_after_idle_check(struct sock *sk)
sk               1364 include/net/tcp.h 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
sk               1365 include/net/tcp.h 	struct tcp_sock *tp = tcp_sk(sk);
sk               1368 include/net/tcp.h 	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
sk               1372 include/net/tcp.h 	if (delta > inet_csk(sk)->icsk_rto)
sk               1373 include/net/tcp.h 		tcp_cwnd_restart(sk, delta);
sk               1377 include/net/tcp.h void tcp_select_initial_window(const struct sock *sk, int __space,
sk               1382 include/net/tcp.h static inline int tcp_win_from_space(const struct sock *sk, int space)
sk               1384 include/net/tcp.h 	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
sk               1392 include/net/tcp.h static inline int tcp_space(const struct sock *sk)
sk               1394 include/net/tcp.h 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
sk               1395 include/net/tcp.h 				  READ_ONCE(sk->sk_backlog.len) -
sk               1396 include/net/tcp.h 				  atomic_read(&sk->sk_rmem_alloc));
sk               1399 include/net/tcp.h static inline int tcp_full_space(const struct sock *sk)
sk               1401 include/net/tcp.h 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
sk               1409 include/net/tcp.h static inline bool tcp_rmem_pressure(const struct sock *sk)
sk               1411 include/net/tcp.h 	int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
sk               1414 include/net/tcp.h 	return atomic_read(&sk->sk_rmem_alloc) > threshold;
sk               1421 include/net/tcp.h void tcp_enter_memory_pressure(struct sock *sk);
sk               1422 include/net/tcp.h void tcp_leave_memory_pressure(struct sock *sk);
sk               1453 include/net/tcp.h static inline int tcp_fin_time(const struct sock *sk)
sk               1455 include/net/tcp.h 	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
sk               1456 include/net/tcp.h 	const int rto = inet_csk(sk)->icsk_rto;
sk               1585 include/net/tcp.h 			const struct sock *sk, const struct sk_buff *skb);
sk               1586 include/net/tcp.h int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
sk               1589 include/net/tcp.h int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
sk               1591 include/net/tcp.h struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
sk               1597 include/net/tcp.h struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
sk               1601 include/net/tcp.h tcp_md5_do_lookup(const struct sock *sk,
sk               1607 include/net/tcp.h 	return __tcp_md5_do_lookup(sk, addr, family);
sk               1612 include/net/tcp.h static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
sk               1635 include/net/tcp.h void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
sk               1637 include/net/tcp.h void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
sk               1649 include/net/tcp.h void tcp_fastopen_destroy_cipher(struct sock *sk);
sk               1651 include/net/tcp.h int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
sk               1653 include/net/tcp.h void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
sk               1654 include/net/tcp.h struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
sk               1659 include/net/tcp.h bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
sk               1661 include/net/tcp.h bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
sk               1675 include/net/tcp.h void tcp_fastopen_active_disable(struct sock *sk);
sk               1676 include/net/tcp.h bool tcp_fastopen_active_should_disable(struct sock *sk);
sk               1677 include/net/tcp.h void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
sk               1678 include/net/tcp.h void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
sk               1682 include/net/tcp.h struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
sk               1686 include/net/tcp.h 	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
sk               1688 include/net/tcp.h 		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
sk               1720 include/net/tcp.h void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
sk               1721 include/net/tcp.h void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
sk               1740 include/net/tcp.h void tcp_write_queue_purge(struct sock *sk);
sk               1742 include/net/tcp.h static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
sk               1744 include/net/tcp.h 	return skb_rb_first(&sk->tcp_rtx_queue);
sk               1747 include/net/tcp.h static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
sk               1749 include/net/tcp.h 	return skb_rb_last(&sk->tcp_rtx_queue);
sk               1752 include/net/tcp.h static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
sk               1754 include/net/tcp.h 	return skb_peek(&sk->sk_write_queue);
sk               1757 include/net/tcp.h static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
sk               1759 include/net/tcp.h 	return skb_peek_tail(&sk->sk_write_queue);
sk               1762 include/net/tcp.h #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
sk               1763 include/net/tcp.h 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
sk               1765 include/net/tcp.h static inline struct sk_buff *tcp_send_head(const struct sock *sk)
sk               1767 include/net/tcp.h 	return skb_peek(&sk->sk_write_queue);
sk               1770 include/net/tcp.h static inline bool tcp_skb_is_last(const struct sock *sk,
sk               1773 include/net/tcp.h 	return skb_queue_is_last(&sk->sk_write_queue, skb);
sk               1776 include/net/tcp.h static inline bool tcp_write_queue_empty(const struct sock *sk)
sk               1778 include/net/tcp.h 	return skb_queue_empty(&sk->sk_write_queue);
sk               1781 include/net/tcp.h static inline bool tcp_rtx_queue_empty(const struct sock *sk)
sk               1783 include/net/tcp.h 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
sk               1786 include/net/tcp.h static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
sk               1788 include/net/tcp.h 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
sk               1791 include/net/tcp.h static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
sk               1793 include/net/tcp.h 	__skb_queue_tail(&sk->sk_write_queue, skb);
sk               1796 include/net/tcp.h 	if (sk->sk_write_queue.next == skb)
sk               1797 include/net/tcp.h 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
sk               1803 include/net/tcp.h 						  struct sock *sk)
sk               1805 include/net/tcp.h 	__skb_queue_before(&sk->sk_write_queue, skb, new);
sk               1808 include/net/tcp.h static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
sk               1811 include/net/tcp.h 	__skb_unlink(skb, &sk->sk_write_queue);
sk               1816 include/net/tcp.h static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
sk               1819 include/net/tcp.h 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
sk               1822 include/net/tcp.h static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
sk               1825 include/net/tcp.h 	tcp_rtx_queue_unlink(skb, sk);
sk               1826 include/net/tcp.h 	sk_wmem_free_skb(sk, skb);
sk               1829 include/net/tcp.h static inline void tcp_push_pending_frames(struct sock *sk)
sk               1831 include/net/tcp.h 	if (tcp_send_head(sk)) {
sk               1832 include/net/tcp.h 		struct tcp_sock *tp = tcp_sk(sk);
sk               1834 include/net/tcp.h 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
sk               1853 include/net/tcp.h static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
sk               1855 include/net/tcp.h 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
sk               1858 include/net/tcp.h static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
sk               1860 include/net/tcp.h 	return tcp_sk(sk)->highest_sack;
sk               1863 include/net/tcp.h static inline void tcp_highest_sack_reset(struct sock *sk)
sk               1865 include/net/tcp.h 	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
sk               1869 include/net/tcp.h static inline void tcp_highest_sack_replace(struct sock *sk,
sk               1873 include/net/tcp.h 	if (old == tcp_highest_sack(sk))
sk               1874 include/net/tcp.h 		tcp_sk(sk)->highest_sack = new;
sk               1878 include/net/tcp.h static inline bool inet_sk_transparent(const struct sock *sk)
sk               1880 include/net/tcp.h 	switch (sk->sk_state) {
sk               1882 include/net/tcp.h 		return inet_twsk(sk)->tw_transparent;
sk               1884 include/net/tcp.h 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
sk               1886 include/net/tcp.h 	return inet_sk(sk)->transparent;
sk               1922 include/net/tcp.h void tcp_v4_destroy_sock(struct sock *sk);
sk               1941 include/net/tcp.h static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
sk               1943 include/net/tcp.h 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1955 include/net/tcp.h int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
sk               1958 include/net/tcp.h 		     struct sock *sk, struct sk_buff *skb);
sk               1963 include/net/tcp.h 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
sk               1967 include/net/tcp.h 					 const struct sock *sk,
sk               1969 include/net/tcp.h 	int		(*md5_parse)(struct sock *sk,
sk               1979 include/net/tcp.h 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
sk               1983 include/net/tcp.h 					  const struct sock *sk,
sk               1993 include/net/tcp.h 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
sk               1997 include/net/tcp.h 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
sk               2005 include/net/tcp.h 					 const struct sock *sk, struct sk_buff *skb,
sk               2008 include/net/tcp.h 	tcp_synq_overflow(sk);
sk               2009 include/net/tcp.h 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
sk               2014 include/net/tcp.h 					 const struct sock *sk, struct sk_buff *skb,
sk               2027 include/net/tcp.h void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
sk               2028 include/net/tcp.h void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
sk               2031 include/net/tcp.h extern void tcp_rack_mark_lost(struct sock *sk);
sk               2034 include/net/tcp.h extern void tcp_rack_reo_timeout(struct sock *sk);
sk               2035 include/net/tcp.h extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
sk               2038 include/net/tcp.h static inline s64 tcp_rto_delta_us(const struct sock *sk)
sk               2040 include/net/tcp.h 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
sk               2041 include/net/tcp.h 	u32 rto = inet_csk(sk)->icsk_rto;
sk               2044 include/net/tcp.h 	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
sk               2083 include/net/tcp.h static inline int tcp_inq(struct sock *sk)
sk               2085 include/net/tcp.h 	struct tcp_sock *tp = tcp_sk(sk);
sk               2088 include/net/tcp.h 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
sk               2090 include/net/tcp.h 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
sk               2098 include/net/tcp.h 		if (answ && sock_flag(sk, SOCK_DONE))
sk               2126 include/net/tcp.h static inline void tcp_listendrop(const struct sock *sk)
sk               2128 include/net/tcp.h 	atomic_inc(&((struct sock *)sk)->sk_drops);
sk               2129 include/net/tcp.h 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
sk               2146 include/net/tcp.h 	int (*init)(struct sock *sk);
sk               2148 include/net/tcp.h 	void (*update)(struct sock *sk, struct proto *p,
sk               2149 include/net/tcp.h 		       void (*write_space)(struct sock *sk));
sk               2151 include/net/tcp.h 	void (*release)(struct sock *sk);
sk               2153 include/net/tcp.h 	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
sk               2154 include/net/tcp.h 	size_t (*get_info_size)(const struct sock *sk);
sk               2161 include/net/tcp.h int tcp_set_ulp(struct sock *sk, const char *name);
sk               2163 include/net/tcp.h void tcp_cleanup_ulp(struct sock *sk);
sk               2164 include/net/tcp.h void tcp_update_ulp(struct sock *sk, struct proto *p,
sk               2165 include/net/tcp.h 		    void (*write_space)(struct sock *sk));
sk               2174 include/net/tcp.h int tcp_bpf_init(struct sock *sk);
sk               2175 include/net/tcp.h void tcp_bpf_reinit(struct sock *sk);
sk               2176 include/net/tcp.h int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
sk               2178 include/net/tcp.h int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk               2180 include/net/tcp.h int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
sk               2189 include/net/tcp.h static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
sk               2195 include/net/tcp.h 	if (sk_fullsock(sk)) {
sk               2197 include/net/tcp.h 		sock_owned_by_me(sk);
sk               2200 include/net/tcp.h 	sock_ops.sk = sk;
sk               2213 include/net/tcp.h static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
sk               2217 include/net/tcp.h 	return tcp_call_bpf(sk, op, 2, args);
sk               2220 include/net/tcp.h static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
sk               2225 include/net/tcp.h 	return tcp_call_bpf(sk, op, 3, args);
sk               2229 include/net/tcp.h static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
sk               2234 include/net/tcp.h static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
sk               2239 include/net/tcp.h static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
sk               2247 include/net/tcp.h static inline u32 tcp_timeout_init(struct sock *sk)
sk               2251 include/net/tcp.h 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
sk               2258 include/net/tcp.h static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
sk               2262 include/net/tcp.h 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
sk               2269 include/net/tcp.h static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
sk               2271 include/net/tcp.h 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
sk               2274 include/net/tcp.h static inline void tcp_bpf_rtt(struct sock *sk)
sk               2276 include/net/tcp.h 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
sk               2277 include/net/tcp.h 		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
sk               2286 include/net/tcp.h 			     void (*cad)(struct sock *sk, u32 ack_seq));
sk               2302 include/net/tcp.h static inline u64 tcp_transmit_time(const struct sock *sk)
sk               2305 include/net/tcp.h 		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
sk               2306 include/net/tcp.h 			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
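Aside: the include/net/tcp.h fragments above show how tcp_space() derives free receive-window space from sk_rcvbuf minus the backlog and already-queued receive memory, scaled by sysctl_tcp_adv_win_scale in tcp_win_from_space(). Below is a minimal userspace sketch of that arithmetic; the exact scaling rule and all identifiers (win_from_space, tcp_space_model) are assumptions made for illustration, not copies of the kernel source.

	/*
	 * Userspace model of the tcp_win_from_space()/tcp_space() arithmetic
	 * visible in the fragments above. The adv_win_scale handling follows
	 * the conventional behaviour (positive scale reserves space>>scale
	 * for overhead; non-positive scale keeps only space >> -scale).
	 */
	#include <stdio.h>

	static int win_from_space(int space, int adv_win_scale)
	{
		return adv_win_scale <= 0 ?
			space >> -adv_win_scale :
			space - (space >> adv_win_scale);
	}

	static int tcp_space_model(int rcvbuf, int backlog_len, int rmem_alloc,
				   int adv_win_scale)
	{
		/* Free receive space = rcvbuf minus backlog and queued data. */
		return win_from_space(rcvbuf - backlog_len - rmem_alloc,
				      adv_win_scale);
	}

	int main(void)
	{
		/* 128 KiB receive buffer, 16 KiB queued, scale 1: half reserved. */
		printf("advertised space: %d\n",
		       tcp_space_model(131072, 0, 16384, 1));
		return 0;
	}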
sk                 18 include/net/timewait_sock.h 	int		(*twsk_unique)(struct sock *sk,
sk                 20 include/net/timewait_sock.h 	void		(*twsk_destructor)(struct sock *sk);
sk                 23 include/net/timewait_sock.h static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
sk                 25 include/net/timewait_sock.h 	if (sk->sk_prot->twsk_prot->twsk_unique != NULL)
sk                 26 include/net/timewait_sock.h 		return sk->sk_prot->twsk_prot->twsk_unique(sk, sktw, twp);
sk                 30 include/net/timewait_sock.h static inline void twsk_destructor(struct sock *sk)
sk                 32 include/net/timewait_sock.h 	if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
sk                 33 include/net/timewait_sock.h 		sk->sk_prot->twsk_prot->twsk_destructor(sk);
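Aside: the timewait_sock.h lines show twsk_unique() and twsk_destructor() dispatching through sk->sk_prot->twsk_prot only after a NULL check, since a protocol may leave either hook unset. A small sketch of that guarded-dispatch pattern follows; the struct and function names (twsk_ops, call_unique, call_destructor) are invented for the example.

	/* Guarded callback dispatch through an ops table with optional hooks. */
	#include <stdio.h>

	struct twsk_ops {
		int  (*unique)(void *sk, void *sktw, void *twp);
		void (*destructor)(void *sk);
	};

	static int call_unique(const struct twsk_ops *ops, void *sk,
			       void *sktw, void *twp)
	{
		if (ops->unique)
			return ops->unique(sk, sktw, twp);
		return 0;	/* no hook installed */
	}

	static void call_destructor(const struct twsk_ops *ops, void *sk)
	{
		if (ops->destructor)
			ops->destructor(sk);
	}

	int main(void)
	{
		struct twsk_ops ops = { 0 };

		call_destructor(&ops, NULL);	/* safe: hook is NULL */
		printf("unique: %d\n", call_unique(&ops, NULL, NULL, NULL));
		return 0;
	}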
sk                103 include/net/tls.h 	int  (*hash)(struct tls_device *device, struct sock *sk);
sk                104 include/net/tls.h 	void (*unhash)(struct tls_device *device, struct sock *sk);
sk                150 include/net/tls.h 	struct sock *sk;
sk                175 include/net/tls.h 	void (*saved_data_ready)(struct sock *sk);
sk                205 include/net/tls.h 	void (*sk_destruct)(struct sock *sk);
sk                259 include/net/tls.h 	int (*push_pending_record)(struct sock *sk, int flags);
sk                260 include/net/tls.h 	void (*sk_write_space)(struct sock *sk);
sk                285 include/net/tls.h 	void (*sk_destruct)(struct sock *sk);
sk                301 include/net/tls.h 	int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
sk                309 include/net/tls.h 			      struct sock *sk, u32 seq, u8 *rcd_sn,
sk                351 include/net/tls.h void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
sk                352 include/net/tls.h int wait_on_pending_writer(struct sock *sk, long *timeo);
sk                353 include/net/tls.h int tls_sk_query(struct sock *sk, int optname, char __user *optval,
sk                355 include/net/tls.h int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
sk                358 include/net/tls.h int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
sk                359 include/net/tls.h void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
sk                361 include/net/tls.h int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
sk                362 include/net/tls.h int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
sk                364 include/net/tls.h int tls_sw_sendpage(struct sock *sk, struct page *page,
sk                367 include/net/tls.h void tls_sw_release_resources_tx(struct sock *sk);
sk                369 include/net/tls.h void tls_sw_free_resources_rx(struct sock *sk);
sk                370 include/net/tls.h void tls_sw_release_resources_rx(struct sock *sk);
sk                372 include/net/tls.h int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk                374 include/net/tls.h bool tls_sw_stream_read(const struct sock *sk);
sk                379 include/net/tls.h int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
sk                380 include/net/tls.h int tls_device_sendpage(struct sock *sk, struct page *page,
sk                382 include/net/tls.h int tls_tx_records(struct sock *sk, int flags);
sk                397 include/net/tls.h int tls_push_sg(struct sock *sk, struct tls_context *ctx,
sk                400 include/net/tls.h int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
sk                402 include/net/tls.h void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
sk                448 include/net/tls.h tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
sk                451 include/net/tls.h static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
sk                454 include/net/tls.h 	return sk_fullsock(sk) &&
sk                455 include/net/tls.h 	       (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
sk                462 include/net/tls.h static inline void tls_err_abort(struct sock *sk, int err)
sk                464 include/net/tls.h 	sk->sk_err = err;
sk                465 include/net/tls.h 	sk->sk_error_report(sk);
sk                481 include/net/tls.h static inline struct tls_context *tls_get_ctx(const struct sock *sk)
sk                483 include/net/tls.h 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                491 include/net/tls.h static inline void tls_advance_record_sn(struct sock *sk,
sk                496 include/net/tls.h 		tls_err_abort(sk, EBADMSG);
sk                584 include/net/tls.h static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
sk                586 include/net/tls.h 	struct tls_context *ctx = tls_get_ctx(sk);
sk                593 include/net/tls.h void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
sk                594 include/net/tls.h void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
sk                613 include/net/tls.h tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
sk                615 include/net/tls.h 	return __tls_driver_ctx(tls_get_ctx(sk), direction);
sk                620 include/net/tls.h static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
sk                622 include/net/tls.h 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                629 include/net/tls.h tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
sk                631 include/net/tls.h 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                636 include/net/tls.h static inline void tls_offload_tx_resync_request(struct sock *sk)
sk                638 include/net/tls.h 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                644 include/net/tls.h static inline bool tls_offload_tx_resync_pending(struct sock *sk)
sk                646 include/net/tls.h 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                654 include/net/tls.h int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
sk                658 include/net/tls.h int decrypt_skb(struct sock *sk, struct sk_buff *skb,
sk                662 include/net/tls.h struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
sk                666 include/net/tls.h int tls_sw_fallback_init(struct sock *sk,
sk                673 include/net/tls.h int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
sk                674 include/net/tls.h void tls_device_free_resources_tx(struct sock *sk);
sk                675 include/net/tls.h int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
sk                676 include/net/tls.h void tls_device_offload_cleanup_rx(struct sock *sk);
sk                677 include/net/tls.h void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
sk                678 include/net/tls.h int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
sk                684 include/net/tls.h tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
sk                689 include/net/tls.h static inline void tls_device_free_resources_tx(struct sock *sk) {}
sk                692 include/net/tls.h tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
sk                697 include/net/tls.h static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
sk                699 include/net/tls.h tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
sk                701 include/net/tls.h static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
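Aside: tls_is_sk_tx_device_offloaded() above decides whether TLS TX offload is active by comparing a function pointer published on the socket (sk_validate_xmit_skb, read with smp_load_acquire) against tls_validate_xmit_skb. The sketch below models that publish-then-compare idiom with C11 atomics; every type and function here (fake_sock, enable_offload, tx_device_offloaded) is an illustrative stand-in, not kernel API.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_sk_buff;
	typedef struct fake_sk_buff *(*validate_fn)(struct fake_sk_buff *skb);

	static struct fake_sk_buff *tls_validate(struct fake_sk_buff *skb)
	{
		return skb;	/* stand-in for the real validate hook */
	}

	struct fake_sock {
		_Atomic(validate_fn) validate_xmit_skb;
	};

	static void enable_offload(struct fake_sock *sk)
	{
		/* Publish the hook with release semantics. */
		atomic_store_explicit(&sk->validate_xmit_skb, tls_validate,
				      memory_order_release);
	}

	static bool tx_device_offloaded(struct fake_sock *sk)
	{
		/* Offload is "on" iff the published hook is ours. */
		return atomic_load_explicit(&sk->validate_xmit_skb,
					    memory_order_acquire) == tls_validate;
	}

	int main(void)
	{
		struct fake_sock sk = { .validate_xmit_skb = NULL };

		printf("before: %d\n", tx_device_offloaded(&sk));
		enable_offload(&sk);
		printf("after:  %d\n", tx_device_offloaded(&sk));
		return 0;
	}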
sk                 34 include/net/transp_v6.h int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
sk                 37 include/net/transp_v6.h void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
sk                 39 include/net/transp_v6.h void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg,
sk                 41 include/net/transp_v6.h void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
sk                 44 include/net/transp_v6.h int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
sk                 62 include/net/transp_v6.h void inet6_destroy_sock(struct sock *sk);
sk                127 include/net/udp.h static inline __wsum udp_csum_outgoing(struct sock *sk, struct sk_buff *skb)
sk                131 include/net/udp.h 	skb_queue_walk(&sk->sk_write_queue, skb) {
sk                191 include/net/udp.h static inline int udp_lib_hash(struct sock *sk)
sk                197 include/net/udp.h void udp_lib_unhash(struct sock *sk);
sk                198 include/net/udp.h void udp_lib_rehash(struct sock *sk, u16 new_hash);
sk                200 include/net/udp.h static inline void udp_lib_close(struct sock *sk, long timeout)
sk                202 include/net/udp.h 	sk_common_release(sk);
sk                205 include/net/udp.h int udp_lib_get_port(struct sock *sk, unsigned short snum,
sk                246 include/net/udp.h static inline int udp_rqueue_get(struct sock *sk)
sk                248 include/net/udp.h 	return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
sk                263 include/net/udp.h void udp_destruct_sock(struct sock *sk);
sk                264 include/net/udp.h void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
sk                265 include/net/udp.h int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
sk                266 include/net/udp.h void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
sk                267 include/net/udp.h struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
sk                269 include/net/udp.h static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
sk                274 include/net/udp.h 	return __skb_recv_udp(sk, flags, noblock, &off, err);
sk                278 include/net/udp.h bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
sk                279 include/net/udp.h int udp_get_port(struct sock *sk, unsigned short snum,
sk                283 include/net/udp.h int udp_abort(struct sock *sk, int err);
sk                284 include/net/udp.h int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
sk                285 include/net/udp.h int udp_push_pending_frames(struct sock *sk);
sk                286 include/net/udp.h void udp_flush_pending_frames(struct sock *sk);
sk                287 include/net/udp.h int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size);
sk                290 include/net/udp.h int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
sk                291 include/net/udp.h int udp_init_sock(struct sock *sk);
sk                292 include/net/udp.h int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
sk                293 include/net/udp.h int __udp_disconnect(struct sock *sk, int flags);
sk                294 include/net/udp.h int udp_disconnect(struct sock *sk, int flags);
sk                299 include/net/udp.h int udp_lib_getsockopt(struct sock *sk, int level, int optname,
sk                301 include/net/udp.h int udp_lib_setsockopt(struct sock *sk, int level, int optname,
sk                416 include/net/udp.h #define __UDPX_MIB(sk, ipv4)						\
sk                418 include/net/udp.h 	ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :	\
sk                419 include/net/udp.h 				 sock_net(sk)->mib.udp_statistics) :	\
sk                420 include/net/udp.h 		(IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 :	\
sk                421 include/net/udp.h 				 sock_net(sk)->mib.udp_stats_in6);	\
sk                424 include/net/udp.h #define __UDPX_MIB(sk, ipv4)						\
sk                426 include/net/udp.h 	IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics :		\
sk                427 include/net/udp.h 			 sock_net(sk)->mib.udp_statistics;		\
sk                431 include/net/udp.h #define __UDPX_INC_STATS(sk, field) \
sk                432 include/net/udp.h 	__SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
sk                467 include/net/udp.h static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
sk                476 include/net/udp.h 	if (!inet_get_convert_csum(sk))
sk                496 include/net/udp.h 		atomic_add(segs_nr, &sk->sk_drops);
sk                497 include/net/udp.h 		SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
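Aside: the __UDPX_MIB() macro above picks the statistics block by two axes, UDP vs. UDP-Lite and IPv4 vs. IPv6. A userspace model of the same nested selection is shown below; the counter arrays and enum values are placeholders invented for the example.

	#include <stdio.h>

	enum { MIB_INDATAGRAMS, MIB_INERRORS, MIB_MAX };

	static unsigned long udp_stats[MIB_MAX], udplite_stats[MIB_MAX];
	static unsigned long udp_stats6[MIB_MAX], udplite_stats6[MIB_MAX];

	/* Mirror of the __UDPX_MIB() nested ternaries shown above. */
	static unsigned long *udpx_mib(int is_udplite, int ipv4)
	{
		return ipv4 ? (is_udplite ? udplite_stats : udp_stats)
			    : (is_udplite ? udplite_stats6 : udp_stats6);
	}

	int main(void)
	{
		udpx_mib(0, 1)[MIB_INDATAGRAMS]++;	/* UDP over IPv4 */
		udpx_mib(1, 0)[MIB_INERRORS]++;		/* UDP-Lite over IPv6 */
		printf("udp4 in: %lu, udplite6 err: %lu\n",
		       udp_stats[MIB_INDATAGRAMS], udplite_stats6[MIB_INERRORS]);
		return 0;
	}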
sk                 67 include/net/udp_tunnel.h typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
sk                 68 include/net/udp_tunnel.h typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
sk                 70 include/net/udp_tunnel.h typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
sk                 74 include/net/udp_tunnel.h typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
sk                141 include/net/udp_tunnel.h void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
sk                147 include/net/udp_tunnel.h int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
sk                172 include/net/udp_tunnel.h 	struct udp_sock *up = udp_sk(sock->sk);
sk                179 include/net/udp_tunnel.h 	if (sock->sk->sk_family == PF_INET6)
sk                 28 include/net/udplite.h static inline int udplite_sk_init(struct sock *sk)
sk                 30 include/net/udplite.h 	udp_init_sock(sk);
sk                 31 include/net/udplite.h 	udp_sk(sk)->pcflag = UDPLITE_BIT;
sk                 74 include/net/udplite.h static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb)
sk                 76 include/net/udplite.h 	const struct udp_sock *up = udp_sk(skb->sk);
sk                104 include/net/udplite.h 	skb_queue_walk(&sk->sk_write_queue, skb) {
sk                119 include/net/udplite.h 	const struct udp_sock *up = udp_sk(skb->sk);
sk                134 include/net/udplite.h int udplite_get_port(struct sock *sk, unsigned short snum,
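Aside: udplite_csum_outgoing() above reflects UDP-Lite's defining feature, a checksum that may cover only the first part of the datagram. The sketch below computes a ones'-complement sum over a buffer prefix as a conceptual model; it is not the kernel's scatter-gather csum_partial() path, and csum_prefix() is an invented helper (coverage 0 is treated as "cover everything").

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static uint16_t csum_prefix(const uint8_t *data, size_t len, size_t coverage)
	{
		uint32_t sum = 0;
		size_t n = coverage && coverage < len ? coverage : len;
		size_t i;

		for (i = 0; i + 1 < n; i += 2)
			sum += (uint32_t)data[i] << 8 | data[i + 1];
		if (i < n)
			sum += (uint32_t)data[i] << 8;	/* pad odd byte */
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		uint8_t payload[64];

		memset(payload, 0xab, sizeof(payload));
		printf("full: %04x, first 8 bytes only: %04x\n",
		       csum_prefix(payload, sizeof(payload), 0),
		       csum_prefix(payload, sizeof(payload), 8));
		return 0;
	}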
sk                377 include/net/vxlan.h 	return vs->sock->sk->sk_family;
sk                150 include/net/x25.h 	struct sock		sk;
sk                179 include/net/x25.h static inline struct x25_sock *x25_sk(const struct sock *sk)
sk                181 include/net/x25.h 	return (struct x25_sock *)sk;
sk                290 include/net/x25.h void x25_init_timers(struct sock *sk);
sk                 81 include/net/xdp_sock.h 	struct sock sk;
sk                362 include/net/xfrm.h 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                363 include/net/xfrm.h 	int			(*output_finish)(struct sock *sk, struct sk_buff *skb);
sk                573 include/net/xfrm.h 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
sk               1089 include/net/xfrm.h static inline int __xfrm_policy_check2(struct sock *sk, int dir,
sk               1096 include/net/xfrm.h 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
sk               1097 include/net/xfrm.h 		return __xfrm_policy_check(sk, ndir, skb, family);
sk               1101 include/net/xfrm.h 		__xfrm_policy_check(sk, ndir, skb, family);
sk               1104 include/net/xfrm.h static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
sk               1106 include/net/xfrm.h 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
sk               1109 include/net/xfrm.h static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
sk               1111 include/net/xfrm.h 	return xfrm_policy_check(sk, dir, skb, AF_INET);
sk               1114 include/net/xfrm.h static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
sk               1116 include/net/xfrm.h 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
sk               1119 include/net/xfrm.h static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
sk               1122 include/net/xfrm.h 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
sk               1125 include/net/xfrm.h static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
sk               1128 include/net/xfrm.h 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
sk               1168 include/net/xfrm.h int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
sk               1170 include/net/xfrm.h static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
sk               1172 include/net/xfrm.h 	sk->sk_policy[0] = NULL;
sk               1173 include/net/xfrm.h 	sk->sk_policy[1] = NULL;
sk               1175 include/net/xfrm.h 		return __xfrm_sk_clone_policy(sk, osk);
sk               1181 include/net/xfrm.h static inline void xfrm_sk_free_policy(struct sock *sk)
sk               1185 include/net/xfrm.h 	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
sk               1188 include/net/xfrm.h 		sk->sk_policy[0] = NULL;
sk               1190 include/net/xfrm.h 	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
sk               1193 include/net/xfrm.h 		sk->sk_policy[1] = NULL;
sk               1199 include/net/xfrm.h static inline void xfrm_sk_free_policy(struct sock *sk) {}
sk               1200 include/net/xfrm.h static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
sk               1203 include/net/xfrm.h static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
sk               1207 include/net/xfrm.h static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
sk               1211 include/net/xfrm.h static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
sk               1221 include/net/xfrm.h static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
sk               1226 include/net/xfrm.h static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
sk               1554 include/net/xfrm.h int xfrm_output(struct sock *sk, struct sk_buff *skb);
sk               1578 include/net/xfrm.h int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
sk               1579 include/net/xfrm.h int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
sk               1602 include/net/xfrm.h int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
sk               1603 include/net/xfrm.h int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
sk               1608 include/net/xfrm.h int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
sk               1609 include/net/xfrm.h int xfrm_user_policy(struct sock *sk, int optname,
sk               1612 include/net/xfrm.h static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
sk               1617 include/net/xfrm.h static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
sk               1655 include/net/xfrm.h int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
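Aside: the xfrm.h lines show xfrm4/xfrm6_policy_check() and their _reverse variants as thin wrappers that feed an address-family constant and a reverse flag into one generic check. The layering is sketched below; policy_check_generic() is a placeholder body and all names are invented for illustration.

	#include <stdio.h>

	enum { FAM_INET = 2, FAM_INET6 = 10 };

	static int policy_check_generic(int dir, unsigned short family, int reverse)
	{
		/* Placeholder: a real check would consult per-socket policy. */
		printf("check dir=%d family=%u reverse=%d\n", dir, family, reverse);
		return 1;
	}

	static int policy_check_v4(int dir)
	{
		return policy_check_generic(dir, FAM_INET, 0);
	}

	static int policy_check_v6(int dir)
	{
		return policy_check_generic(dir, FAM_INET6, 0);
	}

	static int policy_check_v4_reverse(int dir)
	{
		return policy_check_generic(dir, FAM_INET, 1);
	}

	int main(void)
	{
		return policy_check_v4(0) && policy_check_v6(1) &&
		       policy_check_v4_reverse(0) ? 0 : 1;
	}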
sk                  6 include/trace/events/net_probe_common.h #define TP_STORE_ADDR_PORTS_V4(__entry, inet, sk)			\
sk                 21 include/trace/events/net_probe_common.h #define TP_STORE_ADDR_PORTS(__entry, inet, sk)				\
sk                 23 include/trace/events/net_probe_common.h 		if (sk->sk_family == AF_INET6) {			\
sk                 28 include/trace/events/net_probe_common.h 			v6->sin6_addr = inet6_sk(sk)->saddr;		\
sk                 32 include/trace/events/net_probe_common.h 			v6->sin6_addr = sk->sk_v6_daddr;		\
sk                 34 include/trace/events/net_probe_common.h 			TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);	\
sk                 39 include/trace/events/net_probe_common.h #define TP_STORE_ADDR_PORTS(__entry, inet, sk)		\
sk                 40 include/trace/events/net_probe_common.h 	TP_STORE_ADDR_PORTS_V4(__entry, inet, sk);
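Aside: the TP_STORE_ADDR_PORTS() macros above fill trace-event storage with either an IPv4 or an IPv6 endpoint depending on sk->sk_family. A userspace analogue using sockaddr_storage is sketched below; store_addr_ports() and its simplified arguments are assumptions for the example, not the tracepoint macro itself.

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>

	static void store_addr_ports(struct sockaddr_storage *ss, int family,
				     const void *saddr, unsigned short sport)
	{
		memset(ss, 0, sizeof(*ss));
		if (family == AF_INET6) {
			struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss;

			v6->sin6_family = AF_INET6;
			v6->sin6_port = htons(sport);
			memcpy(&v6->sin6_addr, saddr, sizeof(v6->sin6_addr));
		} else {
			struct sockaddr_in *v4 = (struct sockaddr_in *)ss;

			v4->sin_family = AF_INET;
			v4->sin_port = htons(sport);
			memcpy(&v4->sin_addr, saddr, sizeof(v4->sin_addr));
		}
	}

	int main(void)
	{
		struct sockaddr_storage ss;
		struct in_addr a4;

		inet_pton(AF_INET, "192.0.2.1", &a4);
		store_addr_ports(&ss, AF_INET, &a4, 4242);
		printf("family stored: %d\n", ss.ss_family);
		return 0;
	}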
sk                 72 include/trace/events/sock.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb),
sk                 74 include/trace/events/sock.h 	TP_ARGS(sk, skb),
sk                 83 include/trace/events/sock.h 		__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
sk                 85 include/trace/events/sock.h 		__entry->sk_rcvbuf  = READ_ONCE(sk->sk_rcvbuf);
sk                 94 include/trace/events/sock.h 	TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind),
sk                 96 include/trace/events/sock.h 	TP_ARGS(sk, prot, allocated, kind),
sk                114 include/trace/events/sock.h 		__entry->sysctl_rmem = sk_get_rmem0(sk, prot);
sk                115 include/trace/events/sock.h 		__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
sk                116 include/trace/events/sock.h 		__entry->sysctl_wmem = sk_get_wmem0(sk, prot);
sk                117 include/trace/events/sock.h 		__entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
sk                118 include/trace/events/sock.h 		__entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued);
sk                139 include/trace/events/sock.h 	TP_PROTO(const struct sock *sk, const int oldstate, const int newstate),
sk                141 include/trace/events/sock.h 	TP_ARGS(sk, oldstate, newstate),
sk                158 include/trace/events/sock.h 		struct inet_sock *inet = inet_sk(sk);
sk                162 include/trace/events/sock.h 		__entry->skaddr = sk;
sk                166 include/trace/events/sock.h 		__entry->family = sk->sk_family;
sk                167 include/trace/events/sock.h 		__entry->protocol = sk->sk_protocol;
sk                178 include/trace/events/sock.h 		if (sk->sk_family == AF_INET6) {
sk                180 include/trace/events/sock.h 			*pin6 = sk->sk_v6_rcv_saddr;
sk                182 include/trace/events/sock.h 			*pin6 = sk->sk_v6_daddr;
sk                571 include/trace/events/sunrpc.h 			__entry->sock_state = socket->sk->sk_state;
sk                621 include/trace/events/sunrpc.h 			__entry->sock_state = socket->sk->sk_state;
sk                 28 include/trace/events/tcp.h 		if (sk->sk_family == AF_INET6) {			\
sk                 52 include/trace/events/tcp.h 	TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
sk                 54 include/trace/events/tcp.h 	TP_ARGS(sk, skb),
sk                 69 include/trace/events/tcp.h 		struct inet_sock *inet = inet_sk(sk);
sk                 73 include/trace/events/tcp.h 		__entry->skaddr = sk;
sk                 74 include/trace/events/tcp.h 		__entry->state = sk->sk_state;
sk                 86 include/trace/events/tcp.h 			      sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
sk                 97 include/trace/events/tcp.h 	TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
sk                 99 include/trace/events/tcp.h 	TP_ARGS(sk, skb)
sk                108 include/trace/events/tcp.h 	TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
sk                110 include/trace/events/tcp.h 	TP_ARGS(sk, skb)
sk                120 include/trace/events/tcp.h 	TP_PROTO(struct sock *sk),
sk                122 include/trace/events/tcp.h 	TP_ARGS(sk),
sk                136 include/trace/events/tcp.h 		struct inet_sock *inet = inet_sk(sk);
sk                139 include/trace/events/tcp.h 		__entry->skaddr = sk;
sk                151 include/trace/events/tcp.h 			       sk->sk_v6_rcv_saddr, sk->sk_v6_daddr);
sk                153 include/trace/events/tcp.h 		__entry->sock_cookie = sock_gen_cookie(sk);
sk                165 include/trace/events/tcp.h 	TP_PROTO(struct sock *sk),
sk                167 include/trace/events/tcp.h 	TP_ARGS(sk)
sk                172 include/trace/events/tcp.h 	TP_PROTO(struct sock *sk),
sk                174 include/trace/events/tcp.h 	TP_ARGS(sk)
sk                179 include/trace/events/tcp.h 	TP_PROTO(struct sock *sk),
sk                181 include/trace/events/tcp.h 	TP_ARGS(sk)
sk                186 include/trace/events/tcp.h 	TP_PROTO(const struct sock *sk, const struct request_sock *req),
sk                188 include/trace/events/tcp.h 	TP_ARGS(sk, req),
sk                205 include/trace/events/tcp.h 		__entry->skaddr = sk;
sk                231 include/trace/events/tcp.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb),
sk                233 include/trace/events/tcp.h 	TP_ARGS(sk, skb),
sk                255 include/trace/events/tcp.h 		const struct inet_sock *inet = inet_sk(sk);
sk                256 include/trace/events/tcp.h 		const struct tcp_sock *tp = tcp_sk(sk);
sk                261 include/trace/events/tcp.h 		TP_STORE_ADDR_PORTS(__entry, inet, sk);
sk                274 include/trace/events/tcp.h 		__entry->ssthresh = tcp_current_ssthresh(sk);
sk                276 include/trace/events/tcp.h 		__entry->sock_cookie = sock_gen_cookie(sk);
sk                 13 include/trace/events/udp.h 	TP_PROTO(int rc, struct sock *sk),
sk                 15 include/trace/events/udp.h 	TP_ARGS(rc, sk),
sk                 24 include/trace/events/udp.h 		__entry->lport = inet_sk(sk)->inet_num;
sk               3007 include/uapi/linux/bpf.h 	__bpf_md_ptr(struct bpf_sock *, sk);
sk               3304 include/uapi/linux/bpf.h 	__bpf_md_ptr(struct bpf_sock *, sk);
sk               3356 include/uapi/linux/bpf.h 	__bpf_md_ptr(struct bpf_sock *, sk);
sk               3606 include/uapi/linux/bpf.h 	__bpf_md_ptr(struct bpf_sock *, sk);
sk                 91 kernel/audit.c 	struct sock *sk;
sk                293 kernel/audit.c 	return aunet->sk;
sk                656 kernel/audit.c 	struct sock *sk;
sk                675 kernel/audit.c 	sk = audit_get_sk(net);
sk                679 kernel/audit.c 	rc = netlink_unicast(sk, skb, portid, 0);
sk                707 kernel/audit.c static int kauditd_send_queue(struct sock *sk, u32 portid,
sk                726 kernel/audit.c 		if (!sk) {
sk                734 kernel/audit.c 		rc = netlink_unicast(sk, skb, portid, 0);
sk                740 kernel/audit.c 				sk = NULL;
sk                810 kernel/audit.c 	struct sock *sk = NULL;
sk                825 kernel/audit.c 		sk = audit_get_sk(net);
sk                830 kernel/audit.c 		rc = kauditd_send_queue(sk, portid,
sk                834 kernel/audit.c 			sk = NULL;
sk                840 kernel/audit.c 		rc = kauditd_send_queue(sk, portid,
sk                844 kernel/audit.c 			sk = NULL;
sk                854 kernel/audit.c 		rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
sk                856 kernel/audit.c 					(sk ?
sk                860 kernel/audit.c 		sk = NULL;
sk                886 kernel/audit.c 	struct sock *sk = audit_get_sk(dest->net);
sk                893 kernel/audit.c 		netlink_unicast(sk, skb, dest->portid, 0);
sk                929 kernel/audit.c 	struct sock *sk = audit_get_sk(reply->net);
sk                936 kernel/audit.c 	netlink_unicast(sk, reply->skb, reply->portid, 0);
sk                958 kernel/audit.c 	struct net *net = sock_net(NETLINK_CB(request_skb).sk);
sk               1263 kernel/audit.c 						 sock_net(NETLINK_CB(skb).sk));
sk               1547 kernel/audit.c 	aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
sk               1548 kernel/audit.c 	if (aunet->sk == NULL) {
sk               1552 kernel/audit.c 	aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk               1567 kernel/audit.c 	netlink_kernel_release(aunet->sk);
sk               1165 kernel/auditfilter.c 	struct net *net = sock_net(NETLINK_CB(request_skb).sk);
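Aside: the kernel/audit.c lines suggest the kauditd flow of draining record queues through a per-namespace netlink socket and, once unicast fails, dropping the socket and parking remaining records on a hold queue. The sketch below models only that send-or-hold control flow; send_record(), its failure behaviour, and the fixed-size hold array are invented for illustration.

	#include <stdbool.h>
	#include <stdio.h>

	#define NREC 5

	static bool send_record(int *sock, int rec)
	{
		if (!sock || rec > 3)	/* pretend the link breaks after record 3 */
			return false;
		printf("sent record %d\n", rec);
		return true;
	}

	int main(void)
	{
		int sock_storage = 0, *sock = &sock_storage;
		int hold[NREC], held = 0;

		for (int rec = 1; rec <= NREC; rec++) {
			if (!send_record(sock, rec)) {
				sock = NULL;		/* stop using this socket */
				hold[held++] = rec;	/* keep the record for later */
			}
		}
		printf("%d record(s) parked on the hold queue\n", held);
		return 0;
	}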
sk                626 kernel/bpf/cgroup.c int __cgroup_bpf_run_filter_skb(struct sock *sk,
sk                636 kernel/bpf/cgroup.c 	if (!sk || !sk_fullsock(sk))
sk                639 kernel/bpf/cgroup.c 	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
sk                642 kernel/bpf/cgroup.c 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
sk                643 kernel/bpf/cgroup.c 	save_sk = skb->sk;
sk                644 kernel/bpf/cgroup.c 	skb->sk = sk;
sk                660 kernel/bpf/cgroup.c 	skb->sk = save_sk;
sk                679 kernel/bpf/cgroup.c int __cgroup_bpf_run_filter_sk(struct sock *sk,
sk                682 kernel/bpf/cgroup.c 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
sk                685 kernel/bpf/cgroup.c 	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
sk                703 kernel/bpf/cgroup.c int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
sk                709 kernel/bpf/cgroup.c 		.sk = sk,
sk                720 kernel/bpf/cgroup.c 	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
sk                728 kernel/bpf/cgroup.c 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
sk                751 kernel/bpf/cgroup.c int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
sk                755 kernel/bpf/cgroup.c 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
sk                986 kernel/bpf/cgroup.c int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
sk                990 kernel/bpf/cgroup.c 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
sk                992 kernel/bpf/cgroup.c 		.sk = sk,
sk               1023 kernel/bpf/cgroup.c 	lock_sock(sk);
sk               1026 kernel/bpf/cgroup.c 	release_sock(sk);
sk               1057 kernel/bpf/cgroup.c int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
sk               1062 kernel/bpf/cgroup.c 	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
sk               1064 kernel/bpf/cgroup.c 		.sk = sk,
sk               1107 kernel/bpf/cgroup.c 	lock_sock(sk);
sk               1110 kernel/bpf/cgroup.c 	release_sock(sk);
sk               1466 kernel/bpf/cgroup.c 	case offsetof(struct bpf_sockopt, sk):
sk               1507 kernel/bpf/cgroup.c 	case offsetof(struct bpf_sockopt, sk):
sk               1508 kernel/bpf/cgroup.c 		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
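Aside: __cgroup_bpf_run_filter_skb() above temporarily points skb->sk at the full socket before running the attached programs and restores the saved pointer afterwards. That save/run/restore idiom is sketched below; the struct definitions and run_programs() hook are stand-ins, and the 0/-1 return convention is assumed for the example.

	#include <stdio.h>

	struct sock { int id; };
	struct sk_buff { struct sock *sk; };

	static int run_programs(struct sk_buff *skb)
	{
		printf("programs see sk id %d\n", skb->sk ? skb->sk->id : -1);
		return 1;	/* 1 == allow */
	}

	static int run_filter_skb(struct sock *sk, struct sk_buff *skb)
	{
		struct sock *save_sk = skb->sk;
		int allow;

		skb->sk = sk;		/* let the programs observe the full socket */
		allow = run_programs(skb);
		skb->sk = save_sk;	/* restore whatever the skb carried before */
		return allow ? 0 : -1;
	}

	int main(void)
	{
		struct sock full = { .id = 7 };
		struct sk_buff skb = { .sk = NULL };

		return run_filter_skb(&full, &skb);
	}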
sk                 21 kernel/bpf/reuseport_array.c void bpf_sk_reuseport_detach(struct sock *sk)
sk                 25 kernel/bpf/reuseport_array.c 	write_lock_bh(&sk->sk_callback_lock);
sk                 26 kernel/bpf/reuseport_array.c 	socks = sk->sk_user_data;
sk                 28 kernel/bpf/reuseport_array.c 		WRITE_ONCE(sk->sk_user_data, NULL);
sk                 37 kernel/bpf/reuseport_array.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                 65 kernel/bpf/reuseport_array.c 	struct sock *sk;
sk                 76 kernel/bpf/reuseport_array.c 	sk = rcu_dereference_protected(array->ptrs[index],
sk                 78 kernel/bpf/reuseport_array.c 	if (sk) {
sk                 79 kernel/bpf/reuseport_array.c 		write_lock_bh(&sk->sk_callback_lock);
sk                 80 kernel/bpf/reuseport_array.c 		WRITE_ONCE(sk->sk_user_data, NULL);
sk                 82 kernel/bpf/reuseport_array.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                 96 kernel/bpf/reuseport_array.c 	struct sock *sk;
sk                128 kernel/bpf/reuseport_array.c 		sk = rcu_dereference(array->ptrs[i]);
sk                129 kernel/bpf/reuseport_array.c 		if (sk) {
sk                130 kernel/bpf/reuseport_array.c 			write_lock_bh(&sk->sk_callback_lock);
sk                136 kernel/bpf/reuseport_array.c 			sk->sk_user_data = NULL;
sk                137 kernel/bpf/reuseport_array.c 			write_unlock_bh(&sk->sk_callback_lock);
sk                184 kernel/bpf/reuseport_array.c 	struct sock *sk;
sk                191 kernel/bpf/reuseport_array.c 	sk = reuseport_array_lookup_elem(map, key);
sk                192 kernel/bpf/reuseport_array.c 	if (sk) {
sk                193 kernel/bpf/reuseport_array.c 		*(u64 *)value = sock_gen_cookie(sk);
sk                278 kernel/bpf/reuseport_array.c 	nsk = socket->sk;
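Aside: bpf_sk_reuseport_detach() above clears the pointer published in sk_user_data while holding sk_callback_lock, so concurrent readers never observe a half-torn-down value. A userspace sketch of that clear-under-lock pattern follows; fake_sock, detach_user_data(), and the pthread mutex standing in for write_lock_bh() are all illustrative assumptions.

	#include <pthread.h>
	#include <stdio.h>

	struct fake_sock {
		pthread_mutex_t callback_lock;
		void *user_data;
	};

	static void detach_user_data(struct fake_sock *sk)
	{
		pthread_mutex_lock(&sk->callback_lock);
		if (sk->user_data) {
			/* drop whatever reference user_data represented here */
			sk->user_data = NULL;
		}
		pthread_mutex_unlock(&sk->callback_lock);
	}

	int main(void)
	{
		static int token;
		struct fake_sock sk = {
			.callback_lock = PTHREAD_MUTEX_INITIALIZER,
			.user_data = &token,
		};

		detach_user_data(&sk);
		printf("user_data now %p\n", sk.user_data);
		return 0;
	}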
sk                234 kernel/bpf/xskmap.c 	if (sock->sk->sk_family != PF_XDP) {
sk                239 kernel/bpf/xskmap.c 	xs = (struct xdp_sock *)sock->sk;
sk               1095 lib/kobject.c  const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
sk               1102 lib/kobject.c  		ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
sk                 40 lib/kobject_uevent.c 	struct sock *sk;
sk                318 lib/kobject_uevent.c 		struct sock *uevent_sock = ue_sk->sk;
sk                411 lib/kobject_uevent.c 		ret = uevent_net_broadcast_tagged(net->uevent_sock->sk, env,
sk                738 lib/kobject_uevent.c 	net = sock_net(NETLINK_CB(skb).sk);
sk                745 lib/kobject_uevent.c 	ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
sk                769 lib/kobject_uevent.c 	ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT, &cfg);
sk                770 lib/kobject_uevent.c 	if (!ue_sk->sk) {
sk                779 lib/kobject_uevent.c 	if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
sk                792 lib/kobject_uevent.c 	if (sock_net(ue_sk->sk)->user_ns == &init_user_ns) {
sk                798 lib/kobject_uevent.c 	netlink_kernel_release(ue_sk->sk);
sk               6861 mm/memcontrol.c void mem_cgroup_sk_alloc(struct sock *sk)
sk               6879 mm/memcontrol.c 		sk->sk_memcg = memcg;
sk               6884 mm/memcontrol.c void mem_cgroup_sk_free(struct sock *sk)
sk               6886 mm/memcontrol.c 	if (sk->sk_memcg)
sk               6887 mm/memcontrol.c 		css_put(&sk->sk_memcg->css);
sk                831 net/9p/trans_fd.c 	csocket->sk->sk_allocation = GFP_NOIO;
sk                666 net/appletalk/aarp.c 	if (skb->sk)
sk                667 net/appletalk/aarp.c 		skb->priority = skb->sk->sk_priority;
sk                 75 net/appletalk/ddp.c static inline void __atalk_insert_socket(struct sock *sk)
sk                 77 net/appletalk/ddp.c 	sk_add_node(sk, &atalk_sockets);
sk                 80 net/appletalk/ddp.c static inline void atalk_remove_socket(struct sock *sk)
sk                 83 net/appletalk/ddp.c 	sk_del_node_init(sk);
sk                134 net/appletalk/ddp.c static struct sock *atalk_find_or_insert_socket(struct sock *sk,
sk                150 net/appletalk/ddp.c 	__atalk_insert_socket(sk); /* Wheee, it's free, assign and insert. */
sk                158 net/appletalk/ddp.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                160 net/appletalk/ddp.c 	if (sk_has_allocations(sk)) {
sk                161 net/appletalk/ddp.c 		sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
sk                162 net/appletalk/ddp.c 		add_timer(&sk->sk_timer);
sk                164 net/appletalk/ddp.c 		sock_put(sk);
sk                167 net/appletalk/ddp.c static inline void atalk_destroy_socket(struct sock *sk)
sk                169 net/appletalk/ddp.c 	atalk_remove_socket(sk);
sk                170 net/appletalk/ddp.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                172 net/appletalk/ddp.c 	if (sk_has_allocations(sk)) {
sk                173 net/appletalk/ddp.c 		timer_setup(&sk->sk_timer, atalk_destroy_timer, 0);
sk                174 net/appletalk/ddp.c 		sk->sk_timer.expires	= jiffies + SOCK_DESTROY_TIME;
sk                175 net/appletalk/ddp.c 		add_timer(&sk->sk_timer);
sk                177 net/appletalk/ddp.c 		sock_put(sk);
sk               1014 net/appletalk/ddp.c 	struct sock *sk;
sk               1032 net/appletalk/ddp.c 	sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
sk               1033 net/appletalk/ddp.c 	if (!sk)
sk               1037 net/appletalk/ddp.c 	sock_init_data(sock, sk);
sk               1040 net/appletalk/ddp.c 	sock_set_flag(sk, SOCK_ZAPPED);
sk               1048 net/appletalk/ddp.c 	struct sock *sk = sock->sk;
sk               1050 net/appletalk/ddp.c 	if (sk) {
sk               1051 net/appletalk/ddp.c 		sock_hold(sk);
sk               1052 net/appletalk/ddp.c 		lock_sock(sk);
sk               1054 net/appletalk/ddp.c 		sock_orphan(sk);
sk               1055 net/appletalk/ddp.c 		sock->sk = NULL;
sk               1056 net/appletalk/ddp.c 		atalk_destroy_socket(sk);
sk               1058 net/appletalk/ddp.c 		release_sock(sk);
sk               1059 net/appletalk/ddp.c 		sock_put(sk);
sk               1074 net/appletalk/ddp.c static int atalk_pick_and_bind_port(struct sock *sk, struct sockaddr_at *sat)
sk               1095 net/appletalk/ddp.c 		__atalk_insert_socket(sk);
sk               1096 net/appletalk/ddp.c 		at_sk(sk)->src_port = sat->sat_port;
sk               1109 net/appletalk/ddp.c static int atalk_autobind(struct sock *sk)
sk               1111 net/appletalk/ddp.c 	struct atalk_sock *at = at_sk(sk);
sk               1122 net/appletalk/ddp.c 	n = atalk_pick_and_bind_port(sk, &sat);
sk               1124 net/appletalk/ddp.c 		sock_reset_flag(sk, SOCK_ZAPPED);
sk               1133 net/appletalk/ddp.c 	struct sock *sk = sock->sk;
sk               1134 net/appletalk/ddp.c 	struct atalk_sock *at = at_sk(sk);
sk               1137 net/appletalk/ddp.c 	if (!sock_flag(sk, SOCK_ZAPPED) ||
sk               1144 net/appletalk/ddp.c 	lock_sock(sk);
sk               1165 net/appletalk/ddp.c 		err = atalk_pick_and_bind_port(sk, addr);
sk               1173 net/appletalk/ddp.c 		if (atalk_find_or_insert_socket(sk, addr))
sk               1177 net/appletalk/ddp.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk               1180 net/appletalk/ddp.c 	release_sock(sk);
sk               1188 net/appletalk/ddp.c 	struct sock *sk = sock->sk;
sk               1189 net/appletalk/ddp.c 	struct atalk_sock *at = at_sk(sk);
sk               1193 net/appletalk/ddp.c 	sk->sk_state   = TCP_CLOSE;
sk               1205 net/appletalk/ddp.c 	    !sock_flag(sk, SOCK_BROADCAST)) {
sk               1214 net/appletalk/ddp.c 	lock_sock(sk);
sk               1216 net/appletalk/ddp.c 	if (sock_flag(sk, SOCK_ZAPPED))
sk               1217 net/appletalk/ddp.c 		if (atalk_autobind(sk) < 0)
sk               1229 net/appletalk/ddp.c 	sk->sk_state = TCP_ESTABLISHED;
sk               1232 net/appletalk/ddp.c 	release_sock(sk);
sk               1244 net/appletalk/ddp.c 	struct sock *sk = sock->sk;
sk               1245 net/appletalk/ddp.c 	struct atalk_sock *at = at_sk(sk);
sk               1248 net/appletalk/ddp.c 	lock_sock(sk);
sk               1250 net/appletalk/ddp.c 	if (sock_flag(sk, SOCK_ZAPPED))
sk               1251 net/appletalk/ddp.c 		if (atalk_autobind(sk) < 0)
sk               1258 net/appletalk/ddp.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1275 net/appletalk/ddp.c 	release_sock(sk);
sk               1562 net/appletalk/ddp.c 	struct sock *sk = sock->sk;
sk               1563 net/appletalk/ddp.c 	struct atalk_sock *at = at_sk(sk);
sk               1581 net/appletalk/ddp.c 	lock_sock(sk);
sk               1584 net/appletalk/ddp.c 		if (sock_flag(sk, SOCK_ZAPPED))
sk               1585 net/appletalk/ddp.c 			if (atalk_autobind(sk) < 0)
sk               1596 net/appletalk/ddp.c 		    !sock_flag(sk, SOCK_BROADCAST)) {
sk               1601 net/appletalk/ddp.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1611 net/appletalk/ddp.c 	SOCK_DEBUG(sk, "SK %p: Got address.\n", sk);
sk               1632 net/appletalk/ddp.c 	SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
sk               1633 net/appletalk/ddp.c 			sk, size, dev->name);
sk               1636 net/appletalk/ddp.c 	release_sock(sk);
sk               1637 net/appletalk/ddp.c 	skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
sk               1638 net/appletalk/ddp.c 	lock_sock(sk);
sk               1646 net/appletalk/ddp.c 	SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
sk               1657 net/appletalk/ddp.c 	SOCK_DEBUG(sk, "SK %p: Copy user data (%zd bytes).\n", sk, len);
sk               1666 net/appletalk/ddp.c 	if (sk->sk_no_check_tx)
sk               1681 net/appletalk/ddp.c 			SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk);
sk               1690 net/appletalk/ddp.c 		SOCK_DEBUG(sk, "SK %p: Loop back.\n", sk);
sk               1710 net/appletalk/ddp.c 		SOCK_DEBUG(sk, "SK %p: send out.\n", sk);
sk               1721 net/appletalk/ddp.c 	SOCK_DEBUG(sk, "SK %p: Done write (%zd).\n", sk, len);
sk               1724 net/appletalk/ddp.c 	release_sock(sk);
sk               1731 net/appletalk/ddp.c 	struct sock *sk = sock->sk;
sk               1738 net/appletalk/ddp.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
sk               1740 net/appletalk/ddp.c 	lock_sock(sk);
sk               1749 net/appletalk/ddp.c 	if (sk->sk_type != SOCK_RAW) {
sk               1769 net/appletalk/ddp.c 	skb_free_datagram(sk, skb);	/* Free the datagram. */
sk               1772 net/appletalk/ddp.c 	release_sock(sk);
sk               1783 net/appletalk/ddp.c 	struct sock *sk = sock->sk;
sk               1789 net/appletalk/ddp.c 		long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
sk               1801 net/appletalk/ddp.c 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
sk                 29 net/atm/atm_misc.c 	struct sock *sk = sk_atm(vcc);
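Aside: the tail of the net/appletalk/ddp.c fragments shows ioctl-style queue accounting, where free send space is sk_sndbuf minus sk_wmem_alloc and readable bytes come from the datagram at the head of the receive queue. The sketch below models only the send-space calculation; the clamp to zero and the function name outq_free_space() are assumptions for the example.

	#include <stdio.h>

	static long outq_free_space(long sndbuf, long wmem_alloc)
	{
		long amount = sndbuf - wmem_alloc;

		return amount < 0 ? 0 : amount;	/* never report negative space */
	}

	int main(void)
	{
		printf("free send space: %ld\n", outq_free_space(65536, 70000));
		printf("free send space: %ld\n", outq_free_space(65536, 1000));
		return 0;
	}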
sk                 33 net/atm/atm_misc.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
sk                 38 net/atm/atm_misc.c 				   &sk->sk_rmem_alloc);
sk                 54 net/atm/clip.c 	struct sock *sk;
sk                 70 net/atm/clip.c 	sk = sk_atm(atmarpd);
sk                 71 net/atm/clip.c 	skb_queue_tail(&sk->sk_receive_queue, skb);
sk                 72 net/atm/clip.c 	sk->sk_data_ready(sk);
sk                 43 net/atm/common.c static void __vcc_insert_socket(struct sock *sk)
sk                 45 net/atm/common.c 	struct atm_vcc *vcc = atm_sk(sk);
sk                 47 net/atm/common.c 	sk->sk_hash = vcc->vci & (VCC_HTABLE_SIZE - 1);
sk                 48 net/atm/common.c 	sk_add_node(sk, head);
sk                 51 net/atm/common.c void vcc_insert_socket(struct sock *sk)
sk                 54 net/atm/common.c 	__vcc_insert_socket(sk);
sk                 59 net/atm/common.c static void vcc_remove_socket(struct sock *sk)
sk                 62 net/atm/common.c 	sk_del_node_init(sk);
sk                 68 net/atm/common.c 	struct sock *sk = sk_atm(vcc);
sk                 70 net/atm/common.c 	if (sk_wmem_alloc_get(sk) && !atm_may_send(vcc, size)) {
sk                 72 net/atm/common.c 			 sk_wmem_alloc_get(sk), size, sk->sk_sndbuf);
sk                 78 net/atm/common.c static void vcc_sock_destruct(struct sock *sk)
sk                 80 net/atm/common.c 	if (atomic_read(&sk->sk_rmem_alloc))
sk                 82 net/atm/common.c 		       __func__, atomic_read(&sk->sk_rmem_alloc));
sk                 84 net/atm/common.c 	if (refcount_read(&sk->sk_wmem_alloc))
sk                 86 net/atm/common.c 		       __func__, refcount_read(&sk->sk_wmem_alloc));
sk                 89 net/atm/common.c static void vcc_def_wakeup(struct sock *sk)
sk                 94 net/atm/common.c 	wq = rcu_dereference(sk->sk_wq);
sk                100 net/atm/common.c static inline int vcc_writable(struct sock *sk)
sk                102 net/atm/common.c 	struct atm_vcc *vcc = atm_sk(sk);
sk                105 net/atm/common.c 		refcount_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
sk                108 net/atm/common.c static void vcc_write_space(struct sock *sk)
sk                114 net/atm/common.c 	if (vcc_writable(sk)) {
sk                115 net/atm/common.c 		wq = rcu_dereference(sk->sk_wq);
sk                119 net/atm/common.c 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
sk                125 net/atm/common.c static void vcc_release_cb(struct sock *sk)
sk                127 net/atm/common.c 	struct atm_vcc *vcc = atm_sk(sk);
sk                142 net/atm/common.c 	struct sock *sk;
sk                145 net/atm/common.c 	sock->sk = NULL;
sk                148 net/atm/common.c 	sk = sk_alloc(net, family, GFP_KERNEL, &vcc_proto, kern);
sk                149 net/atm/common.c 	if (!sk)
sk                151 net/atm/common.c 	sock_init_data(sock, sk);
sk                152 net/atm/common.c 	sk->sk_state_change = vcc_def_wakeup;
sk                153 net/atm/common.c 	sk->sk_write_space = vcc_write_space;
sk                155 net/atm/common.c 	vcc = atm_sk(sk);
sk                160 net/atm/common.c 	refcount_set(&sk->sk_wmem_alloc, 1);
sk                161 net/atm/common.c 	atomic_set(&sk->sk_rmem_alloc, 0);
sk                169 net/atm/common.c 	sk->sk_destruct = vcc_sock_destruct;
sk                173 net/atm/common.c static void vcc_destroy_socket(struct sock *sk)
sk                175 net/atm/common.c 	struct atm_vcc *vcc = atm_sk(sk);
sk                187 net/atm/common.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk                196 net/atm/common.c 	vcc_remove_socket(sk);
sk                201 net/atm/common.c 	struct sock *sk = sock->sk;
sk                203 net/atm/common.c 	if (sk) {
sk                204 net/atm/common.c 		lock_sock(sk);
sk                205 net/atm/common.c 		vcc_destroy_socket(sock->sk);
sk                206 net/atm/common.c 		release_sock(sk);
sk                207 net/atm/common.c 		sock_put(sk);
sk                215 net/atm/common.c 	struct sock *sk = sk_atm(vcc);
sk                218 net/atm/common.c 	sk->sk_shutdown |= RCV_SHUTDOWN;
sk                219 net/atm/common.c 	sk->sk_err = -reply;
sk                221 net/atm/common.c 	sk->sk_state_change(sk);
sk                385 net/atm/common.c 	struct sock *sk = sk_atm(vcc);
sk                406 net/atm/common.c 	__vcc_insert_socket(sk);
sk                454 net/atm/common.c 	vcc_remove_socket(sk);
sk                525 net/atm/common.c 	struct sock *sk = sock->sk;
sk                543 net/atm/common.c 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &error);
sk                556 net/atm/common.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk                559 net/atm/common.c 		pr_debug("%d -= %d\n", atomic_read(&sk->sk_rmem_alloc),
sk                564 net/atm/common.c 	skb_free_datagram(sk, skb);
sk                570 net/atm/common.c 	struct sock *sk = sock->sk;
sk                576 net/atm/common.c 	lock_sock(sk);
sk                603 net/atm/common.c 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                622 net/atm/common.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                624 net/atm/common.c 	finish_wait(sk_sleep(sk), &wait);
sk                633 net/atm/common.c 	pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
sk                647 net/atm/common.c 	release_sock(sk);
sk                653 net/atm/common.c 	struct sock *sk = sock->sk;
sk                663 net/atm/common.c 	if (sk->sk_err)
sk                671 net/atm/common.c 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk                680 net/atm/common.c 	    vcc_writable(sk))
sk                 54 net/atm/ioctl.c 	struct sock *sk = sock->sk;
sk                 68 net/atm/ioctl.c 		error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk),
sk                 79 net/atm/ioctl.c 		skb = skb_peek(&sk->sk_receive_queue);
sk                141 net/atm/lec.c  		struct sock *sk;
sk                157 net/atm/lec.c  		sk = sk_atm(priv->lecd);
sk                158 net/atm/lec.c  		skb_queue_tail(&sk->sk_receive_queue, skb2);
sk                159 net/atm/lec.c  		sk->sk_data_ready(sk);
sk                443 net/atm/lec.c  			struct sock *sk;
sk                453 net/atm/lec.c  			sk = sk_atm(priv->lecd);
sk                454 net/atm/lec.c  			skb_queue_tail(&sk->sk_receive_queue, skb2);
sk                455 net/atm/lec.c  			sk->sk_data_ready(sk);
sk                513 net/atm/lec.c  	struct sock *sk;
sk                536 net/atm/lec.c  	sk = sk_atm(priv->lecd);
sk                537 net/atm/lec.c  	skb_queue_tail(&sk->sk_receive_queue, skb);
sk                538 net/atm/lec.c  	sk->sk_data_ready(sk);
sk                543 net/atm/lec.c  		skb_queue_tail(&sk->sk_receive_queue, data);
sk                544 net/atm/lec.c  		sk->sk_data_ready(sk);
sk                610 net/atm/lec.c  		struct sock *sk = sk_atm(vcc);
sk                613 net/atm/lec.c  		skb_queue_tail(&sk->sk_receive_queue, skb);
sk                614 net/atm/lec.c  		sk->sk_data_ready(sk);
sk                704 net/atm/mpc.c  		struct sock *sk = sk_atm(vcc);
sk                708 net/atm/mpc.c  		skb_queue_tail(&sk->sk_receive_queue, skb);
sk                709 net/atm/mpc.c  		sk->sk_data_ready(sk);
sk                978 net/atm/mpc.c  	struct sock *sk;
sk                992 net/atm/mpc.c  	sk = sk_atm(mpc->mpoad_vcc);
sk                993 net/atm/mpc.c  	skb_queue_tail(&sk->sk_receive_queue, skb);
sk                994 net/atm/mpc.c  	sk->sk_data_ready(sk);
sk               1249 net/atm/mpc.c  	struct sock *sk;
sk               1274 net/atm/mpc.c  	sk = sk_atm(vcc);
sk               1275 net/atm/mpc.c  	skb_queue_tail(&sk->sk_receive_queue, skb);
sk               1276 net/atm/mpc.c  	sk->sk_data_ready(sk);
sk                 70 net/atm/proc.c 	struct sock *sk;
sk                 73 net/atm/proc.c static inline int compare_family(struct sock *sk, int family)
sk                 75 net/atm/proc.c 	return !family || (sk->sk_family == family);
sk                 80 net/atm/proc.c 	struct sock *sk = *sock;
sk                 82 net/atm/proc.c 	if (sk == SEQ_START_TOKEN) {
sk                 86 net/atm/proc.c 			sk = hlist_empty(head) ? NULL : __sk_head(head);
sk                 87 net/atm/proc.c 			if (sk)
sk                 93 net/atm/proc.c 	for (; sk; sk = sk_next(sk)) {
sk                 94 net/atm/proc.c 		l -= compare_family(sk, family);
sk                 98 net/atm/proc.c 	if (!sk && ++*bucket < VCC_HTABLE_SIZE) {
sk                 99 net/atm/proc.c 		sk = sk_head(&vcc_hash[*bucket]);
sk                102 net/atm/proc.c 	sk = SEQ_START_TOKEN;
sk                104 net/atm/proc.c 	*sock = sk;
sk                113 net/atm/proc.c 	return __vcc_walk(&state->sk, family, &state->bucket, l) ?
sk                124 net/atm/proc.c 	state->sk = SEQ_START_TOKEN;
sk                180 net/atm/proc.c 	struct sock *sk = sk_atm(vcc);
sk                188 net/atm/proc.c 	switch (sk->sk_family) {
sk                196 net/atm/proc.c 		seq_printf(seq, "%3d", sk->sk_family);
sk                199 net/atm/proc.c 		   vcc->flags, sk->sk_err,
sk                200 net/atm/proc.c 		   sk_wmem_alloc_get(sk), sk->sk_sndbuf,
sk                201 net/atm/proc.c 		   sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
sk                202 net/atm/proc.c 		   refcount_read(&sk->sk_refcnt));
sk                257 net/atm/proc.c 		struct atm_vcc *vcc = atm_sk(state->sk);
sk                279 net/atm/proc.c 		struct atm_vcc *vcc = atm_sk(state->sk);
sk                302 net/atm/proc.c 		struct atm_vcc *vcc = atm_sk(state->sk);
sk                 30 net/atm/pvc.c  	struct sock *sk = sock->sk;
sk                 40 net/atm/pvc.c  	lock_sock(sk);
sk                 55 net/atm/pvc.c  	release_sock(sk);
sk                 68 net/atm/pvc.c  	struct sock *sk = sock->sk;
sk                 71 net/atm/pvc.c  	lock_sock(sk);
sk                 73 net/atm/pvc.c  	release_sock(sk);
sk                 80 net/atm/pvc.c  	struct sock *sk = sock->sk;
sk                 83 net/atm/pvc.c  	lock_sock(sk);
sk                 85 net/atm/pvc.c  	release_sock(sk);
sk                 26 net/atm/raw.c  		struct sock *sk = sk_atm(vcc);
sk                 28 net/atm/raw.c  		skb_queue_tail(&sk->sk_receive_queue, skb);
sk                 29 net/atm/raw.c  		sk->sk_data_ready(sk);
sk                 35 net/atm/raw.c  	struct sock *sk = sk_atm(vcc);
sk                 38 net/atm/raw.c  		 vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
sk                 39 net/atm/raw.c  	WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
sk                 41 net/atm/raw.c  	sk->sk_write_space(sk);
sk                 68 net/atm/signaling.c 	struct sock *sk;
sk                 74 net/atm/signaling.c 	sk = sk_atm(vcc);
sk                 78 net/atm/signaling.c 		sk->sk_err = -msg->reply;
sk                 99 net/atm/signaling.c 		sk->sk_err = -msg->reply;
sk                104 net/atm/signaling.c 		sk = sk_atm(vcc);
sk                106 net/atm/signaling.c 		lock_sock(sk);
sk                107 net/atm/signaling.c 		if (sk_acceptq_is_full(sk)) {
sk                112 net/atm/signaling.c 		sk->sk_ack_backlog++;
sk                113 net/atm/signaling.c 		skb_queue_tail(&sk->sk_receive_queue, skb);
sk                114 net/atm/signaling.c 		pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
sk                115 net/atm/signaling.c 		sk->sk_state_change(sk);
sk                117 net/atm/signaling.c 		release_sock(sk);
sk                128 net/atm/signaling.c 		sk->sk_err_soft = -msg->reply;
sk                136 net/atm/signaling.c 	sk->sk_state_change(sk);
sk                 50 net/atm/svc.c  	struct sock *sk = sk_atm(vcc);
sk                 56 net/atm/svc.c  			prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sk                 61 net/atm/svc.c  		finish_wait(sk_sleep(sk), &wait);
sk                 65 net/atm/svc.c  	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk                 77 net/atm/svc.c  	struct sock *sk = sock->sk;
sk                 80 net/atm/svc.c  	if (sk) {
sk                 99 net/atm/svc.c  	struct sock *sk = sock->sk;
sk                106 net/atm/svc.c  	lock_sock(sk);
sk                132 net/atm/svc.c  		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sk                137 net/atm/svc.c  	finish_wait(sk_sleep(sk), &wait);
sk                143 net/atm/svc.c  	if (!sk->sk_err)
sk                145 net/atm/svc.c  	error = -sk->sk_err;
sk                147 net/atm/svc.c  	release_sock(sk);
sk                155 net/atm/svc.c  	struct sock *sk = sock->sk;
sk                161 net/atm/svc.c  	lock_sock(sk);
sk                180 net/atm/svc.c  		if (sk->sk_err) {
sk                181 net/atm/svc.c  			error = -sk->sk_err;
sk                214 net/atm/svc.c  		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                218 net/atm/svc.c  				prepare_to_wait(sk_sleep(sk), &wait,
sk                237 net/atm/svc.c  				prepare_to_wait(sk_sleep(sk), &wait,
sk                241 net/atm/svc.c  			if (!sk->sk_err)
sk                244 net/atm/svc.c  					prepare_to_wait(sk_sleep(sk), &wait,
sk                255 net/atm/svc.c  		finish_wait(sk_sleep(sk), &wait);
sk                262 net/atm/svc.c  		if (sk->sk_err) {
sk                263 net/atm/svc.c  			error = -sk->sk_err;
sk                278 net/atm/svc.c  	release_sock(sk);
sk                285 net/atm/svc.c  	struct sock *sk = sock->sk;
sk                290 net/atm/svc.c  	lock_sock(sk);
sk                303 net/atm/svc.c  		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sk                308 net/atm/svc.c  	finish_wait(sk_sleep(sk), &wait);
sk                314 net/atm/svc.c  	vcc_insert_socket(sk);
sk                315 net/atm/svc.c  	sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
sk                316 net/atm/svc.c  	error = -sk->sk_err;
sk                318 net/atm/svc.c  	release_sock(sk);
sk                325 net/atm/svc.c  	struct sock *sk = sock->sk;
sk                332 net/atm/svc.c  	lock_sock(sk);
sk                334 net/atm/svc.c  	error = svc_create(sock_net(sk), newsock, 0, kern);
sk                344 net/atm/svc.c  		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                345 net/atm/svc.c  		while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
sk                350 net/atm/svc.c  				error = -sk->sk_err;
sk                357 net/atm/svc.c  			release_sock(sk);
sk                359 net/atm/svc.c  			lock_sock(sk);
sk                364 net/atm/svc.c  			prepare_to_wait(sk_sleep(sk), &wait,
sk                367 net/atm/svc.c  		finish_wait(sk_sleep(sk), &wait);
sk                384 net/atm/svc.c  		sk->sk_ack_backlog--;
sk                399 net/atm/svc.c  			release_sock(sk);
sk                401 net/atm/svc.c  			lock_sock(sk);
sk                417 net/atm/svc.c  	release_sock(sk);
sk                434 net/atm/svc.c  	struct sock *sk = sk_atm(vcc);
sk                440 net/atm/svc.c  		prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
sk                447 net/atm/svc.c  	finish_wait(sk_sleep(sk), &wait);
sk                450 net/atm/svc.c  	return -sk->sk_err;
sk                456 net/atm/svc.c  	struct sock *sk = sock->sk;
sk                460 net/atm/svc.c  	lock_sock(sk);
sk                494 net/atm/svc.c  	release_sock(sk);
sk                501 net/atm/svc.c  	struct sock *sk = sock->sk;
sk                504 net/atm/svc.c  	lock_sock(sk);
sk                522 net/atm/svc.c  	release_sock(sk);
sk                530 net/atm/svc.c  	struct sock *sk = sock->sk;
sk                534 net/atm/svc.c  	lock_sock(sk);
sk                544 net/atm/svc.c  		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                549 net/atm/svc.c  	finish_wait(sk_sleep(sk), &wait);
sk                550 net/atm/svc.c  	error = -xchg(&sk->sk_err_soft, 0);
sk                552 net/atm/svc.c  	release_sock(sk);
sk                559 net/atm/svc.c  	struct sock *sk = sock->sk;
sk                563 net/atm/svc.c  	lock_sock(sk);
sk                567 net/atm/svc.c  		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                572 net/atm/svc.c  	finish_wait(sk_sleep(sk), &wait);
sk                577 net/atm/svc.c  	error = -xchg(&sk->sk_err_soft, 0);
sk                579 net/atm/svc.c  	release_sock(sk);
sk                 55 net/ax25/af_ax25.c static void ax25_free_sock(struct sock *sk)
sk                 57 net/ax25/af_ax25.c 	ax25_cb_put(sk_to_ax25(sk));
sk                161 net/ax25/af_ax25.c 		if (s->sk && !ax25cmp(&s->source_addr, addr) &&
sk                162 net/ax25/af_ax25.c 		    s->sk->sk_type == type && s->sk->sk_state == TCP_LISTEN) {
sk                165 net/ax25/af_ax25.c 				sock_hold(s->sk);
sk                167 net/ax25/af_ax25.c 				return s->sk;
sk                182 net/ax25/af_ax25.c 	struct sock *sk = NULL;
sk                187 net/ax25/af_ax25.c 		if (s->sk && !ax25cmp(&s->source_addr, my_addr) &&
sk                189 net/ax25/af_ax25.c 		    s->sk->sk_type == type) {
sk                190 net/ax25/af_ax25.c 			sk = s->sk;
sk                191 net/ax25/af_ax25.c 			sock_hold(sk);
sk                198 net/ax25/af_ax25.c 	return sk;
sk                212 net/ax25/af_ax25.c 		if (s->sk && s->sk->sk_type != SOCK_SEQPACKET)
sk                246 net/ax25/af_ax25.c 		if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 &&
sk                247 net/ax25/af_ax25.c 		    s->sk->sk_type == SOCK_RAW &&
sk                248 net/ax25/af_ax25.c 		    s->sk->sk_protocol == proto &&
sk                250 net/ax25/af_ax25.c 		    atomic_read(&s->sk->sk_rmem_alloc) <= s->sk->sk_rcvbuf) {
sk                253 net/ax25/af_ax25.c 			if (sock_queue_rcv_skb(s->sk, copy) != 0)
sk                271 net/ax25/af_ax25.c 	struct sock *sk;
sk                273 net/ax25/af_ax25.c 	sk=ax25->sk;
sk                275 net/ax25/af_ax25.c 	bh_lock_sock(sk);
sk                276 net/ax25/af_ax25.c 	sock_hold(sk);
sk                278 net/ax25/af_ax25.c 	bh_unlock_sock(sk);
sk                279 net/ax25/af_ax25.c 	sock_put(sk);
sk                302 net/ax25/af_ax25.c 	if (ax25->sk != NULL) {
sk                303 net/ax25/af_ax25.c 		while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) {
sk                304 net/ax25/af_ax25.c 			if (skb->sk != ax25->sk) {
sk                306 net/ax25/af_ax25.c 				ax25_cb *sax25 = sk_to_ax25(skb->sk);
sk                309 net/ax25/af_ax25.c 				sock_orphan(skb->sk);
sk                312 net/ax25/af_ax25.c 				skb->sk->sk_state = TCP_LISTEN;
sk                320 net/ax25/af_ax25.c 		skb_queue_purge(&ax25->sk->sk_write_queue);
sk                323 net/ax25/af_ax25.c 	if (ax25->sk != NULL) {
sk                324 net/ax25/af_ax25.c 		if (sk_has_allocations(ax25->sk)) {
sk                330 net/ax25/af_ax25.c 			struct sock *sk=ax25->sk;
sk                331 net/ax25/af_ax25.c 			ax25->sk=NULL;
sk                332 net/ax25/af_ax25.c 			sock_put(sk);
sk                533 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk                549 net/ax25/af_ax25.c 	lock_sock(sk);
sk                550 net/ax25/af_ax25.c 	ax25 = sk_to_ax25(sk);
sk                648 net/ax25/af_ax25.c 		if (sk->sk_type == SOCK_SEQPACKET &&
sk                650 net/ax25/af_ax25.c 		    sk->sk_state == TCP_LISTEN)) {
sk                676 net/ax25/af_ax25.c 	release_sock(sk);
sk                684 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk                704 net/ax25/af_ax25.c 	lock_sock(sk);
sk                705 net/ax25/af_ax25.c 	ax25 = sk_to_ax25(sk);
sk                767 net/ax25/af_ax25.c 		release_sock(sk);
sk                770 net/ax25/af_ax25.c 	release_sock(sk);
sk                780 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk                783 net/ax25/af_ax25.c 	lock_sock(sk);
sk                784 net/ax25/af_ax25.c 	if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_LISTEN) {
sk                785 net/ax25/af_ax25.c 		sk->sk_max_ack_backlog = backlog;
sk                786 net/ax25/af_ax25.c 		sk->sk_state           = TCP_LISTEN;
sk                792 net/ax25/af_ax25.c 	release_sock(sk);
sk                810 net/ax25/af_ax25.c 	struct sock *sk;
sk                867 net/ax25/af_ax25.c 	sk = sk_alloc(net, PF_AX25, GFP_ATOMIC, &ax25_proto, kern);
sk                868 net/ax25/af_ax25.c 	if (sk == NULL)
sk                871 net/ax25/af_ax25.c 	ax25 = ax25_sk(sk)->cb = ax25_create_cb();
sk                873 net/ax25/af_ax25.c 		sk_free(sk);
sk                877 net/ax25/af_ax25.c 	sock_init_data(sock, sk);
sk                879 net/ax25/af_ax25.c 	sk->sk_destruct = ax25_free_sock;
sk                881 net/ax25/af_ax25.c 	sk->sk_protocol = protocol;
sk                883 net/ax25/af_ax25.c 	ax25->sk    = sk;
sk                890 net/ax25/af_ax25.c 	struct sock *sk;
sk                893 net/ax25/af_ax25.c 	sk = sk_alloc(sock_net(osk), PF_AX25, GFP_ATOMIC, osk->sk_prot, 0);
sk                894 net/ax25/af_ax25.c 	if (sk == NULL)
sk                898 net/ax25/af_ax25.c 		sk_free(sk);
sk                908 net/ax25/af_ax25.c 		sk_free(sk);
sk                913 net/ax25/af_ax25.c 	sock_init_data(NULL, sk);
sk                915 net/ax25/af_ax25.c 	sk->sk_type     = osk->sk_type;
sk                916 net/ax25/af_ax25.c 	sk->sk_priority = osk->sk_priority;
sk                917 net/ax25/af_ax25.c 	sk->sk_protocol = osk->sk_protocol;
sk                918 net/ax25/af_ax25.c 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
sk                919 net/ax25/af_ax25.c 	sk->sk_sndbuf   = osk->sk_sndbuf;
sk                920 net/ax25/af_ax25.c 	sk->sk_state    = TCP_ESTABLISHED;
sk                921 net/ax25/af_ax25.c 	sock_copy_flags(sk, osk);
sk                945 net/ax25/af_ax25.c 			sk_free(sk);
sk                951 net/ax25/af_ax25.c 	ax25_sk(sk)->cb = ax25;
sk                952 net/ax25/af_ax25.c 	sk->sk_destruct = ax25_free_sock;
sk                953 net/ax25/af_ax25.c 	ax25->sk    = sk;
sk                955 net/ax25/af_ax25.c 	return sk;
sk                960 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk                963 net/ax25/af_ax25.c 	if (sk == NULL)
sk                966 net/ax25/af_ax25.c 	sock_hold(sk);
sk                967 net/ax25/af_ax25.c 	sock_orphan(sk);
sk                968 net/ax25/af_ax25.c 	lock_sock(sk);
sk                969 net/ax25/af_ax25.c 	ax25 = sk_to_ax25(sk);
sk                971 net/ax25/af_ax25.c 	if (sk->sk_type == SOCK_SEQPACKET) {
sk                974 net/ax25/af_ax25.c 			release_sock(sk);
sk                976 net/ax25/af_ax25.c 			lock_sock(sk);
sk                983 net/ax25/af_ax25.c 			release_sock(sk);
sk                985 net/ax25/af_ax25.c 			lock_sock(sk);
sk                986 net/ax25/af_ax25.c 			if (!sock_flag(ax25->sk, SOCK_DESTROY))
sk               1016 net/ax25/af_ax25.c 			sk->sk_state                = TCP_CLOSE;
sk               1017 net/ax25/af_ax25.c 			sk->sk_shutdown            |= SEND_SHUTDOWN;
sk               1018 net/ax25/af_ax25.c 			sk->sk_state_change(sk);
sk               1019 net/ax25/af_ax25.c 			sock_set_flag(sk, SOCK_DESTROY);
sk               1026 net/ax25/af_ax25.c 		sk->sk_state     = TCP_CLOSE;
sk               1027 net/ax25/af_ax25.c 		sk->sk_shutdown |= SEND_SHUTDOWN;
sk               1028 net/ax25/af_ax25.c 		sk->sk_state_change(sk);
sk               1032 net/ax25/af_ax25.c 	sock->sk   = NULL;
sk               1033 net/ax25/af_ax25.c 	release_sock(sk);
sk               1034 net/ax25/af_ax25.c 	sock_put(sk);
sk               1047 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk               1078 net/ax25/af_ax25.c 	lock_sock(sk);
sk               1080 net/ax25/af_ax25.c 	ax25 = sk_to_ax25(sk);
sk               1081 net/ax25/af_ax25.c 	if (!sock_flag(sk, SOCK_ZAPPED)) {
sk               1112 net/ax25/af_ax25.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk               1115 net/ax25/af_ax25.c 	release_sock(sk);
sk               1126 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk               1127 net/ax25/af_ax25.c 	ax25_cb *ax25 = sk_to_ax25(sk), *ax25t;
sk               1153 net/ax25/af_ax25.c 	lock_sock(sk);
sk               1157 net/ax25/af_ax25.c 		switch (sk->sk_state) {
sk               1173 net/ax25/af_ax25.c 	if (sk->sk_state == TCP_ESTABLISHED && sk->sk_type == SOCK_SEQPACKET) {
sk               1178 net/ax25/af_ax25.c 	sk->sk_state   = TCP_CLOSE;
sk               1221 net/ax25/af_ax25.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk               1240 net/ax25/af_ax25.c 	if (sk->sk_type == SOCK_SEQPACKET &&
sk               1253 net/ax25/af_ax25.c 	if (sk->sk_type != SOCK_SEQPACKET) {
sk               1255 net/ax25/af_ax25.c 		sk->sk_state   = TCP_ESTABLISHED;
sk               1261 net/ax25/af_ax25.c 	sk->sk_state          = TCP_SYN_SENT;
sk               1286 net/ax25/af_ax25.c 	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
sk               1291 net/ax25/af_ax25.c 	if (sk->sk_state == TCP_SYN_SENT) {
sk               1295 net/ax25/af_ax25.c 			prepare_to_wait(sk_sleep(sk), &wait,
sk               1297 net/ax25/af_ax25.c 			if (sk->sk_state != TCP_SYN_SENT)
sk               1300 net/ax25/af_ax25.c 				release_sock(sk);
sk               1302 net/ax25/af_ax25.c 				lock_sock(sk);
sk               1308 net/ax25/af_ax25.c 		finish_wait(sk_sleep(sk), &wait);
sk               1314 net/ax25/af_ax25.c 	if (sk->sk_state != TCP_ESTABLISHED) {
sk               1317 net/ax25/af_ax25.c 		err = sock_error(sk);	/* Always set at this point */
sk               1325 net/ax25/af_ax25.c 	release_sock(sk);
sk               1336 net/ax25/af_ax25.c 	struct sock *sk;
sk               1342 net/ax25/af_ax25.c 	if ((sk = sock->sk) == NULL)
sk               1345 net/ax25/af_ax25.c 	lock_sock(sk);
sk               1346 net/ax25/af_ax25.c 	if (sk->sk_type != SOCK_SEQPACKET) {
sk               1351 net/ax25/af_ax25.c 	if (sk->sk_state != TCP_LISTEN) {
sk               1361 net/ax25/af_ax25.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               1362 net/ax25/af_ax25.c 		skb = skb_dequeue(&sk->sk_receive_queue);
sk               1371 net/ax25/af_ax25.c 			release_sock(sk);
sk               1373 net/ax25/af_ax25.c 			lock_sock(sk);
sk               1379 net/ax25/af_ax25.c 	finish_wait(sk_sleep(sk), &wait);
sk               1384 net/ax25/af_ax25.c 	newsk		 = skb->sk;
sk               1389 net/ax25/af_ax25.c 	sk->sk_ack_backlog--;
sk               1393 net/ax25/af_ax25.c 	release_sock(sk);
sk               1402 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk               1408 net/ax25/af_ax25.c 	lock_sock(sk);
sk               1409 net/ax25/af_ax25.c 	ax25 = sk_to_ax25(sk);
sk               1412 net/ax25/af_ax25.c 		if (sk->sk_state != TCP_ESTABLISHED) {
sk               1441 net/ax25/af_ax25.c 	release_sock(sk);
sk               1449 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk               1460 net/ax25/af_ax25.c 	lock_sock(sk);
sk               1461 net/ax25/af_ax25.c 	ax25 = sk_to_ax25(sk);
sk               1463 net/ax25/af_ax25.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk               1468 net/ax25/af_ax25.c 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
sk               1527 net/ax25/af_ax25.c 		if (sk->sk_type == SOCK_SEQPACKET &&
sk               1542 net/ax25/af_ax25.c 		if (sk->sk_state != TCP_ESTABLISHED) {
sk               1555 net/ax25/af_ax25.c 	skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT, &err);
sk               1572 net/ax25/af_ax25.c 		*(u8 *)skb_push(skb, 1) = sk->sk_protocol;
sk               1574 net/ax25/af_ax25.c 	if (sk->sk_type == SOCK_SEQPACKET) {
sk               1576 net/ax25/af_ax25.c 		if (sk->sk_state != TCP_ESTABLISHED) {
sk               1607 net/ax25/af_ax25.c 	release_sock(sk);
sk               1615 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk               1620 net/ax25/af_ax25.c 	lock_sock(sk);
sk               1625 net/ax25/af_ax25.c 	if (sk->sk_type == SOCK_SEQPACKET && sk->sk_state != TCP_ESTABLISHED) {
sk               1631 net/ax25/af_ax25.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
sk               1636 net/ax25/af_ax25.c 	if (!sk_to_ax25(sk)->pidincl)
sk               1675 net/ax25/af_ax25.c 	skb_free_datagram(sk, skb);
sk               1679 net/ax25/af_ax25.c 	release_sock(sk);
sk               1684 net/ax25/af_ax25.c static int ax25_shutdown(struct socket *sk, int how)
sk               1692 net/ax25/af_ax25.c 	struct sock *sk = sock->sk;
sk               1696 net/ax25/af_ax25.c 	lock_sock(sk);
sk               1701 net/ax25/af_ax25.c 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
sk               1712 net/ax25/af_ax25.c 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
sk               1769 net/ax25/af_ax25.c 		ax25_cb *ax25 = sk_to_ax25(sk);
sk               1783 net/ax25/af_ax25.c 		ax25_info.rcv_q     = sk_rmem_alloc_get(sk);
sk               1784 net/ax25/af_ax25.c 		ax25_info.snd_q     = sk_wmem_alloc_get(sk);
sk               1847 net/ax25/af_ax25.c 	release_sock(sk);
sk               1910 net/ax25/af_ax25.c 	if (ax25->sk != NULL) {
sk               1912 net/ax25/af_ax25.c 			   sk_wmem_alloc_get(ax25->sk),
sk               1913 net/ax25/af_ax25.c 			   sk_rmem_alloc_get(ax25->sk),
sk               1914 net/ax25/af_ax25.c 			   sock_i_ino(ax25->sk));
sk                 61 net/ax25/ax25_ds_in.c 		if (ax25->sk != NULL) {
sk                 62 net/ax25/ax25_ds_in.c 			bh_lock_sock(ax25->sk);
sk                 63 net/ax25/ax25_ds_in.c 			ax25->sk->sk_state = TCP_ESTABLISHED;
sk                 68 net/ax25/ax25_ds_in.c 			if (!sock_flag(ax25->sk, SOCK_DEAD))
sk                 69 net/ax25/ax25_ds_in.c 				ax25->sk->sk_state_change(ax25->sk);
sk                 70 net/ax25/ax25_ds_in.c 			bh_unlock_sock(ax25->sk);
sk                 93 net/ax25/ax25_ds_timer.c 	struct sock *sk=ax25->sk;
sk                 95 net/ax25/ax25_ds_timer.c 	if (sk)
sk                 96 net/ax25/ax25_ds_timer.c 		bh_lock_sock(sk);
sk                104 net/ax25/ax25_ds_timer.c 		if (!sk || sock_flag(sk, SOCK_DESTROY) ||
sk                105 net/ax25/ax25_ds_timer.c 		    (sk->sk_state == TCP_LISTEN &&
sk                106 net/ax25/ax25_ds_timer.c 		     sock_flag(sk, SOCK_DEAD))) {
sk                107 net/ax25/ax25_ds_timer.c 			if (sk) {
sk                108 net/ax25/ax25_ds_timer.c 				sock_hold(sk);
sk                110 net/ax25/ax25_ds_timer.c 				bh_unlock_sock(sk);
sk                112 net/ax25/ax25_ds_timer.c 				sock_put(sk);
sk                123 net/ax25/ax25_ds_timer.c 		if (sk != NULL) {
sk                124 net/ax25/ax25_ds_timer.c 			if (atomic_read(&sk->sk_rmem_alloc) <
sk                125 net/ax25/ax25_ds_timer.c 			    (sk->sk_rcvbuf >> 1) &&
sk                135 net/ax25/ax25_ds_timer.c 	if (sk)
sk                136 net/ax25/ax25_ds_timer.c 		bh_unlock_sock(sk);
sk                167 net/ax25/ax25_ds_timer.c 	if (ax25->sk != NULL) {
sk                168 net/ax25/ax25_ds_timer.c 		bh_lock_sock(ax25->sk);
sk                169 net/ax25/ax25_ds_timer.c 		ax25->sk->sk_state     = TCP_CLOSE;
sk                170 net/ax25/ax25_ds_timer.c 		ax25->sk->sk_err       = 0;
sk                171 net/ax25/ax25_ds_timer.c 		ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
sk                172 net/ax25/ax25_ds_timer.c 		if (!sock_flag(ax25->sk, SOCK_DEAD)) {
sk                173 net/ax25/ax25_ds_timer.c 			ax25->sk->sk_state_change(ax25->sk);
sk                174 net/ax25/ax25_ds_timer.c 			sock_set_flag(ax25->sk, SOCK_DEAD);
sk                176 net/ax25/ax25_ds_timer.c 		bh_unlock_sock(ax25->sk);
sk                214 net/ax25/ax25_ds_timer.c 			if (!sock_flag(ax25->sk, SOCK_DESTROY))
sk                141 net/ax25/ax25_in.c 	if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
sk                142 net/ax25/ax25_in.c 		if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
sk                144 net/ax25/ax25_in.c 			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
sk                188 net/ax25/ax25_in.c 	struct sock *make, *sk;
sk                260 net/ax25/ax25_in.c 			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
sk                261 net/ax25/ax25_in.c 			if (sk != NULL) {
sk                262 net/ax25/ax25_in.c 				bh_lock_sock(sk);
sk                263 net/ax25/ax25_in.c 				if (atomic_read(&sk->sk_rmem_alloc) >=
sk                264 net/ax25/ax25_in.c 				    sk->sk_rcvbuf) {
sk                271 net/ax25/ax25_in.c 					if (sock_queue_rcv_skb(sk, skb) != 0)
sk                274 net/ax25/ax25_in.c 				bh_unlock_sock(sk);
sk                275 net/ax25/ax25_in.c 				sock_put(sk);
sk                336 net/ax25/ax25_in.c 		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
sk                338 net/ax25/ax25_in.c 		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);
sk                340 net/ax25/ax25_in.c 	if (sk != NULL) {
sk                341 net/ax25/ax25_in.c 		bh_lock_sock(sk);
sk                342 net/ax25/ax25_in.c 		if (sk_acceptq_is_full(sk) ||
sk                343 net/ax25/ax25_in.c 		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
sk                347 net/ax25/ax25_in.c 			bh_unlock_sock(sk);
sk                348 net/ax25/ax25_in.c 			sock_put(sk);
sk                355 net/ax25/ax25_in.c 		skb_queue_head(&sk->sk_receive_queue, skb);
sk                359 net/ax25/ax25_in.c 		sk->sk_ack_backlog++;
sk                360 net/ax25/ax25_in.c 		bh_unlock_sock(sk);
sk                383 net/ax25/ax25_in.c 		if (sk)
sk                384 net/ax25/ax25_in.c 			sock_put(sk);
sk                419 net/ax25/ax25_in.c 	if (sk) {
sk                420 net/ax25/ax25_in.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                421 net/ax25/ax25_in.c 			sk->sk_data_ready(sk);
sk                422 net/ax25/ax25_in.c 		sock_put(sk);
sk                156 net/ax25/ax25_ip.c 			if (skb->sk != NULL)
sk                157 net/ax25/ax25_ip.c 				skb_set_owner_w(ourskb, skb->sk);
sk                150 net/ax25/ax25_out.c 			if (skb->sk != NULL)
sk                151 net/ax25/ax25_out.c 				skb_set_owner_w(skbn, skb->sk);
sk                284 net/ax25/ax25_out.c 		if (skb->sk != NULL)
sk                285 net/ax25/ax25_out.c 			skb_set_owner_w(skbn, skb->sk);
sk                346 net/ax25/ax25_out.c 		if (skb->sk != NULL)
sk                347 net/ax25/ax25_out.c 			skb_set_owner_w(skbn, skb->sk);
sk                428 net/ax25/ax25_route.c 	if (ax25->sk != NULL) {
sk                430 net/ax25/ax25_route.c 		bh_lock_sock(ax25->sk);
sk                431 net/ax25/ax25_route.c 		sock_reset_flag(ax25->sk, SOCK_ZAPPED);
sk                432 net/ax25/ax25_route.c 		bh_unlock_sock(ax25->sk);
sk                456 net/ax25/ax25_route.c 		if (skb->sk != NULL)
sk                457 net/ax25/ax25_route.c 			skb_set_owner_w(skbn, skb->sk);
sk                 69 net/ax25/ax25_std_in.c 			if (ax25->sk != NULL) {
sk                 70 net/ax25/ax25_std_in.c 				bh_lock_sock(ax25->sk);
sk                 71 net/ax25/ax25_std_in.c 				ax25->sk->sk_state = TCP_ESTABLISHED;
sk                 73 net/ax25/ax25_std_in.c 				if (!sock_flag(ax25->sk, SOCK_DEAD))
sk                 74 net/ax25/ax25_std_in.c 					ax25->sk->sk_state_change(ax25->sk);
sk                 75 net/ax25/ax25_std_in.c 				bh_unlock_sock(ax25->sk);
sk                 31 net/ax25/ax25_std_timer.c 	struct sock *sk = ax25->sk;
sk                 33 net/ax25/ax25_std_timer.c 	if (sk)
sk                 34 net/ax25/ax25_std_timer.c 		bh_lock_sock(sk);
sk                 41 net/ax25/ax25_std_timer.c 		if (!sk || sock_flag(sk, SOCK_DESTROY) ||
sk                 42 net/ax25/ax25_std_timer.c 		    (sk->sk_state == TCP_LISTEN &&
sk                 43 net/ax25/ax25_std_timer.c 		     sock_flag(sk, SOCK_DEAD))) {
sk                 44 net/ax25/ax25_std_timer.c 			if (sk) {
sk                 45 net/ax25/ax25_std_timer.c 				sock_hold(sk);
sk                 47 net/ax25/ax25_std_timer.c 				bh_unlock_sock(sk);
sk                 49 net/ax25/ax25_std_timer.c 				sock_put(sk);
sk                 61 net/ax25/ax25_std_timer.c 		if (sk != NULL) {
sk                 62 net/ax25/ax25_std_timer.c 			if (atomic_read(&sk->sk_rmem_alloc) <
sk                 63 net/ax25/ax25_std_timer.c 			    (sk->sk_rcvbuf >> 1) &&
sk                 73 net/ax25/ax25_std_timer.c 	if (sk)
sk                 74 net/ax25/ax25_std_timer.c 		bh_unlock_sock(sk);
sk                107 net/ax25/ax25_std_timer.c 	if (ax25->sk != NULL) {
sk                108 net/ax25/ax25_std_timer.c 		bh_lock_sock(ax25->sk);
sk                109 net/ax25/ax25_std_timer.c 		ax25->sk->sk_state     = TCP_CLOSE;
sk                110 net/ax25/ax25_std_timer.c 		ax25->sk->sk_err       = 0;
sk                111 net/ax25/ax25_std_timer.c 		ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
sk                112 net/ax25/ax25_std_timer.c 		if (!sock_flag(ax25->sk, SOCK_DEAD)) {
sk                113 net/ax25/ax25_std_timer.c 			ax25->sk->sk_state_change(ax25->sk);
sk                114 net/ax25/ax25_std_timer.c 			sock_set_flag(ax25->sk, SOCK_DEAD);
sk                116 net/ax25/ax25_std_timer.c 		bh_unlock_sock(ax25->sk);
sk                146 net/ax25/ax25_std_timer.c 			if (!sock_flag(ax25->sk, SOCK_DESTROY))
sk                264 net/ax25/ax25_subr.c 	if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
sk                275 net/ax25/ax25_subr.c 	if (ax25->sk != NULL) {
sk                277 net/ax25/ax25_subr.c 		bh_lock_sock(ax25->sk);
sk                278 net/ax25/ax25_subr.c 		ax25->sk->sk_state     = TCP_CLOSE;
sk                279 net/ax25/ax25_subr.c 		ax25->sk->sk_err       = reason;
sk                280 net/ax25/ax25_subr.c 		ax25->sk->sk_shutdown |= SEND_SHUTDOWN;
sk                281 net/ax25/ax25_subr.c 		if (!sock_flag(ax25->sk, SOCK_DEAD)) {
sk                282 net/ax25/ax25_subr.c 			ax25->sk->sk_state_change(ax25->sk);
sk                283 net/ax25/ax25_subr.c 			sock_set_flag(ax25->sk, SOCK_DEAD);
sk                285 net/ax25/ax25_subr.c 		bh_unlock_sock(ax25->sk);
sk               2208 net/batman-adv/bridge_loop_avoidance.c 	struct net *net = sock_net(cb->skb->sk);
sk               2446 net/batman-adv/bridge_loop_avoidance.c 	struct net *net = sock_net(cb->skb->sk);
sk                993 net/batman-adv/distributed-arp-table.c 	struct net *net = sock_net(cb->skb->sk);
sk                562 net/batman-adv/gateway_client.c 	struct net *net = sock_net(cb->skb->sk);
sk               2332 net/batman-adv/multicast.c 	struct net *net = sock_net(cb->skb->sk);
sk                956 net/batman-adv/netlink.c 	struct net *net = sock_net(cb->skb->sk);
sk                782 net/batman-adv/originator.c 	struct net *net = sock_net(cb->skb->sk);
sk               1479 net/batman-adv/originator.c 	struct net *net = sock_net(cb->skb->sk);
sk               1253 net/batman-adv/translation-table.c 	struct net *net = sock_net(cb->skb->sk);
sk               2169 net/batman-adv/translation-table.c 	struct net *net = sock_net(cb->skb->sk);
sk                 69 net/bluetooth/af_bluetooth.c void bt_sock_reclassify_lock(struct sock *sk, int proto)
sk                 71 net/bluetooth/af_bluetooth.c 	BUG_ON(!sk);
sk                 72 net/bluetooth/af_bluetooth.c 	BUG_ON(!sock_allow_reclassification(sk));
sk                 74 net/bluetooth/af_bluetooth.c 	sock_lock_init_class_and_name(sk,
sk                132 net/bluetooth/af_bluetooth.c 			bt_sock_reclassify_lock(sock->sk, proto);
sk                141 net/bluetooth/af_bluetooth.c void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
sk                144 net/bluetooth/af_bluetooth.c 	sk_add_node(sk, &l->head);
sk                149 net/bluetooth/af_bluetooth.c void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
sk                152 net/bluetooth/af_bluetooth.c 	sk_del_node_init(sk);
sk                157 net/bluetooth/af_bluetooth.c void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
sk                159 net/bluetooth/af_bluetooth.c 	BT_DBG("parent %p, sk %p", parent, sk);
sk                161 net/bluetooth/af_bluetooth.c 	sock_hold(sk);
sk                164 net/bluetooth/af_bluetooth.c 		bh_lock_sock_nested(sk);
sk                166 net/bluetooth/af_bluetooth.c 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                168 net/bluetooth/af_bluetooth.c 	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
sk                169 net/bluetooth/af_bluetooth.c 	bt_sk(sk)->parent = parent;
sk                172 net/bluetooth/af_bluetooth.c 		bh_unlock_sock(sk);
sk                174 net/bluetooth/af_bluetooth.c 		release_sock(sk);
sk                183 net/bluetooth/af_bluetooth.c void bt_accept_unlink(struct sock *sk)
sk                185 net/bluetooth/af_bluetooth.c 	BT_DBG("sk %p state %d", sk, sk->sk_state);
sk                187 net/bluetooth/af_bluetooth.c 	list_del_init(&bt_sk(sk)->accept_q);
sk                188 net/bluetooth/af_bluetooth.c 	bt_sk(sk)->parent->sk_ack_backlog--;
sk                189 net/bluetooth/af_bluetooth.c 	bt_sk(sk)->parent = NULL;
sk                190 net/bluetooth/af_bluetooth.c 	sock_put(sk);
sk                197 net/bluetooth/af_bluetooth.c 	struct sock *sk;
sk                203 net/bluetooth/af_bluetooth.c 		sk = (struct sock *)s;
sk                206 net/bluetooth/af_bluetooth.c 		sock_hold(sk);
sk                207 net/bluetooth/af_bluetooth.c 		lock_sock(sk);
sk                212 net/bluetooth/af_bluetooth.c 		if (!bt_sk(sk)->parent) {
sk                213 net/bluetooth/af_bluetooth.c 			BT_DBG("sk %p, already unlinked", sk);
sk                214 net/bluetooth/af_bluetooth.c 			release_sock(sk);
sk                215 net/bluetooth/af_bluetooth.c 			sock_put(sk);
sk                225 net/bluetooth/af_bluetooth.c 		sock_put(sk);
sk                228 net/bluetooth/af_bluetooth.c 		if (sk->sk_state == BT_CLOSED) {
sk                229 net/bluetooth/af_bluetooth.c 			bt_accept_unlink(sk);
sk                230 net/bluetooth/af_bluetooth.c 			release_sock(sk);
sk                234 net/bluetooth/af_bluetooth.c 		if (sk->sk_state == BT_CONNECTED || !newsock ||
sk                236 net/bluetooth/af_bluetooth.c 			bt_accept_unlink(sk);
sk                238 net/bluetooth/af_bluetooth.c 				sock_graft(sk, newsock);
sk                240 net/bluetooth/af_bluetooth.c 			release_sock(sk);
sk                241 net/bluetooth/af_bluetooth.c 			return sk;
sk                244 net/bluetooth/af_bluetooth.c 		release_sock(sk);
sk                255 net/bluetooth/af_bluetooth.c 	struct sock *sk = sock->sk;
sk                261 net/bluetooth/af_bluetooth.c 	BT_DBG("sock %p sk %p len %zu", sock, sk, len);
sk                266 net/bluetooth/af_bluetooth.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                268 net/bluetooth/af_bluetooth.c 		if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                284 net/bluetooth/af_bluetooth.c 		sock_recv_ts_and_drops(msg, sk, skb);
sk                286 net/bluetooth/af_bluetooth.c 		if (msg->msg_name && bt_sk(sk)->skb_msg_name)
sk                287 net/bluetooth/af_bluetooth.c 			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
sk                291 net/bluetooth/af_bluetooth.c 	skb_free_datagram(sk, skb);
sk                300 net/bluetooth/af_bluetooth.c static long bt_sock_data_wait(struct sock *sk, long timeo)
sk                304 net/bluetooth/af_bluetooth.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                308 net/bluetooth/af_bluetooth.c 		if (!skb_queue_empty(&sk->sk_receive_queue))
sk                311 net/bluetooth/af_bluetooth.c 		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
sk                317 net/bluetooth/af_bluetooth.c 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                318 net/bluetooth/af_bluetooth.c 		release_sock(sk);
sk                320 net/bluetooth/af_bluetooth.c 		lock_sock(sk);
sk                321 net/bluetooth/af_bluetooth.c 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                325 net/bluetooth/af_bluetooth.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                332 net/bluetooth/af_bluetooth.c 	struct sock *sk = sock->sk;
sk                340 net/bluetooth/af_bluetooth.c 	BT_DBG("sk %p size %zu", sk, size);
sk                342 net/bluetooth/af_bluetooth.c 	lock_sock(sk);
sk                344 net/bluetooth/af_bluetooth.c 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
sk                345 net/bluetooth/af_bluetooth.c 	timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk                351 net/bluetooth/af_bluetooth.c 		skb = skb_dequeue(&sk->sk_receive_queue);
sk                356 net/bluetooth/af_bluetooth.c 			err = sock_error(sk);
sk                359 net/bluetooth/af_bluetooth.c 			if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                366 net/bluetooth/af_bluetooth.c 			timeo = bt_sock_data_wait(sk, timeo);
sk                377 net/bluetooth/af_bluetooth.c 			skb_queue_head(&sk->sk_receive_queue, skb);
sk                385 net/bluetooth/af_bluetooth.c 		sock_recv_ts_and_drops(msg, sk, skb);
sk                416 net/bluetooth/af_bluetooth.c 				skb_queue_head(&sk->sk_receive_queue, skb);
sk                423 net/bluetooth/af_bluetooth.c 			skb_queue_head(&sk->sk_receive_queue, skb);
sk                429 net/bluetooth/af_bluetooth.c 	release_sock(sk);
sk                437 net/bluetooth/af_bluetooth.c 	struct sock *sk;
sk                440 net/bluetooth/af_bluetooth.c 		sk = (struct sock *)s;
sk                441 net/bluetooth/af_bluetooth.c 		if (sk->sk_state == BT_CONNECTED ||
sk                443 net/bluetooth/af_bluetooth.c 		     sk->sk_state == BT_CONNECT2))
sk                453 net/bluetooth/af_bluetooth.c 	struct sock *sk = sock->sk;
sk                456 net/bluetooth/af_bluetooth.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                458 net/bluetooth/af_bluetooth.c 	poll_wait(file, sk_sleep(sk), wait);
sk                460 net/bluetooth/af_bluetooth.c 	if (sk->sk_state == BT_LISTEN)
sk                461 net/bluetooth/af_bluetooth.c 		return bt_accept_poll(sk);
sk                463 net/bluetooth/af_bluetooth.c 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
sk                465 net/bluetooth/af_bluetooth.c 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
sk                467 net/bluetooth/af_bluetooth.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                470 net/bluetooth/af_bluetooth.c 	if (sk->sk_shutdown == SHUTDOWN_MASK)
sk                473 net/bluetooth/af_bluetooth.c 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk                476 net/bluetooth/af_bluetooth.c 	if (sk->sk_state == BT_CLOSED)
sk                479 net/bluetooth/af_bluetooth.c 	if (sk->sk_state == BT_CONNECT ||
sk                480 net/bluetooth/af_bluetooth.c 			sk->sk_state == BT_CONNECT2 ||
sk                481 net/bluetooth/af_bluetooth.c 			sk->sk_state == BT_CONFIG)
sk                484 net/bluetooth/af_bluetooth.c 	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
sk                487 net/bluetooth/af_bluetooth.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                495 net/bluetooth/af_bluetooth.c 	struct sock *sk = sock->sk;
sk                500 net/bluetooth/af_bluetooth.c 	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
sk                504 net/bluetooth/af_bluetooth.c 		if (sk->sk_state == BT_LISTEN)
sk                507 net/bluetooth/af_bluetooth.c 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
sk                514 net/bluetooth/af_bluetooth.c 		if (sk->sk_state == BT_LISTEN)
sk                517 net/bluetooth/af_bluetooth.c 		lock_sock(sk);
sk                518 net/bluetooth/af_bluetooth.c 		skb = skb_peek(&sk->sk_receive_queue);
sk                520 net/bluetooth/af_bluetooth.c 		release_sock(sk);
sk                534 net/bluetooth/af_bluetooth.c int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
sk                539 net/bluetooth/af_bluetooth.c 	BT_DBG("sk %p", sk);
sk                541 net/bluetooth/af_bluetooth.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                543 net/bluetooth/af_bluetooth.c 	while (sk->sk_state != state) {
sk                554 net/bluetooth/af_bluetooth.c 		release_sock(sk);
sk                556 net/bluetooth/af_bluetooth.c 		lock_sock(sk);
sk                559 net/bluetooth/af_bluetooth.c 		err = sock_error(sk);
sk                564 net/bluetooth/af_bluetooth.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                570 net/bluetooth/af_bluetooth.c int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
sk                576 net/bluetooth/af_bluetooth.c 	BT_DBG("sk %p", sk);
sk                578 net/bluetooth/af_bluetooth.c 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
sk                580 net/bluetooth/af_bluetooth.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                582 net/bluetooth/af_bluetooth.c 	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
sk                593 net/bluetooth/af_bluetooth.c 		release_sock(sk);
sk                595 net/bluetooth/af_bluetooth.c 		lock_sock(sk);
sk                598 net/bluetooth/af_bluetooth.c 		err = sock_error(sk);
sk                603 net/bluetooth/af_bluetooth.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                648 net/bluetooth/af_bluetooth.c 		struct sock *sk = sk_entry(v);
sk                649 net/bluetooth/af_bluetooth.c 		struct bt_sock *bt = bt_sk(sk);
sk                653 net/bluetooth/af_bluetooth.c 			   sk,
sk                654 net/bluetooth/af_bluetooth.c 			   refcount_read(&sk->sk_refcnt),
sk                655 net/bluetooth/af_bluetooth.c 			   sk_rmem_alloc_get(sk),
sk                656 net/bluetooth/af_bluetooth.c 			   sk_wmem_alloc_get(sk),
sk                657 net/bluetooth/af_bluetooth.c 			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
sk                658 net/bluetooth/af_bluetooth.c 			   sock_i_ino(sk),
sk                482 net/bluetooth/bnep/core.c 	struct sock *sk = s->sock->sk;
sk                490 net/bluetooth/bnep/core.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                495 net/bluetooth/bnep/core.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
sk                503 net/bluetooth/bnep/core.c 		if (sk->sk_state != BT_CONNECTED)
sk                507 net/bluetooth/bnep/core.c 		while ((skb = skb_dequeue(&sk->sk_write_queue)))
sk                518 net/bluetooth/bnep/core.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                527 net/bluetooth/bnep/core.c 	s->sock->sk->sk_err = EUNATCH;
sk                529 net/bluetooth/bnep/core.c 	wake_up_interruptible(sk_sleep(s->sock->sk));
sk                544 net/bluetooth/bnep/core.c 	struct l2cap_conn *conn = l2cap_pi(session->sock->sk)->chan->conn;
sk                572 net/bluetooth/bnep/core.c 	baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
sk                573 net/bluetooth/bnep/core.c 	baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
sk                666 net/bluetooth/bnep/core.c 		wake_up_interruptible(sk_sleep(s->sock->sk));
sk                 54 net/bluetooth/bnep/netdev.c 	struct sock *sk = s->sock->sk;
sk                104 net/bluetooth/bnep/netdev.c 	skb_queue_tail(&sk->sk_write_queue, skb);
sk                105 net/bluetooth/bnep/netdev.c 	wake_up_interruptible(sk_sleep(sk));
sk                168 net/bluetooth/bnep/netdev.c 	struct sock *sk = s->sock->sk;
sk                192 net/bluetooth/bnep/netdev.c 	skb_queue_tail(&sk->sk_write_queue, skb);
sk                193 net/bluetooth/bnep/netdev.c 	wake_up_interruptible(sk_sleep(sk));
sk                195 net/bluetooth/bnep/netdev.c 	if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
sk                 38 net/bluetooth/bnep/sock.c 	struct sock *sk = sock->sk;
sk                 40 net/bluetooth/bnep/sock.c 	BT_DBG("sock %p sk %p", sock, sk);
sk                 42 net/bluetooth/bnep/sock.c 	if (!sk)
sk                 45 net/bluetooth/bnep/sock.c 	bt_sock_unlink(&bnep_sk_list, sk);
sk                 47 net/bluetooth/bnep/sock.c 	sock_orphan(sk);
sk                 48 net/bluetooth/bnep/sock.c 	sock_put(sk);
sk                 76 net/bluetooth/bnep/sock.c 		if (nsock->sk->sk_state != BT_CONNECTED) {
sk                202 net/bluetooth/bnep/sock.c 	struct sock *sk;
sk                209 net/bluetooth/bnep/sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern);
sk                210 net/bluetooth/bnep/sock.c 	if (!sk)
sk                213 net/bluetooth/bnep/sock.c 	sock_init_data(sock, sk);
sk                219 net/bluetooth/bnep/sock.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                221 net/bluetooth/bnep/sock.c 	sk->sk_protocol = protocol;
sk                222 net/bluetooth/bnep/sock.c 	sk->sk_state	= BT_OPEN;
sk                224 net/bluetooth/bnep/sock.c 	bt_sock_link(&bnep_sk_list, sk);
sk                145 net/bluetooth/cmtp/capi.c 	wake_up_interruptible(sk_sleep(session->sock->sk));
sk                281 net/bluetooth/cmtp/core.c 	struct sock *sk = session->sock->sk;
sk                289 net/bluetooth/cmtp/core.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                293 net/bluetooth/cmtp/core.c 		if (sk->sk_state != BT_CONNECTED)
sk                296 net/bluetooth/cmtp/core.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
sk                312 net/bluetooth/cmtp/core.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                350 net/bluetooth/cmtp/core.c 	s = __cmtp_get_session(&l2cap_pi(sock->sk)->chan->dst);
sk                356 net/bluetooth/cmtp/core.c 	bacpy(&session->bdaddr, &l2cap_pi(sock->sk)->chan->dst);
sk                358 net/bluetooth/cmtp/core.c 	session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
sk                359 net/bluetooth/cmtp/core.c 					l2cap_pi(sock->sk)->chan->imtu);
sk                396 net/bluetooth/cmtp/core.c 			wake_up_interruptible(sk_sleep(session->sock->sk));
sk                439 net/bluetooth/cmtp/core.c 		wake_up_interruptible(sk_sleep(session->sock->sk));
sk                 51 net/bluetooth/cmtp/sock.c 	struct sock *sk = sock->sk;
sk                 53 net/bluetooth/cmtp/sock.c 	BT_DBG("sock %p sk %p", sock, sk);
sk                 55 net/bluetooth/cmtp/sock.c 	if (!sk)
sk                 58 net/bluetooth/cmtp/sock.c 	bt_sock_unlink(&cmtp_sk_list, sk);
sk                 60 net/bluetooth/cmtp/sock.c 	sock_orphan(sk);
sk                 61 net/bluetooth/cmtp/sock.c 	sock_put(sk);
sk                 89 net/bluetooth/cmtp/sock.c 		if (nsock->sk->sk_state != BT_CONNECTED) {
sk                205 net/bluetooth/cmtp/sock.c 	struct sock *sk;
sk                212 net/bluetooth/cmtp/sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, kern);
sk                213 net/bluetooth/cmtp/sock.c 	if (!sk)
sk                216 net/bluetooth/cmtp/sock.c 	sock_init_data(sock, sk);
sk                222 net/bluetooth/cmtp/sock.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                224 net/bluetooth/cmtp/sock.c 	sk->sk_protocol = protocol;
sk                225 net/bluetooth/cmtp/sock.c 	sk->sk_state    = BT_OPEN;
sk                227 net/bluetooth/cmtp/sock.c 	bt_sock_link(&cmtp_sk_list, sk);
sk               1819 net/bluetooth/hci_request.c void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
sk               1847 net/bluetooth/hci_request.c 				mgmt_advertising_removed(sk, hdev, rem_inst);
sk               1861 net/bluetooth/hci_request.c 				mgmt_advertising_removed(sk, hdev, instance);
sk                 80 net/bluetooth/hci_request.h void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
sk                 49 net/bluetooth/hci_sock.c #define hci_pi(sk) ((struct hci_pinfo *) sk)
sk                 62 net/bluetooth/hci_sock.c void hci_sock_set_flag(struct sock *sk, int nr)
sk                 64 net/bluetooth/hci_sock.c 	set_bit(nr, &hci_pi(sk)->flags);
sk                 67 net/bluetooth/hci_sock.c void hci_sock_clear_flag(struct sock *sk, int nr)
sk                 69 net/bluetooth/hci_sock.c 	clear_bit(nr, &hci_pi(sk)->flags);
sk                 72 net/bluetooth/hci_sock.c int hci_sock_test_flag(struct sock *sk, int nr)
sk                 74 net/bluetooth/hci_sock.c 	return test_bit(nr, &hci_pi(sk)->flags);
sk                 77 net/bluetooth/hci_sock.c unsigned short hci_sock_get_channel(struct sock *sk)
sk                 79 net/bluetooth/hci_sock.c 	return hci_pi(sk)->channel;
sk                 82 net/bluetooth/hci_sock.c u32 hci_sock_get_cookie(struct sock *sk)
sk                 84 net/bluetooth/hci_sock.c 	return hci_pi(sk)->cookie;
sk                 87 net/bluetooth/hci_sock.c static bool hci_sock_gen_cookie(struct sock *sk)
sk                 89 net/bluetooth/hci_sock.c 	int id = hci_pi(sk)->cookie;
sk                 96 net/bluetooth/hci_sock.c 		hci_pi(sk)->cookie = id;
sk                 97 net/bluetooth/hci_sock.c 		get_task_comm(hci_pi(sk)->comm, current);
sk                104 net/bluetooth/hci_sock.c static void hci_sock_free_cookie(struct sock *sk)
sk                106 net/bluetooth/hci_sock.c 	int id = hci_pi(sk)->cookie;
sk                109 net/bluetooth/hci_sock.c 		hci_pi(sk)->cookie = 0xffffffff;
sk                153 net/bluetooth/hci_sock.c static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
sk                159 net/bluetooth/hci_sock.c 	flt = &hci_pi(sk)->filter;
sk                193 net/bluetooth/hci_sock.c 	struct sock *sk;
sk                200 net/bluetooth/hci_sock.c 	sk_for_each(sk, &hci_sk_list.head) {
sk                203 net/bluetooth/hci_sock.c 		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
sk                207 net/bluetooth/hci_sock.c 		if (skb->sk == sk)
sk                210 net/bluetooth/hci_sock.c 		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
sk                216 net/bluetooth/hci_sock.c 			if (is_filtered_packet(sk, skb))
sk                218 net/bluetooth/hci_sock.c 		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
sk                244 net/bluetooth/hci_sock.c 		if (sock_queue_rcv_skb(sk, nskb))
sk                257 net/bluetooth/hci_sock.c 	struct sock *sk;
sk                261 net/bluetooth/hci_sock.c 	sk_for_each(sk, &hci_sk_list.head) {
sk                265 net/bluetooth/hci_sock.c 		if (!hci_sock_test_flag(sk, flag))
sk                269 net/bluetooth/hci_sock.c 		if (sk == skip_sk)
sk                272 net/bluetooth/hci_sock.c 		if (sk->sk_state != BT_BOUND)
sk                275 net/bluetooth/hci_sock.c 		if (hci_pi(sk)->channel != channel)
sk                282 net/bluetooth/hci_sock.c 		if (sock_queue_rcv_skb(sk, nskb))
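The two hci_sock.c loops above share one delivery pattern: walk hci_sk_list, skip sockets whose state, channel or flags do not match, and queue a private copy of the skb to each remaining socket. A sketch of that pattern; the read_lock on hci_sk_list.lock and the skb_clone() step follow the usual shape of this code but are assumptions, not quoted in the listing.

static void example_send_to_channel(unsigned short channel, struct sk_buff *skb)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;			/* only bound sockets receive frames */
		if (hci_pi(sk)->channel != channel)
			continue;			/* wrong channel (raw, monitor, control, ...) */

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);		/* receive queue full: drop our copy */
	}

	read_unlock(&hci_sk_list.lock);
}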
sk                354 net/bluetooth/hci_sock.c 	struct sock *sk;
sk                364 net/bluetooth/hci_sock.c 	sk_for_each(sk, &hci_sk_list.head) {
sk                368 net/bluetooth/hci_sock.c 		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
sk                372 net/bluetooth/hci_sock.c 		if (!hci_sock_test_flag(sk, flag))
sk                376 net/bluetooth/hci_sock.c 		if (sk == skip_sk)
sk                383 net/bluetooth/hci_sock.c 		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
sk                483 net/bluetooth/hci_sock.c static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
sk                492 net/bluetooth/hci_sock.c 	if (!hci_pi(sk)->cookie)
sk                495 net/bluetooth/hci_sock.c 	switch (hci_pi(sk)->channel) {
sk                519 net/bluetooth/hci_sock.c 	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
sk                521 net/bluetooth/hci_sock.c 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
sk                526 net/bluetooth/hci_sock.c 	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
sk                532 net/bluetooth/hci_sock.c 	if (hci_pi(sk)->hdev)
sk                533 net/bluetooth/hci_sock.c 		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
sk                541 net/bluetooth/hci_sock.c static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
sk                547 net/bluetooth/hci_sock.c 	if (!hci_pi(sk)->cookie)
sk                550 net/bluetooth/hci_sock.c 	switch (hci_pi(sk)->channel) {
sk                564 net/bluetooth/hci_sock.c 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
sk                570 net/bluetooth/hci_sock.c 	if (hci_pi(sk)->hdev)
sk                571 net/bluetooth/hci_sock.c 		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
sk                579 net/bluetooth/hci_sock.c static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
sk                590 net/bluetooth/hci_sock.c 	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
sk                607 net/bluetooth/hci_sock.c send_monitor_note(struct sock *sk, const char *fmt, ...)
sk                634 net/bluetooth/hci_sock.c 	if (sock_queue_rcv_skb(sk, skb))
sk                638 net/bluetooth/hci_sock.c static void send_monitor_replay(struct sock *sk)
sk                651 net/bluetooth/hci_sock.c 		if (sock_queue_rcv_skb(sk, skb))
sk                661 net/bluetooth/hci_sock.c 		if (sock_queue_rcv_skb(sk, skb))
sk                672 net/bluetooth/hci_sock.c 			if (sock_queue_rcv_skb(sk, skb))
sk                682 net/bluetooth/hci_sock.c 	struct sock *sk;
sk                686 net/bluetooth/hci_sock.c 	sk_for_each(sk, &hci_sk_list.head) {
sk                689 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_open(sk);
sk                753 net/bluetooth/hci_sock.c 		struct sock *sk;
sk                757 net/bluetooth/hci_sock.c 		sk_for_each(sk, &hci_sk_list.head) {
sk                758 net/bluetooth/hci_sock.c 			bh_lock_sock_nested(sk);
sk                759 net/bluetooth/hci_sock.c 			if (hci_pi(sk)->hdev == hdev) {
sk                760 net/bluetooth/hci_sock.c 				hci_pi(sk)->hdev = NULL;
sk                761 net/bluetooth/hci_sock.c 				sk->sk_err = EPIPE;
sk                762 net/bluetooth/hci_sock.c 				sk->sk_state = BT_OPEN;
sk                763 net/bluetooth/hci_sock.c 				sk->sk_state_change(sk);
sk                767 net/bluetooth/hci_sock.c 			bh_unlock_sock(sk);
sk                825 net/bluetooth/hci_sock.c 	struct sock *sk = sock->sk;
sk                829 net/bluetooth/hci_sock.c 	BT_DBG("sock %p sk %p", sock, sk);
sk                831 net/bluetooth/hci_sock.c 	if (!sk)
sk                834 net/bluetooth/hci_sock.c 	lock_sock(sk);
sk                836 net/bluetooth/hci_sock.c 	switch (hci_pi(sk)->channel) {
sk                844 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_close(sk);
sk                851 net/bluetooth/hci_sock.c 		hci_sock_free_cookie(sk);
sk                855 net/bluetooth/hci_sock.c 	bt_sock_unlink(&hci_sk_list, sk);
sk                857 net/bluetooth/hci_sock.c 	hdev = hci_pi(sk)->hdev;
sk                859 net/bluetooth/hci_sock.c 		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
sk                878 net/bluetooth/hci_sock.c 	sock_orphan(sk);
sk                880 net/bluetooth/hci_sock.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                881 net/bluetooth/hci_sock.c 	skb_queue_purge(&sk->sk_write_queue);
sk                883 net/bluetooth/hci_sock.c 	release_sock(sk);
sk                884 net/bluetooth/hci_sock.c 	sock_put(sk);
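hci_sock_release() above ends with the same teardown steps the cmtp and hidp release functions in this listing use: unlink from the protocol's socket list, orphan the sock, purge its queues and drop the final reference. A compressed sketch of that shape, with the socket-list name as a placeholder.

static int example_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	bt_sock_unlink(&example_sk_list, sk);	/* placeholder list name */

	lock_sock(sk);
	sock_orphan(sk);			/* sever the sk <-> struct socket link */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
	release_sock(sk);

	sock_put(sk);				/* drop the last reference */
	return 0;
}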
sk                923 net/bluetooth/hci_sock.c static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
sk                926 net/bluetooth/hci_sock.c 	struct hci_dev *hdev = hci_pi(sk)->hdev;
sk                970 net/bluetooth/hci_sock.c 	struct sock *sk = sock->sk;
sk                975 net/bluetooth/hci_sock.c 	lock_sock(sk);
sk                977 net/bluetooth/hci_sock.c 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
sk                988 net/bluetooth/hci_sock.c 	if (hci_sock_gen_cookie(sk)) {
sk                992 net/bluetooth/hci_sock.c 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
sk                995 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_open(sk);
sk               1003 net/bluetooth/hci_sock.c 	release_sock(sk);
sk               1051 net/bluetooth/hci_sock.c 	lock_sock(sk);
sk               1053 net/bluetooth/hci_sock.c 	err = hci_sock_bound_ioctl(sk, cmd, arg);
sk               1056 net/bluetooth/hci_sock.c 	release_sock(sk);
sk               1064 net/bluetooth/hci_sock.c 	struct sock *sk = sock->sk;
sk               1069 net/bluetooth/hci_sock.c 	BT_DBG("sock %p sk %p", sock, sk);
sk               1081 net/bluetooth/hci_sock.c 	lock_sock(sk);
sk               1083 net/bluetooth/hci_sock.c 	if (sk->sk_state == BT_BOUND) {
sk               1090 net/bluetooth/hci_sock.c 		if (hci_pi(sk)->hdev) {
sk               1105 net/bluetooth/hci_sock.c 		hci_pi(sk)->channel = haddr.hci_channel;
sk               1107 net/bluetooth/hci_sock.c 		if (!hci_sock_gen_cookie(sk)) {
sk               1114 net/bluetooth/hci_sock.c 			skb = create_monitor_ctrl_close(sk);
sk               1123 net/bluetooth/hci_sock.c 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
sk               1125 net/bluetooth/hci_sock.c 		hci_pi(sk)->hdev = hdev;
sk               1128 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_open(sk);
sk               1137 net/bluetooth/hci_sock.c 		if (hci_pi(sk)->hdev) {
sk               1195 net/bluetooth/hci_sock.c 		hci_pi(sk)->channel = haddr.hci_channel;
sk               1197 net/bluetooth/hci_sock.c 		if (!hci_sock_gen_cookie(sk)) {
sk               1203 net/bluetooth/hci_sock.c 			skb = create_monitor_ctrl_close(sk);
sk               1214 net/bluetooth/hci_sock.c 		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
sk               1216 net/bluetooth/hci_sock.c 		hci_pi(sk)->hdev = hdev;
sk               1219 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_open(sk);
sk               1240 net/bluetooth/hci_sock.c 		hci_pi(sk)->channel = haddr.hci_channel;
sk               1245 net/bluetooth/hci_sock.c 		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
sk               1247 net/bluetooth/hci_sock.c 		send_monitor_note(sk, "Linux version %s (%s)",
sk               1250 net/bluetooth/hci_sock.c 		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
sk               1252 net/bluetooth/hci_sock.c 		send_monitor_replay(sk);
sk               1253 net/bluetooth/hci_sock.c 		send_monitor_control_replay(sk);
sk               1269 net/bluetooth/hci_sock.c 		hci_pi(sk)->channel = haddr.hci_channel;
sk               1289 net/bluetooth/hci_sock.c 			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
sk               1291 net/bluetooth/hci_sock.c 		hci_pi(sk)->channel = haddr.hci_channel;
sk               1303 net/bluetooth/hci_sock.c 		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
sk               1304 net/bluetooth/hci_sock.c 			if (!hci_sock_gen_cookie(sk)) {
sk               1311 net/bluetooth/hci_sock.c 				skb = create_monitor_ctrl_close(sk);
sk               1320 net/bluetooth/hci_sock.c 			skb = create_monitor_ctrl_open(sk);
sk               1327 net/bluetooth/hci_sock.c 			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
sk               1328 net/bluetooth/hci_sock.c 			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
sk               1329 net/bluetooth/hci_sock.c 			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
sk               1330 net/bluetooth/hci_sock.c 			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
sk               1331 net/bluetooth/hci_sock.c 			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
sk               1332 net/bluetooth/hci_sock.c 			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
sk               1337 net/bluetooth/hci_sock.c 	sk->sk_state = BT_BOUND;
sk               1340 net/bluetooth/hci_sock.c 	release_sock(sk);
sk               1348 net/bluetooth/hci_sock.c 	struct sock *sk = sock->sk;
sk               1352 net/bluetooth/hci_sock.c 	BT_DBG("sock %p sk %p", sock, sk);
sk               1357 net/bluetooth/hci_sock.c 	lock_sock(sk);
sk               1359 net/bluetooth/hci_sock.c 	hdev = hci_pi(sk)->hdev;
sk               1367 net/bluetooth/hci_sock.c 	haddr->hci_channel = hci_pi(sk)->channel;
sk               1371 net/bluetooth/hci_sock.c 	release_sock(sk);
sk               1375 net/bluetooth/hci_sock.c static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
sk               1378 net/bluetooth/hci_sock.c 	__u32 mask = hci_pi(sk)->cmsg_mask;
sk               1416 net/bluetooth/hci_sock.c 	struct sock *sk = sock->sk;
sk               1421 net/bluetooth/hci_sock.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk               1426 net/bluetooth/hci_sock.c 	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
sk               1429 net/bluetooth/hci_sock.c 	if (sk->sk_state == BT_CLOSED)
sk               1432 net/bluetooth/hci_sock.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk               1446 net/bluetooth/hci_sock.c 	switch (hci_pi(sk)->channel) {
sk               1448 net/bluetooth/hci_sock.c 		hci_sock_cmsg(sk, msg, skb);
sk               1452 net/bluetooth/hci_sock.c 		sock_recv_timestamp(msg, sk, skb);
sk               1455 net/bluetooth/hci_sock.c 		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
sk               1456 net/bluetooth/hci_sock.c 			sock_recv_timestamp(msg, sk, skb);
sk               1460 net/bluetooth/hci_sock.c 	skb_free_datagram(sk, skb);
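The recvmsg excerpts above follow the standard datagram receive shape: skb_recv_datagram() pulls one queued frame, the payload is copied into the caller's buffer (truncating if needed), per-channel cmsg/timestamp data is attached, and skb_free_datagram() returns the buffer. A sketch of the core path, leaving out the channel-specific cmsg handling; skb_copy_datagram_msg() and the truncation logic are assumed from the usual pattern rather than quoted in the listing.

static int example_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int noblock = flags & MSG_DONTWAIT;
	int copied, err;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;			/* nothing queued (or -EAGAIN / signal) */

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;	/* caller's buffer is smaller than the frame */
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	skb_free_datagram(sk, skb);
	return err ? err : copied;
}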
sk               1468 net/bluetooth/hci_sock.c static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
sk               1508 net/bluetooth/hci_sock.c 		skb = create_monitor_ctrl_command(sk, index, opcode, len,
sk               1520 net/bluetooth/hci_sock.c 		err = mgmt_cmd_status(sk, index, opcode,
sk               1527 net/bluetooth/hci_sock.c 	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
sk               1529 net/bluetooth/hci_sock.c 		err = mgmt_cmd_status(sk, index, opcode,
sk               1537 net/bluetooth/hci_sock.c 			err = mgmt_cmd_status(sk, index, opcode,
sk               1545 net/bluetooth/hci_sock.c 			err = mgmt_cmd_status(sk, index, opcode,
sk               1552 net/bluetooth/hci_sock.c 			err = mgmt_cmd_status(sk, index, opcode,
sk               1560 net/bluetooth/hci_sock.c 		err = mgmt_cmd_status(sk, index, opcode,
sk               1568 net/bluetooth/hci_sock.c 		err = mgmt_cmd_status(sk, index, opcode,
sk               1574 net/bluetooth/hci_sock.c 		chan->hdev_init(sk, hdev);
sk               1578 net/bluetooth/hci_sock.c 	err = handler->func(sk, hdev, cp, len);
sk               1592 net/bluetooth/hci_sock.c static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
sk               1607 net/bluetooth/hci_sock.c 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
sk               1679 net/bluetooth/hci_sock.c 	struct sock *sk = sock->sk;
sk               1685 net/bluetooth/hci_sock.c 	BT_DBG("sock %p sk %p", sock, sk);
sk               1697 net/bluetooth/hci_sock.c 	lock_sock(sk);
sk               1699 net/bluetooth/hci_sock.c 	switch (hci_pi(sk)->channel) {
sk               1707 net/bluetooth/hci_sock.c 		err = hci_logging_frame(sk, msg, len);
sk               1711 net/bluetooth/hci_sock.c 		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
sk               1713 net/bluetooth/hci_sock.c 			err = hci_mgmt_cmd(chan, sk, msg, len);
sk               1721 net/bluetooth/hci_sock.c 	hdev = hci_pi(sk)->hdev;
sk               1732 net/bluetooth/hci_sock.c 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
sk               1744 net/bluetooth/hci_sock.c 	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
sk               1808 net/bluetooth/hci_sock.c 	release_sock(sk);
sk               1820 net/bluetooth/hci_sock.c 	struct sock *sk = sock->sk;
sk               1823 net/bluetooth/hci_sock.c 	BT_DBG("sk %p, opt %d", sk, optname);
sk               1828 net/bluetooth/hci_sock.c 	lock_sock(sk);
sk               1830 net/bluetooth/hci_sock.c 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
sk               1843 net/bluetooth/hci_sock.c 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
sk               1845 net/bluetooth/hci_sock.c 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
sk               1855 net/bluetooth/hci_sock.c 			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
sk               1857 net/bluetooth/hci_sock.c 			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
sk               1862 net/bluetooth/hci_sock.c 			struct hci_filter *f = &hci_pi(sk)->filter;
sk               1883 net/bluetooth/hci_sock.c 			struct hci_filter *f = &hci_pi(sk)->filter;
sk               1898 net/bluetooth/hci_sock.c 	release_sock(sk);
sk               1906 net/bluetooth/hci_sock.c 	struct sock *sk = sock->sk;
sk               1909 net/bluetooth/hci_sock.c 	BT_DBG("sk %p, opt %d", sk, optname);
sk               1917 net/bluetooth/hci_sock.c 	lock_sock(sk);
sk               1919 net/bluetooth/hci_sock.c 	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
sk               1926 net/bluetooth/hci_sock.c 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
sk               1936 net/bluetooth/hci_sock.c 		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
sk               1947 net/bluetooth/hci_sock.c 			struct hci_filter *f = &hci_pi(sk)->filter;
sk               1967 net/bluetooth/hci_sock.c 	release_sock(sk);
sk               2000 net/bluetooth/hci_sock.c 	struct sock *sk;
sk               2009 net/bluetooth/hci_sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
sk               2010 net/bluetooth/hci_sock.c 	if (!sk)
sk               2013 net/bluetooth/hci_sock.c 	sock_init_data(sock, sk);
sk               2015 net/bluetooth/hci_sock.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk               2017 net/bluetooth/hci_sock.c 	sk->sk_protocol = protocol;
sk               2020 net/bluetooth/hci_sock.c 	sk->sk_state = BT_OPEN;
sk               2022 net/bluetooth/hci_sock.c 	bt_sock_link(&hci_sk_list, sk);
sk                103 net/bluetooth/hidp/core.c 	struct sock *sk = sock->sk;
sk                126 net/bluetooth/hidp/core.c 	wake_up_interruptible(sk_sleep(sk));
sk                419 net/bluetooth/hidp/core.c 	session->intr_sock->sk->sk_err = EUNATCH;
sk                420 net/bluetooth/hidp/core.c 	session->ctrl_sock->sk->sk_err = EUNATCH;
sk                421 net/bluetooth/hidp/core.c 	wake_up_interruptible(sk_sleep(session->intr_sock->sk));
sk                422 net/bluetooth/hidp/core.c 	wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));
sk                786 net/bluetooth/hidp/core.c 		 &l2cap_pi(session->ctrl_sock->sk)->chan->src);
sk                792 net/bluetooth/hidp/core.c 		 &l2cap_pi(session->ctrl_sock->sk)->chan->dst);
sk                921 net/bluetooth/hidp/core.c 	ctrl = bt_sk(ctrl_sock->sk);
sk                922 net/bluetooth/hidp/core.c 	intr = bt_sk(intr_sock->sk);
sk               1190 net/bluetooth/hidp/core.c 	struct sock *ctrl_sk = session->ctrl_sock->sk;
sk               1191 net/bluetooth/hidp/core.c 	struct sock *intr_sk = session->intr_sock->sk;
sk               1278 net/bluetooth/hidp/core.c 	add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
sk               1279 net/bluetooth/hidp/core.c 	add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
sk               1292 net/bluetooth/hidp/core.c 	remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
sk               1293 net/bluetooth/hidp/core.c 	remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
sk               1322 net/bluetooth/hidp/core.c 	ctrl_chan = l2cap_pi(ctrl_sock->sk)->chan;
sk               1323 net/bluetooth/hidp/core.c 	intr_chan = l2cap_pi(intr_sock->sk)->chan;
sk               1329 net/bluetooth/hidp/core.c 	ctrl = bt_sk(ctrl_sock->sk);
sk               1330 net/bluetooth/hidp/core.c 	intr = bt_sk(intr_sock->sk);
sk               1332 net/bluetooth/hidp/core.c 	if (ctrl->sk.sk_state != BT_CONNECTED ||
sk               1333 net/bluetooth/hidp/core.c 	    intr->sk.sk_state != BT_CONNECTED)
sk               1364 net/bluetooth/hidp/core.c 	chan = l2cap_pi(ctrl_sock->sk)->chan;
sk                 34 net/bluetooth/hidp/sock.c 	struct sock *sk = sock->sk;
sk                 36 net/bluetooth/hidp/sock.c 	BT_DBG("sock %p sk %p", sock, sk);
sk                 38 net/bluetooth/hidp/sock.c 	if (!sk)
sk                 41 net/bluetooth/hidp/sock.c 	bt_sock_unlink(&hidp_sk_list, sk);
sk                 43 net/bluetooth/hidp/sock.c 	sock_orphan(sk);
sk                 44 net/bluetooth/hidp/sock.c 	sock_put(sk);
sk                253 net/bluetooth/hidp/sock.c 	struct sock *sk;
sk                260 net/bluetooth/hidp/sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern);
sk                261 net/bluetooth/hidp/sock.c 	if (!sk)
sk                264 net/bluetooth/hidp/sock.c 	sock_init_data(sock, sk);
sk                270 net/bluetooth/hidp/sock.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                272 net/bluetooth/hidp/sock.c 	sk->sk_protocol = protocol;
sk                273 net/bluetooth/hidp/sock.c 	sk->sk_state	= BT_OPEN;
sk                275 net/bluetooth/hidp/sock.c 	bt_sock_link(&hidp_sk_list, sk);
sk                 45 net/bluetooth/l2cap_sock.c static void l2cap_sock_init(struct sock *sk, struct sock *parent);
sk                 83 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                 84 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk                 88 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p", sk);
sk                111 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk                113 net/bluetooth/l2cap_sock.c 	if (sk->sk_state != BT_OPEN) {
sk                168 net/bluetooth/l2cap_sock.c 	sk->sk_state = BT_BOUND;
sk                171 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk                178 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                179 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk                183 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p", sk);
sk                243 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk                245 net/bluetooth/l2cap_sock.c 	err = bt_sock_wait_state(sk, BT_CONNECTED,
sk                246 net/bluetooth/l2cap_sock.c 				 sock_sndtimeo(sk, flags & O_NONBLOCK));
sk                248 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk                255 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                256 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk                259 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p backlog %d", sk, backlog);
sk                261 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk                263 net/bluetooth/l2cap_sock.c 	if (sk->sk_state != BT_BOUND) {
sk                268 net/bluetooth/l2cap_sock.c 	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) {
sk                287 net/bluetooth/l2cap_sock.c 	sk->sk_max_ack_backlog = backlog;
sk                288 net/bluetooth/l2cap_sock.c 	sk->sk_ack_backlog = 0;
sk                297 net/bluetooth/l2cap_sock.c 	sk->sk_state = BT_LISTEN;
sk                300 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk                308 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk, *nsk;
sk                312 net/bluetooth/l2cap_sock.c 	lock_sock_nested(sk, L2CAP_NESTING_PARENT);
sk                314 net/bluetooth/l2cap_sock.c 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk                316 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p timeo %ld", sk, timeo);
sk                319 net/bluetooth/l2cap_sock.c 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
sk                321 net/bluetooth/l2cap_sock.c 		if (sk->sk_state != BT_LISTEN) {
sk                326 net/bluetooth/l2cap_sock.c 		nsk = bt_accept_dequeue(sk, newsock);
sk                340 net/bluetooth/l2cap_sock.c 		release_sock(sk);
sk                344 net/bluetooth/l2cap_sock.c 		lock_sock_nested(sk, L2CAP_NESTING_PARENT);
sk                346 net/bluetooth/l2cap_sock.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                356 net/bluetooth/l2cap_sock.c 	release_sock(sk);
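l2cap_sock_accept() above is the canonical blocking-accept loop for these sockets: sleep on sk_sleep(sk), re-check the listening state, and retry bt_accept_dequeue() until a child socket, a timeout, or a signal ends the wait. A sketch of the loop body; the DEFINE_WAIT_FUNC()/wait_woken() plumbing is assumed from the standard pattern, while the dequeue, timeout and signal checks come from the lines quoted above.

DEFINE_WAIT_FUNC(wait, woken_wake_function);
struct sock *nsk = NULL;
long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
int err = 0;

add_wait_queue_exclusive(sk_sleep(sk), &wait);
while (1) {
	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;			/* socket left the listening state */
		break;
	}

	nsk = bt_accept_dequeue(sk, newsock);
	if (nsk)
		break;				/* a completed connection is ready */

	if (!timeo) {
		err = -EAGAIN;			/* non-blocking accept, nothing queued */
		break;
	}
	if (signal_pending(current)) {
		err = sock_intr_errno(timeo);
		break;
	}

	release_sock(sk);			/* drop the socket lock while sleeping */
	timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
	lock_sock_nested(sk, L2CAP_NESTING_PARENT);
}
remove_wait_queue(sk_sleep(sk), &wait);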
sk                364 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                365 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk                367 net/bluetooth/l2cap_sock.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                369 net/bluetooth/l2cap_sock.c 	if (peer && sk->sk_state != BT_CONNECTED &&
sk                370 net/bluetooth/l2cap_sock.c 	    sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2 &&
sk                371 net/bluetooth/l2cap_sock.c 	    sk->sk_state != BT_CONFIG)
sk                395 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                396 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk                402 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p", sk);
sk                407 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk                469 net/bluetooth/l2cap_sock.c 		if (sk->sk_state != BT_CONNECTED &&
sk                470 net/bluetooth/l2cap_sock.c 		    !(sk->sk_state == BT_CONNECT2 &&
sk                471 net/bluetooth/l2cap_sock.c 		      test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
sk                491 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk                498 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                499 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk                504 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p", sk);
sk                515 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk                530 net/bluetooth/l2cap_sock.c 			if (sk->sk_state == BT_CONNECTED)
sk                543 net/bluetooth/l2cap_sock.c 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
sk                548 net/bluetooth/l2cap_sock.c 		if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
sk                562 net/bluetooth/l2cap_sock.c 		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
sk                563 net/bluetooth/l2cap_sock.c 		    && sk->sk_type != SOCK_RAW) {
sk                587 net/bluetooth/l2cap_sock.c 		if (sk->sk_state != BT_CONNECTED) {
sk                611 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk                634 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                635 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk                640 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p", sk);
sk                642 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk                651 net/bluetooth/l2cap_sock.c 		if (sk->sk_state == BT_CONNECTED) {
sk                739 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk                746 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                747 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk                754 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p", sk);
sk                762 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk                802 net/bluetooth/l2cap_sock.c 			sk->sk_state = BT_CONFIG;
sk                806 net/bluetooth/l2cap_sock.c 		} else if ((sk->sk_state == BT_CONNECT2 &&
sk                807 net/bluetooth/l2cap_sock.c 			    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) ||
sk                808 net/bluetooth/l2cap_sock.c 			   sk->sk_state == BT_CONNECTED) {
sk                810 net/bluetooth/l2cap_sock.c 				set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
sk                812 net/bluetooth/l2cap_sock.c 				sk->sk_state_change(sk);
sk                819 net/bluetooth/l2cap_sock.c 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
sk                830 net/bluetooth/l2cap_sock.c 			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
sk                833 net/bluetooth/l2cap_sock.c 			clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
sk                905 net/bluetooth/l2cap_sock.c 		if (sk->sk_state == BT_CONNECTED &&
sk                929 net/bluetooth/l2cap_sock.c 		if (sk->sk_state == BT_CONNECTED) {
sk                947 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk                954 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                955 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk                958 net/bluetooth/l2cap_sock.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                960 net/bluetooth/l2cap_sock.c 	err = sock_error(sk);
sk                967 net/bluetooth/l2cap_sock.c 	if (sk->sk_state != BT_CONNECTED)
sk                970 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk                971 net/bluetooth/l2cap_sock.c 	err = bt_sock_wait_ready(sk, msg->msg_flags);
sk                972 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk                986 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk                987 net/bluetooth/l2cap_sock.c 	struct l2cap_pinfo *pi = l2cap_pi(sk);
sk                990 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk                992 net/bluetooth/l2cap_sock.c 	if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
sk                993 net/bluetooth/l2cap_sock.c 						    &bt_sk(sk)->flags)) {
sk                995 net/bluetooth/l2cap_sock.c 			sk->sk_state = BT_CONNECTED;
sk                999 net/bluetooth/l2cap_sock.c 			sk->sk_state = BT_CONFIG;
sk               1008 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk               1020 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk               1026 net/bluetooth/l2cap_sock.c 		if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb))
sk               1036 net/bluetooth/l2cap_sock.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
sk               1040 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk               1047 net/bluetooth/l2cap_sock.c static void l2cap_sock_kill(struct sock *sk)
sk               1049 net/bluetooth/l2cap_sock.c 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
sk               1052 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
sk               1056 net/bluetooth/l2cap_sock.c 	l2cap_chan_put(l2cap_pi(sk)->chan);
sk               1057 net/bluetooth/l2cap_sock.c 	sock_set_flag(sk, SOCK_DEAD);
sk               1058 net/bluetooth/l2cap_sock.c 	sock_put(sk);
sk               1061 net/bluetooth/l2cap_sock.c static int __l2cap_wait_ack(struct sock *sk, struct l2cap_chan *chan)
sk               1069 net/bluetooth/l2cap_sock.c 	add_wait_queue(sk_sleep(sk), &wait);
sk               1084 net/bluetooth/l2cap_sock.c 		release_sock(sk);
sk               1086 net/bluetooth/l2cap_sock.c 		lock_sock(sk);
sk               1089 net/bluetooth/l2cap_sock.c 		err = sock_error(sk);
sk               1102 net/bluetooth/l2cap_sock.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk               1108 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk               1113 net/bluetooth/l2cap_sock.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk               1115 net/bluetooth/l2cap_sock.c 	if (!sk)
sk               1118 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk               1120 net/bluetooth/l2cap_sock.c 	if (sk->sk_shutdown)
sk               1126 net/bluetooth/l2cap_sock.c 	sock_hold(sk);
sk               1128 net/bluetooth/l2cap_sock.c 	chan = l2cap_pi(sk)->chan;
sk               1137 net/bluetooth/l2cap_sock.c 		err = __l2cap_wait_ack(sk, chan);
sk               1143 net/bluetooth/l2cap_sock.c 		if (sk->sk_shutdown)
sk               1147 net/bluetooth/l2cap_sock.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk               1148 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk               1170 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk               1172 net/bluetooth/l2cap_sock.c 	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
sk               1174 net/bluetooth/l2cap_sock.c 		err = bt_sock_wait_state(sk, BT_CLOSED,
sk               1175 net/bluetooth/l2cap_sock.c 					 sk->sk_lingertime);
sk               1179 net/bluetooth/l2cap_sock.c 	sock_put(sk);
sk               1182 net/bluetooth/l2cap_sock.c 	if (!err && sk->sk_err)
sk               1183 net/bluetooth/l2cap_sock.c 		err = -sk->sk_err;
sk               1185 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk               1194 net/bluetooth/l2cap_sock.c 	struct sock *sk = sock->sk;
sk               1197 net/bluetooth/l2cap_sock.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk               1199 net/bluetooth/l2cap_sock.c 	if (!sk)
sk               1202 net/bluetooth/l2cap_sock.c 	bt_sock_unlink(&l2cap_sk_list, sk);
sk               1206 net/bluetooth/l2cap_sock.c 	sock_orphan(sk);
sk               1207 net/bluetooth/l2cap_sock.c 	l2cap_sock_kill(sk);
sk               1213 net/bluetooth/l2cap_sock.c 	struct sock *sk;
sk               1219 net/bluetooth/l2cap_sock.c 	while ((sk = bt_accept_dequeue(parent, NULL))) {
sk               1220 net/bluetooth/l2cap_sock.c 		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk               1230 net/bluetooth/l2cap_sock.c 		l2cap_sock_kill(sk);
sk               1236 net/bluetooth/l2cap_sock.c 	struct sock *sk, *parent = chan->data;
sk               1247 net/bluetooth/l2cap_sock.c 	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
sk               1249 net/bluetooth/l2cap_sock.c 	if (!sk) {
sk               1254 net/bluetooth/l2cap_sock.c 	bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
sk               1256 net/bluetooth/l2cap_sock.c 	l2cap_sock_init(sk, parent);
sk               1258 net/bluetooth/l2cap_sock.c 	bt_accept_enqueue(parent, sk, false);
sk               1262 net/bluetooth/l2cap_sock.c 	return l2cap_pi(sk)->chan;
sk               1267 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1270 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk               1272 net/bluetooth/l2cap_sock.c 	if (l2cap_pi(sk)->rx_busy_skb) {
sk               1282 net/bluetooth/l2cap_sock.c 		err = sk_filter(sk, skb);
sk               1287 net/bluetooth/l2cap_sock.c 	err = __sock_queue_rcv_skb(sk, skb);
sk               1299 net/bluetooth/l2cap_sock.c 		l2cap_pi(sk)->rx_busy_skb = skb;
sk               1305 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk               1312 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1314 net/bluetooth/l2cap_sock.c 	l2cap_sock_kill(sk);
sk               1319 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1331 net/bluetooth/l2cap_sock.c 	lock_sock_nested(sk, atomic_read(&chan->nesting));
sk               1333 net/bluetooth/l2cap_sock.c 	parent = bt_sk(sk)->parent;
sk               1335 net/bluetooth/l2cap_sock.c 	sock_set_flag(sk, SOCK_ZAPPED);
sk               1343 net/bluetooth/l2cap_sock.c 		l2cap_sock_cleanup_listen(sk);
sk               1344 net/bluetooth/l2cap_sock.c 		sk->sk_state = BT_CLOSED;
sk               1349 net/bluetooth/l2cap_sock.c 		sk->sk_state = BT_CLOSED;
sk               1352 net/bluetooth/l2cap_sock.c 		sk->sk_err = err;
sk               1355 net/bluetooth/l2cap_sock.c 			bt_accept_unlink(sk);
sk               1358 net/bluetooth/l2cap_sock.c 			sk->sk_state_change(sk);
sk               1364 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk               1370 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1372 net/bluetooth/l2cap_sock.c 	sk->sk_state = state;
sk               1375 net/bluetooth/l2cap_sock.c 		sk->sk_err = err;
sk               1382 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1387 net/bluetooth/l2cap_sock.c 	skb = bt_skb_send_alloc(sk, hdr_len + len, nb, &err);
sk               1393 net/bluetooth/l2cap_sock.c 	skb->priority = sk->sk_priority;
sk               1402 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1405 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk               1407 net/bluetooth/l2cap_sock.c 	parent = bt_sk(sk)->parent;
sk               1409 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p, parent %p", sk, parent);
sk               1411 net/bluetooth/l2cap_sock.c 	sk->sk_state = BT_CONNECTED;
sk               1412 net/bluetooth/l2cap_sock.c 	sk->sk_state_change(sk);
sk               1417 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk               1422 net/bluetooth/l2cap_sock.c 	struct sock *parent, *sk = chan->data;
sk               1424 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk               1426 net/bluetooth/l2cap_sock.c 	parent = bt_sk(sk)->parent;
sk               1430 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk               1435 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1438 net/bluetooth/l2cap_sock.c 		sk->sk_state = BT_CONNECTED;
sk               1442 net/bluetooth/l2cap_sock.c 	clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
sk               1443 net/bluetooth/l2cap_sock.c 	sk->sk_state_change(sk);
sk               1448 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1450 net/bluetooth/l2cap_sock.c 	lock_sock(sk);
sk               1451 net/bluetooth/l2cap_sock.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk               1452 net/bluetooth/l2cap_sock.c 	release_sock(sk);
sk               1457 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1459 net/bluetooth/l2cap_sock.c 	return sk->sk_sndtimeo;
sk               1464 net/bluetooth/l2cap_sock.c 	struct sock *sk = chan->data;
sk               1466 net/bluetooth/l2cap_sock.c 	set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
sk               1467 net/bluetooth/l2cap_sock.c 	sk->sk_state_change(sk);
sk               1486 net/bluetooth/l2cap_sock.c static void l2cap_sock_destruct(struct sock *sk)
sk               1488 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p", sk);
sk               1490 net/bluetooth/l2cap_sock.c 	if (l2cap_pi(sk)->chan)
sk               1491 net/bluetooth/l2cap_sock.c 		l2cap_chan_put(l2cap_pi(sk)->chan);
sk               1493 net/bluetooth/l2cap_sock.c 	if (l2cap_pi(sk)->rx_busy_skb) {
sk               1494 net/bluetooth/l2cap_sock.c 		kfree_skb(l2cap_pi(sk)->rx_busy_skb);
sk               1495 net/bluetooth/l2cap_sock.c 		l2cap_pi(sk)->rx_busy_skb = NULL;
sk               1498 net/bluetooth/l2cap_sock.c 	skb_queue_purge(&sk->sk_receive_queue);
sk               1499 net/bluetooth/l2cap_sock.c 	skb_queue_purge(&sk->sk_write_queue);
sk               1515 net/bluetooth/l2cap_sock.c static void l2cap_sock_init(struct sock *sk, struct sock *parent)
sk               1517 net/bluetooth/l2cap_sock.c 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
sk               1519 net/bluetooth/l2cap_sock.c 	BT_DBG("sk %p", sk);
sk               1524 net/bluetooth/l2cap_sock.c 		sk->sk_type = parent->sk_type;
sk               1525 net/bluetooth/l2cap_sock.c 		bt_sk(sk)->flags = bt_sk(parent)->flags;
sk               1546 net/bluetooth/l2cap_sock.c 		security_sk_clone(parent, sk);
sk               1548 net/bluetooth/l2cap_sock.c 		switch (sk->sk_type) {
sk               1554 net/bluetooth/l2cap_sock.c 			bt_sk(sk)->skb_msg_name = l2cap_skb_msg_name;
sk               1564 net/bluetooth/l2cap_sock.c 		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
sk               1577 net/bluetooth/l2cap_sock.c 	chan->data = sk;
sk               1590 net/bluetooth/l2cap_sock.c 	struct sock *sk;
sk               1593 net/bluetooth/l2cap_sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, kern);
sk               1594 net/bluetooth/l2cap_sock.c 	if (!sk)
sk               1597 net/bluetooth/l2cap_sock.c 	sock_init_data(sock, sk);
sk               1598 net/bluetooth/l2cap_sock.c 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk               1600 net/bluetooth/l2cap_sock.c 	sk->sk_destruct = l2cap_sock_destruct;
sk               1601 net/bluetooth/l2cap_sock.c 	sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT;
sk               1603 net/bluetooth/l2cap_sock.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk               1605 net/bluetooth/l2cap_sock.c 	sk->sk_protocol = proto;
sk               1606 net/bluetooth/l2cap_sock.c 	sk->sk_state = BT_OPEN;
sk               1610 net/bluetooth/l2cap_sock.c 		sk_free(sk);
sk               1616 net/bluetooth/l2cap_sock.c 	l2cap_pi(sk)->chan = chan;
sk               1618 net/bluetooth/l2cap_sock.c 	return sk;
sk               1624 net/bluetooth/l2cap_sock.c 	struct sock *sk;
sk               1639 net/bluetooth/l2cap_sock.c 	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
sk               1640 net/bluetooth/l2cap_sock.c 	if (!sk)
sk               1643 net/bluetooth/l2cap_sock.c 	l2cap_sock_init(sk, NULL);
sk               1644 net/bluetooth/l2cap_sock.c 	bt_sock_link(&l2cap_sk_list, sk);
sk                287 net/bluetooth/mgmt.c static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
sk                292 net/bluetooth/mgmt.c 	BT_DBG("sock %p", sk);
sk                296 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
sk                300 net/bluetooth/mgmt.c static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
sk                308 net/bluetooth/mgmt.c 	BT_DBG("sock %p", sk);
sk                310 net/bluetooth/mgmt.c 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
sk                327 net/bluetooth/mgmt.c 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
sk                345 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
sk                352 net/bluetooth/mgmt.c static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
sk                361 net/bluetooth/mgmt.c 	BT_DBG("sock %p", sk);
sk                404 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
sk                412 net/bluetooth/mgmt.c static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
sk                421 net/bluetooth/mgmt.c 	BT_DBG("sock %p", sk);
sk                464 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
sk                472 net/bluetooth/mgmt.c static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
sk                480 net/bluetooth/mgmt.c 	BT_DBG("sock %p", sk);
sk                533 net/bluetooth/mgmt.c 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
sk                534 net/bluetooth/mgmt.c 	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
sk                535 net/bluetooth/mgmt.c 	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
sk                537 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
sk                584 net/bluetooth/mgmt.c static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
sk                588 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
sk                592 net/bluetooth/mgmt.c static int read_config_info(struct sock *sk, struct hci_dev *hdev,
sk                598 net/bluetooth/mgmt.c 	BT_DBG("sock %p %s", sk, hdev->name);
sk                616 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
sk                950 net/bluetooth/mgmt.c static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
sk                966 net/bluetooth/mgmt.c static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
sk                971 net/bluetooth/mgmt.c 	BT_DBG("sock %p %s", sk, hdev->name);
sk                992 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
sk               1020 net/bluetooth/mgmt.c static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
sk               1027 net/bluetooth/mgmt.c 	BT_DBG("sock %p %s", sk, hdev->name);
sk               1052 net/bluetooth/mgmt.c 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
sk               1053 net/bluetooth/mgmt.c 	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
sk               1054 net/bluetooth/mgmt.c 	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
sk               1056 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
sk               1076 net/bluetooth/mgmt.c static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
sk               1080 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
sk               1094 net/bluetooth/mgmt.c void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
sk               1100 net/bluetooth/mgmt.c 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
sk               1103 net/bluetooth/mgmt.c void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
sk               1110 net/bluetooth/mgmt.c 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
sk               1155 net/bluetooth/mgmt.c static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
sk               1165 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
sk               1171 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
sk               1177 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
sk               1181 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
sk               1224 net/bluetooth/mgmt.c 	struct sock *sk;
sk               1233 net/bluetooth/mgmt.c 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
sk               1237 net/bluetooth/mgmt.c 	if (match->sk == NULL) {
sk               1238 net/bluetooth/mgmt.c 		match->sk = cmd->sk;
sk               1239 net/bluetooth/mgmt.c 		sock_hold(match->sk);
sk               1249 net/bluetooth/mgmt.c 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
sk               1269 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
sk               1275 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
sk               1313 net/bluetooth/mgmt.c 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
sk               1324 net/bluetooth/mgmt.c 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
sk               1325 net/bluetooth/mgmt.c 	new_settings(hdev, cmd->sk);
sk               1334 net/bluetooth/mgmt.c static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
sk               1346 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
sk               1350 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
sk               1360 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
sk               1366 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
sk               1373 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
sk               1379 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
sk               1396 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
sk               1401 net/bluetooth/mgmt.c 			err = new_settings(hdev, sk);
sk               1422 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
sk               1426 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
sk               1472 net/bluetooth/mgmt.c 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
sk               1476 net/bluetooth/mgmt.c 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
sk               1477 net/bluetooth/mgmt.c 	new_settings(hdev, cmd->sk);
sk               1487 net/bluetooth/mgmt.c 					   struct sock *sk, u8 val)
sk               1502 net/bluetooth/mgmt.c 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
sk               1509 net/bluetooth/mgmt.c 		return new_settings(hdev, sk);
sk               1515 net/bluetooth/mgmt.c static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
sk               1526 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
sk               1530 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
sk               1536 net/bluetooth/mgmt.c 		err = set_connectable_update_settings(hdev, sk, cp->val);
sk               1542 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
sk               1547 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
sk               1572 net/bluetooth/mgmt.c static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
sk               1582 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
sk               1592 net/bluetooth/mgmt.c 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
sk               1607 net/bluetooth/mgmt.c 		err = new_settings(hdev, sk);
sk               1615 net/bluetooth/mgmt.c static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
sk               1627 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
sk               1631 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
sk               1644 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
sk               1649 net/bluetooth/mgmt.c 			err = new_settings(hdev, sk);
sk               1655 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
sk               1663 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
sk               1667 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
sk               1684 net/bluetooth/mgmt.c static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
sk               1695 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
sk               1698 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
sk               1702 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
sk               1723 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
sk               1728 net/bluetooth/mgmt.c 			err = new_settings(hdev, sk);
sk               1734 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
sk               1740 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
sk               1744 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
sk               1765 net/bluetooth/mgmt.c static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
sk               1776 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
sk               1779 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
sk               1783 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
sk               1787 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
sk               1793 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
sk               1802 net/bluetooth/mgmt.c 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
sk               1810 net/bluetooth/mgmt.c 	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
sk               1815 net/bluetooth/mgmt.c 		err = new_settings(hdev, sk);
sk               1838 net/bluetooth/mgmt.c 	new_settings(hdev, match.sk);
sk               1840 net/bluetooth/mgmt.c 	if (match.sk)
sk               1841 net/bluetooth/mgmt.c 		sock_put(match.sk);
sk               1869 net/bluetooth/mgmt.c static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
sk               1881 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
sk               1885 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
sk               1899 net/bluetooth/mgmt.c 			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
sk               1901 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
sk               1926 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
sk               1931 net/bluetooth/mgmt.c 			err = new_settings(hdev, sk);
sk               1938 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
sk               1943 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
sk               2028 net/bluetooth/mgmt.c 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
sk               2044 net/bluetooth/mgmt.c static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
sk               2057 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
sk               2084 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
sk               2089 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
sk               2123 net/bluetooth/mgmt.c static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2138 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
sk               2147 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               2168 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
sk               2184 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
sk               2189 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
sk               2209 net/bluetooth/mgmt.c static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2220 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
sk               2226 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
sk               2232 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
sk               2241 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
sk               2262 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
sk               2267 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
sk               2280 net/bluetooth/mgmt.c static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2293 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
sk               2300 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
sk               2308 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
sk               2313 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
sk               2323 net/bluetooth/mgmt.c 			return mgmt_cmd_status(sk, hdev->id,
sk               2354 net/bluetooth/mgmt.c 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
sk               2373 net/bluetooth/mgmt.c static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2389 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
sk               2394 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
sk               2401 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
sk               2423 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               2439 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
sk               2477 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
sk               2479 net/bluetooth/mgmt.c 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
sk               2483 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
sk               2501 net/bluetooth/mgmt.c static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2517 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
sk               2524 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
sk               2531 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
sk               2544 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
sk               2550 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
sk               2586 net/bluetooth/mgmt.c static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2599 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
sk               2630 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
sk               2640 net/bluetooth/mgmt.c static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
sk               2646 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
sk               2661 net/bluetooth/mgmt.c static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2675 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
sk               2682 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
sk               2694 net/bluetooth/mgmt.c 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
sk               2696 net/bluetooth/mgmt.c 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
sk               2702 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
sk               2723 net/bluetooth/mgmt.c static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2731 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
sk               2743 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
sk               2774 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
sk               2841 net/bluetooth/mgmt.c static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2858 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
sk               2863 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
sk               2870 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
sk               2877 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
sk               2924 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
sk               2931 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
sk               2936 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
sk               2972 net/bluetooth/mgmt.c static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
sk               2985 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
sk               2992 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
sk               3000 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
sk               3008 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
sk               3015 net/bluetooth/mgmt.c static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
sk               3026 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
sk               3039 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
sk               3048 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
sk               3052 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
sk               3059 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
sk               3086 net/bluetooth/mgmt.c static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
sk               3093 net/bluetooth/mgmt.c 	return user_pairing_resp(sk, hdev, &cp->addr,
sk               3098 net/bluetooth/mgmt.c static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
sk               3106 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
sk               3109 net/bluetooth/mgmt.c 	return user_pairing_resp(sk, hdev, &cp->addr,
sk               3114 net/bluetooth/mgmt.c static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
sk               3121 net/bluetooth/mgmt.c 	return user_pairing_resp(sk, hdev, &cp->addr,
sk               3126 net/bluetooth/mgmt.c static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
sk               3133 net/bluetooth/mgmt.c 	return user_pairing_resp(sk, hdev, &cp->addr,
sk               3138 net/bluetooth/mgmt.c static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
sk               3145 net/bluetooth/mgmt.c 	return user_pairing_resp(sk, hdev, &cp->addr,
sk               3195 net/bluetooth/mgmt.c 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
sk               3198 net/bluetooth/mgmt.c 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
sk               3211 net/bluetooth/mgmt.c static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
sk               3229 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
sk               3239 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
sk               3245 net/bluetooth/mgmt.c 					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
sk               3246 net/bluetooth/mgmt.c 		ext_info_changed(hdev, sk);
sk               3251 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
sk               3281 net/bluetooth/mgmt.c static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
sk               3291 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
sk               3304 net/bluetooth/mgmt.c 		ext_info_changed(hdev, sk);
sk               3307 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
sk               3315 net/bluetooth/mgmt.c static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
sk               3320 net/bluetooth/mgmt.c 	BT_DBG("sock %p %s", sk, hdev->name);
sk               3332 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
sk               3362 net/bluetooth/mgmt.c 		mgmt_cmd_status(cmd->sk, hdev->id,
sk               3366 net/bluetooth/mgmt.c 		mgmt_cmd_complete(cmd->sk, hdev->id,
sk               3370 net/bluetooth/mgmt.c 		mgmt_phy_configuration_changed(hdev, cmd->sk);
sk               3379 net/bluetooth/mgmt.c static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
sk               3391 net/bluetooth/mgmt.c 	BT_DBG("sock %p %s", sk, hdev->name);
sk               3398 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id,
sk               3405 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id,
sk               3410 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id,
sk               3417 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id,
sk               3424 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id,
sk               3478 net/bluetooth/mgmt.c 			mgmt_phy_configuration_changed(hdev, sk);
sk               3480 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id,
sk               3487 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
sk               3548 net/bluetooth/mgmt.c 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
sk               3559 net/bluetooth/mgmt.c 			mgmt_cmd_status(cmd->sk, hdev->id,
sk               3573 net/bluetooth/mgmt.c 			mgmt_cmd_status(cmd->sk, hdev->id,
sk               3586 net/bluetooth/mgmt.c 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
sk               3593 net/bluetooth/mgmt.c static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
sk               3605 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
sk               3611 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
sk               3617 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
sk               3622 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
sk               3644 net/bluetooth/mgmt.c static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
sk               3653 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id,
sk               3665 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               3680 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id,
sk               3694 net/bluetooth/mgmt.c 				err = mgmt_cmd_complete(sk, hdev->id,
sk               3737 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id,
sk               3743 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
sk               3752 net/bluetooth/mgmt.c static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
sk               3762 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id,
sk               3782 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
sk               3839 net/bluetooth/mgmt.c static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
sk               3852 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, op,
sk               3860 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
sk               3866 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, op, status,
sk               3883 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
sk               3900 net/bluetooth/mgmt.c static int start_discovery(struct sock *sk, struct hci_dev *hdev,
sk               3903 net/bluetooth/mgmt.c 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
sk               3907 net/bluetooth/mgmt.c static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
sk               3910 net/bluetooth/mgmt.c 	return start_discovery_internal(sk, hdev,
sk               3918 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
sk               3922 net/bluetooth/mgmt.c static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
sk               3937 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id,
sk               3946 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id,
sk               3957 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id,
sk               3968 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id,
sk               3976 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id,
sk               3982 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
sk               4005 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               4040 net/bluetooth/mgmt.c static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
sk               4052 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
sk               4059 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
sk               4065 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
sk               4082 net/bluetooth/mgmt.c static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
sk               4094 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
sk               4102 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
sk               4116 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
sk               4124 net/bluetooth/mgmt.c static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
sk               4134 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
sk               4148 net/bluetooth/mgmt.c 		   sk);
sk               4152 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
sk               4160 net/bluetooth/mgmt.c static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
sk               4170 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
sk               4184 net/bluetooth/mgmt.c 		   sk);
sk               4188 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
sk               4196 net/bluetooth/mgmt.c static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
sk               4209 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
sk               4219 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
sk               4264 net/bluetooth/mgmt.c 	new_settings(hdev, match.sk);
sk               4266 net/bluetooth/mgmt.c 	if (match.sk)
sk               4267 net/bluetooth/mgmt.c 		sock_put(match.sk);
sk               4300 net/bluetooth/mgmt.c static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
sk               4313 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
sk               4317 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
sk               4349 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
sk               4354 net/bluetooth/mgmt.c 			err = new_settings(hdev, sk);
sk               4361 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
sk               4366 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
sk               4408 net/bluetooth/mgmt.c static int set_static_address(struct sock *sk, struct hci_dev *hdev,
sk               4417 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
sk               4421 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
sk               4426 net/bluetooth/mgmt.c 			return mgmt_cmd_status(sk, hdev->id,
sk               4432 net/bluetooth/mgmt.c 			return mgmt_cmd_status(sk, hdev->id,
sk               4441 net/bluetooth/mgmt.c 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
sk               4445 net/bluetooth/mgmt.c 	err = new_settings(hdev, sk);
sk               4452 net/bluetooth/mgmt.c static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
sk               4462 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
sk               4468 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
sk               4474 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
sk               4478 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
sk               4486 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
sk               4523 net/bluetooth/mgmt.c 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
sk               4533 net/bluetooth/mgmt.c 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
sk               4534 net/bluetooth/mgmt.c 		new_settings(hdev, cmd->sk);
sk               4543 net/bluetooth/mgmt.c static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
sk               4555 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
sk               4559 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
sk               4565 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
sk               4571 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
sk               4578 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
sk               4580 net/bluetooth/mgmt.c 		new_settings(hdev, sk);
sk               4584 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
sk               4597 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
sk               4628 net/bluetooth/mgmt.c 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
sk               4630 net/bluetooth/mgmt.c 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
sk               4631 net/bluetooth/mgmt.c 		new_settings(hdev, cmd->sk);
sk               4640 net/bluetooth/mgmt.c static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
sk               4650 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
sk               4654 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
sk               4658 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
sk               4664 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
sk               4679 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
sk               4683 net/bluetooth/mgmt.c 		err = new_settings(hdev, sk);
sk               4689 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
sk               4710 net/bluetooth/mgmt.c 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
sk               4717 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
sk               4722 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
sk               4766 net/bluetooth/mgmt.c 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
sk               4788 net/bluetooth/mgmt.c 	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
sk               4789 net/bluetooth/mgmt.c 	new_settings(hdev, cmd->sk);
sk               4797 net/bluetooth/mgmt.c static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
sk               4810 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
sk               4816 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
sk               4820 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
sk               4842 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
sk               4847 net/bluetooth/mgmt.c 			err = new_settings(hdev, sk);
sk               4853 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
sk               4862 net/bluetooth/mgmt.c 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
sk               4866 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
sk               4885 net/bluetooth/mgmt.c static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
sk               4895 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
sk               4920 net/bluetooth/mgmt.c 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
sk               4925 net/bluetooth/mgmt.c 		err = new_settings(hdev, sk);
sk               4932 net/bluetooth/mgmt.c static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
sk               4942 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
sk               4946 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
sk               4950 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
sk               4977 net/bluetooth/mgmt.c 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
sk               4982 net/bluetooth/mgmt.c 		err = new_settings(hdev, sk);
sk               5005 net/bluetooth/mgmt.c static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
sk               5017 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
sk               5024 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
sk               5032 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
sk               5042 net/bluetooth/mgmt.c 			return mgmt_cmd_status(sk, hdev->id,
sk               5061 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
sk               5087 net/bluetooth/mgmt.c static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
sk               5099 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
sk               5106 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
sk               5114 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
sk               5124 net/bluetooth/mgmt.c 			return mgmt_cmd_status(sk, hdev->id,
sk               5167 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
sk               5193 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
sk               5256 net/bluetooth/mgmt.c static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
sk               5272 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
sk               5279 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
sk               5292 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
sk               5299 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
sk               5350 net/bluetooth/mgmt.c 		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
sk               5368 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
sk               5402 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
sk               5445 net/bluetooth/mgmt.c static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
sk               5463 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
sk               5470 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
sk               5480 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               5490 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
sk               5583 net/bluetooth/mgmt.c static void device_added(struct sock *sk, struct hci_dev *hdev,
sk               5592 net/bluetooth/mgmt.c 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
sk               5595 net/bluetooth/mgmt.c static int add_device(struct sock *sk, struct hci_dev *hdev,
sk               5606 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
sk               5611 net/bluetooth/mgmt.c 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
sk               5620 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               5652 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
sk               5663 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
sk               5672 net/bluetooth/mgmt.c 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
sk               5674 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
sk               5683 net/bluetooth/mgmt.c static void device_removed(struct sock *sk, struct hci_dev *hdev,
sk               5691 net/bluetooth/mgmt.c 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
sk               5694 net/bluetooth/mgmt.c static int remove_device(struct sock *sk, struct hci_dev *hdev,
sk               5709 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               5721 net/bluetooth/mgmt.c 				err = mgmt_cmd_complete(sk, hdev->id,
sk               5731 net/bluetooth/mgmt.c 			device_removed(sk, hdev, &cp->addr.bdaddr,
sk               5744 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               5754 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               5763 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               5775 net/bluetooth/mgmt.c 		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
sk               5781 net/bluetooth/mgmt.c 			err = mgmt_cmd_complete(sk, hdev->id,
sk               5789 net/bluetooth/mgmt.c 			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
sk               5799 net/bluetooth/mgmt.c 			device_removed(sk, hdev, &p->addr, p->addr_type);
sk               5815 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
sk               5823 net/bluetooth/mgmt.c static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
sk               5833 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
sk               5840 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
sk               5848 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
sk               5904 net/bluetooth/mgmt.c 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
sk               5908 net/bluetooth/mgmt.c static int set_external_config(struct sock *sk, struct hci_dev *hdev,
sk               5918 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
sk               5922 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
sk               5926 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
sk               5936 net/bluetooth/mgmt.c 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
sk               5943 net/bluetooth/mgmt.c 	err = new_options(hdev, sk);
sk               5964 net/bluetooth/mgmt.c static int set_public_address(struct sock *sk, struct hci_dev *hdev,
sk               5974 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
sk               5978 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
sk               5982 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
sk               5990 net/bluetooth/mgmt.c 	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
sk               5998 net/bluetooth/mgmt.c 		err = new_options(hdev, sk);
sk               6111 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(cmd->sk, hdev->id,
sk               6117 net/bluetooth/mgmt.c 	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
sk               6121 net/bluetooth/mgmt.c 				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
sk               6127 net/bluetooth/mgmt.c static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
sk               6134 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
sk               6155 net/bluetooth/mgmt.c static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
sk               6207 net/bluetooth/mgmt.c 			err = read_local_ssp_oob_req(hdev, sk, cp);
sk               6288 net/bluetooth/mgmt.c 	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
sk               6296 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
sk               6303 net/bluetooth/mgmt.c 				 HCI_MGMT_OOB_DATA_EVENTS, sk);
sk               6342 net/bluetooth/mgmt.c static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
sk               6355 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
sk               6383 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
sk               6516 net/bluetooth/mgmt.c 		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
sk               6526 net/bluetooth/mgmt.c 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
sk               6529 net/bluetooth/mgmt.c 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
sk               6538 net/bluetooth/mgmt.c static int add_advertising(struct sock *sk, struct hci_dev *hdev,
sk               6558 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
sk               6562 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
sk               6566 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
sk               6580 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
sk               6586 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
sk               6594 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
sk               6602 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
sk               6613 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
sk               6622 net/bluetooth/mgmt.c 		mgmt_advertising_added(sk, hdev, cp->instance);
sk               6650 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
sk               6658 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
sk               6703 net/bluetooth/mgmt.c 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
sk               6711 net/bluetooth/mgmt.c static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
sk               6725 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id,
sk               6734 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
sk               6740 net/bluetooth/mgmt.c 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
sk               6747 net/bluetooth/mgmt.c 	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
sk               6761 net/bluetooth/mgmt.c 		err = mgmt_cmd_complete(sk, hdev->id,
sk               6767 net/bluetooth/mgmt.c 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
sk               6784 net/bluetooth/mgmt.c static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
sk               6795 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
sk               6799 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
sk               6809 net/bluetooth/mgmt.c 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
sk               6817 net/bluetooth/mgmt.c 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
sk               7026 net/bluetooth/mgmt.c 	new_settings(hdev, match.sk);
sk               7028 net/bluetooth/mgmt.c 	if (match.sk)
sk               7029 net/bluetooth/mgmt.c 		sock_put(match.sk);
sk               7062 net/bluetooth/mgmt.c 	new_settings(hdev, match.sk);
sk               7064 net/bluetooth/mgmt.c 	if (match.sk)
sk               7065 net/bluetooth/mgmt.c 		sock_put(match.sk);
sk               7082 net/bluetooth/mgmt.c 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
sk               7272 net/bluetooth/mgmt.c 	struct sock **sk = data;
sk               7276 net/bluetooth/mgmt.c 	*sk = cmd->sk;
sk               7277 net/bluetooth/mgmt.c 	sock_hold(*sk);
sk               7287 net/bluetooth/mgmt.c 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
sk               7314 net/bluetooth/mgmt.c 	struct sock *sk = NULL;
sk               7330 net/bluetooth/mgmt.c 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
sk               7336 net/bluetooth/mgmt.c 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
sk               7338 net/bluetooth/mgmt.c 	if (sk)
sk               7339 net/bluetooth/mgmt.c 		sock_put(sk);
sk               7534 net/bluetooth/mgmt.c 		    cmd ? cmd->sk : NULL);
sk               7563 net/bluetooth/mgmt.c 		new_settings(hdev, match.sk);
sk               7565 net/bluetooth/mgmt.c 	if (match.sk)
sk               7566 net/bluetooth/mgmt.c 		sock_put(match.sk);
sk               7618 net/bluetooth/mgmt.c 		new_settings(hdev, match.sk);
sk               7620 net/bluetooth/mgmt.c 	if (match.sk)
sk               7621 net/bluetooth/mgmt.c 		sock_put(match.sk);
sk               7641 net/bluetooth/mgmt.c 	if (match->sk == NULL) {
sk               7642 net/bluetooth/mgmt.c 		match->sk = cmd->sk;
sk               7643 net/bluetooth/mgmt.c 		sock_hold(match->sk);
sk               7662 net/bluetooth/mgmt.c 	if (match.sk)
sk               7663 net/bluetooth/mgmt.c 		sock_put(match.sk);
sk               7690 net/bluetooth/mgmt.c 			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
sk               7691 net/bluetooth/mgmt.c 	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
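Taken together, the mgmt.c hits above follow one pattern: every management command handler receives the requesting control socket as sk, answers immediately with mgmt_cmd_status() or mgmt_cmd_complete(), or parks the request with mgmt_pending_add() when the reply has to wait for the controller. Below is a minimal sketch of that shape, using only the helpers listed above; the handler and the opcode MGMT_OP_SET_EXAMPLE are hypothetical, and the actual HCI request is omitted.

static int set_example(struct sock *sk, struct hci_dev *hdev,
		       void *data, u16 len)
{
	/* MGMT_OP_SET_EXAMPLE is a made-up opcode for illustration. */
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXAMPLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reply must wait for the controller: remember the requesting
	 * socket so the deferred response can be addressed to it. */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_EXAMPLE, hdev, data, len);
	if (!cmd) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	/* ... issue the HCI request here (omitted) ... */

	hci_dev_unlock(hdev);
	return 0;
}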
sk                 93 net/bluetooth/mgmt_util.c int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
sk                100 net/bluetooth/mgmt_util.c 	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
sk                116 net/bluetooth/mgmt_util.c 	mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
sk                123 net/bluetooth/mgmt_util.c 	err = sock_queue_rcv_skb(sk, skb);
sk                136 net/bluetooth/mgmt_util.c int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
sk                144 net/bluetooth/mgmt_util.c 	BT_DBG("sock %p", sk);
sk                163 net/bluetooth/mgmt_util.c 	mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk),
sk                171 net/bluetooth/mgmt_util.c 	err = sock_queue_rcv_skb(sk, skb);
sk                190 net/bluetooth/mgmt_util.c 		if (hci_sock_get_channel(cmd->sk) != channel)
sk                230 net/bluetooth/mgmt_util.c struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
sk                251 net/bluetooth/mgmt_util.c 	cmd->sk = sk;
sk                252 net/bluetooth/mgmt_util.c 	sock_hold(sk);
sk                261 net/bluetooth/mgmt_util.c 	sock_put(cmd->sk);
sk                 29 net/bluetooth/mgmt_util.h 	struct sock *sk;
sk                 36 net/bluetooth/mgmt_util.h int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status);
sk                 37 net/bluetooth/mgmt_util.h int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
sk                 49 net/bluetooth/mgmt_util.h struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
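The mgmt_util hits show the other half of that pattern: mgmt_pending_add() stores the requesting socket in cmd->sk and takes a reference with sock_hold(), and the reference is dropped again with sock_put() when the pending command is freed. Below is a sketch of a deferred-reply helper built on cmd->sk; the helper itself is hypothetical, and mgmt_pending_remove() is assumed to exist alongside the prototypes listed above.

static void example_send_deferred_rsp(struct mgmt_pending_cmd *cmd, u8 status)
{
	/* Hypothetical helper: cmd->sk still holds the requesting
	 * socket, so the deferred reply goes back to whoever issued
	 * the command. */
	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, status);
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0,
				  NULL, 0);

	/* Removing the pending command frees it and releases the
	 * sock_hold() taken in mgmt_pending_add(). */
	mgmt_pending_remove(cmd);
}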
sk                185 net/bluetooth/rfcomm/core.c static void rfcomm_l2state_change(struct sock *sk)
sk                187 net/bluetooth/rfcomm/core.c 	BT_DBG("%p state %d", sk, sk->sk_state);
sk                191 net/bluetooth/rfcomm/core.c static void rfcomm_l2data_ready(struct sock *sk)
sk                193 net/bluetooth/rfcomm/core.c 	BT_DBG("%p", sk);
sk                205 net/bluetooth/rfcomm/core.c 		struct sock *sk = (*sock)->sk;
sk                206 net/bluetooth/rfcomm/core.c 		sk->sk_data_ready   = rfcomm_l2data_ready;
sk                207 net/bluetooth/rfcomm/core.c 		sk->sk_state_change = rfcomm_l2state_change;
sk                214 net/bluetooth/rfcomm/core.c 	struct sock *sk = d->session->sock->sk;
sk                215 net/bluetooth/rfcomm/core.c 	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
sk                699 net/bluetooth/rfcomm/core.c 		chan = l2cap_pi(s->sock->sk)->chan;
sk                735 net/bluetooth/rfcomm/core.c 	struct sock *sk;
sk                753 net/bluetooth/rfcomm/core.c 	sk = sock->sk;
sk                754 net/bluetooth/rfcomm/core.c 	lock_sock(sk);
sk                755 net/bluetooth/rfcomm/core.c 	l2cap_pi(sk)->chan->imtu = l2cap_mtu;
sk                756 net/bluetooth/rfcomm/core.c 	l2cap_pi(sk)->chan->sec_level = sec_level;
sk                758 net/bluetooth/rfcomm/core.c 		l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
sk                759 net/bluetooth/rfcomm/core.c 	release_sock(sk);
sk                787 net/bluetooth/rfcomm/core.c 	struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
sk               1285 net/bluetooth/rfcomm/core.c 	struct sock *sk = d->session->sock->sk;
sk               1286 net/bluetooth/rfcomm/core.c 	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
sk               1899 net/bluetooth/rfcomm/core.c 	struct sock *sk = sock->sk;
sk               1902 net/bluetooth/rfcomm/core.c 	BT_DBG("session %p state %ld qlen %d", s, s->state, skb_queue_len(&sk->sk_receive_queue));
sk               1905 net/bluetooth/rfcomm/core.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
sk               1916 net/bluetooth/rfcomm/core.c 	if (s && (sk->sk_state == BT_CLOSED))
sk               1917 net/bluetooth/rfcomm/core.c 		s = rfcomm_session_close(s, sk->sk_err);
sk               1929 net/bluetooth/rfcomm/core.c 	if (list_empty(&bt_sk(sock->sk)->accept_q))
sk               1939 net/bluetooth/rfcomm/core.c 	nsock->sk->sk_data_ready   = rfcomm_l2data_ready;
sk               1940 net/bluetooth/rfcomm/core.c 	nsock->sk->sk_state_change = rfcomm_l2state_change;
sk               1946 net/bluetooth/rfcomm/core.c 		s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
sk               1947 net/bluetooth/rfcomm/core.c 				l2cap_pi(nsock->sk)->chan->imtu) - 5;
sk               1956 net/bluetooth/rfcomm/core.c 	struct sock *sk = s->sock->sk;
sk               1960 net/bluetooth/rfcomm/core.c 	switch (sk->sk_state) {
sk               1966 net/bluetooth/rfcomm/core.c 		s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5;
sk               1972 net/bluetooth/rfcomm/core.c 		s = rfcomm_session_close(s, sk->sk_err);
sk               2016 net/bluetooth/rfcomm/core.c 	struct sock *sk;
sk               2040 net/bluetooth/rfcomm/core.c 	sk = sock->sk;
sk               2041 net/bluetooth/rfcomm/core.c 	lock_sock(sk);
sk               2042 net/bluetooth/rfcomm/core.c 	l2cap_pi(sk)->chan->imtu = l2cap_mtu;
sk               2043 net/bluetooth/rfcomm/core.c 	release_sock(sk);
sk               2155 net/bluetooth/rfcomm/core.c 		struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
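In the rfcomm/core.c hits, sk is almost always the kernel-owned L2CAP socket underneath an RFCOMM session: the code reaches the L2CAP channel through l2cap_pi(s->sock->sk)->chan and sizes the session MTU from the channel's incoming and outgoing MTU minus 5 bytes of RFCOMM framing overhead. Below is a small sketch of just that calculation, assuming the rfcomm_session layout implied by the lines above; the function name is illustrative.

static void example_set_session_mtu(struct rfcomm_session *s)
{
	/* Illustrative only. The RFCOMM session rides on a kernel L2CAP
	 * socket; its usable MTU is bounded by both directions of the
	 * L2CAP channel, less the RFCOMM header/FCS overhead. */
	struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;

	s->mtu = min(chan->omtu, chan->imtu) - 5;
}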
sk                 43 net/bluetooth/rfcomm/sock.c static void rfcomm_sock_close(struct sock *sk);
sk                 44 net/bluetooth/rfcomm/sock.c static void rfcomm_sock_kill(struct sock *sk);
sk                 52 net/bluetooth/rfcomm/sock.c 	struct sock *sk = d->owner;
sk                 53 net/bluetooth/rfcomm/sock.c 	if (!sk)
sk                 56 net/bluetooth/rfcomm/sock.c 	atomic_add(skb->len, &sk->sk_rmem_alloc);
sk                 57 net/bluetooth/rfcomm/sock.c 	skb_queue_tail(&sk->sk_receive_queue, skb);
sk                 58 net/bluetooth/rfcomm/sock.c 	sk->sk_data_ready(sk);
sk                 60 net/bluetooth/rfcomm/sock.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk                 66 net/bluetooth/rfcomm/sock.c 	struct sock *sk = d->owner, *parent;
sk                 69 net/bluetooth/rfcomm/sock.c 	if (!sk)
sk                 75 net/bluetooth/rfcomm/sock.c 	bh_lock_sock(sk);
sk                 78 net/bluetooth/rfcomm/sock.c 		sk->sk_err = err;
sk                 80 net/bluetooth/rfcomm/sock.c 	sk->sk_state = d->state;
sk                 82 net/bluetooth/rfcomm/sock.c 	parent = bt_sk(sk)->parent;
sk                 85 net/bluetooth/rfcomm/sock.c 			sock_set_flag(sk, SOCK_ZAPPED);
sk                 86 net/bluetooth/rfcomm/sock.c 			bt_accept_unlink(sk);
sk                 92 net/bluetooth/rfcomm/sock.c 					       &rfcomm_pi(sk)->src, NULL);
sk                 93 net/bluetooth/rfcomm/sock.c 		sk->sk_state_change(sk);
sk                 96 net/bluetooth/rfcomm/sock.c 	bh_unlock_sock(sk);
sk                 99 net/bluetooth/rfcomm/sock.c 	if (parent && sock_flag(sk, SOCK_ZAPPED)) {
sk                103 net/bluetooth/rfcomm/sock.c 		rfcomm_sock_kill(sk);
sk                111 net/bluetooth/rfcomm/sock.c 	struct sock *sk = NULL;
sk                113 net/bluetooth/rfcomm/sock.c 	sk_for_each(sk, &rfcomm_sk_list.head) {
sk                114 net/bluetooth/rfcomm/sock.c 		if (rfcomm_pi(sk)->channel != channel)
sk                117 net/bluetooth/rfcomm/sock.c 		if (bacmp(&rfcomm_pi(sk)->src, src))
sk                120 net/bluetooth/rfcomm/sock.c 		if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN)
sk                124 net/bluetooth/rfcomm/sock.c 	return sk ? sk : NULL;
sk                132 net/bluetooth/rfcomm/sock.c 	struct sock *sk = NULL, *sk1 = NULL;
sk                136 net/bluetooth/rfcomm/sock.c 	sk_for_each(sk, &rfcomm_sk_list.head) {
sk                137 net/bluetooth/rfcomm/sock.c 		if (state && sk->sk_state != state)
sk                140 net/bluetooth/rfcomm/sock.c 		if (rfcomm_pi(sk)->channel == channel) {
sk                142 net/bluetooth/rfcomm/sock.c 			if (!bacmp(&rfcomm_pi(sk)->src, src))
sk                146 net/bluetooth/rfcomm/sock.c 			if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY))
sk                147 net/bluetooth/rfcomm/sock.c 				sk1 = sk;
sk                153 net/bluetooth/rfcomm/sock.c 	return sk ? sk : sk1;
sk                156 net/bluetooth/rfcomm/sock.c static void rfcomm_sock_destruct(struct sock *sk)
sk                158 net/bluetooth/rfcomm/sock.c 	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
sk                160 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p dlc %p", sk, d);
sk                162 net/bluetooth/rfcomm/sock.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                163 net/bluetooth/rfcomm/sock.c 	skb_queue_purge(&sk->sk_write_queue);
sk                166 net/bluetooth/rfcomm/sock.c 	rfcomm_pi(sk)->dlc = NULL;
sk                169 net/bluetooth/rfcomm/sock.c 	if (d->owner == sk)
sk                178 net/bluetooth/rfcomm/sock.c 	struct sock *sk;
sk                183 net/bluetooth/rfcomm/sock.c 	while ((sk = bt_accept_dequeue(parent, NULL))) {
sk                184 net/bluetooth/rfcomm/sock.c 		rfcomm_sock_close(sk);
sk                185 net/bluetooth/rfcomm/sock.c 		rfcomm_sock_kill(sk);
sk                195 net/bluetooth/rfcomm/sock.c static void rfcomm_sock_kill(struct sock *sk)
sk                197 net/bluetooth/rfcomm/sock.c 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
sk                200 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
sk                203 net/bluetooth/rfcomm/sock.c 	bt_sock_unlink(&rfcomm_sk_list, sk);
sk                204 net/bluetooth/rfcomm/sock.c 	sock_set_flag(sk, SOCK_DEAD);
sk                205 net/bluetooth/rfcomm/sock.c 	sock_put(sk);
sk                208 net/bluetooth/rfcomm/sock.c static void __rfcomm_sock_close(struct sock *sk)
sk                210 net/bluetooth/rfcomm/sock.c 	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
sk                212 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
sk                214 net/bluetooth/rfcomm/sock.c 	switch (sk->sk_state) {
sk                216 net/bluetooth/rfcomm/sock.c 		rfcomm_sock_cleanup_listen(sk);
sk                227 net/bluetooth/rfcomm/sock.c 		sock_set_flag(sk, SOCK_ZAPPED);
sk                235 net/bluetooth/rfcomm/sock.c static void rfcomm_sock_close(struct sock *sk)
sk                237 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                238 net/bluetooth/rfcomm/sock.c 	__rfcomm_sock_close(sk);
sk                239 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                242 net/bluetooth/rfcomm/sock.c static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
sk                244 net/bluetooth/rfcomm/sock.c 	struct rfcomm_pinfo *pi = rfcomm_pi(sk);
sk                246 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p", sk);
sk                249 net/bluetooth/rfcomm/sock.c 		sk->sk_type = parent->sk_type;
sk                256 net/bluetooth/rfcomm/sock.c 		security_sk_clone(parent, sk);
sk                277 net/bluetooth/rfcomm/sock.c 	struct sock *sk;
sk                279 net/bluetooth/rfcomm/sock.c 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, kern);
sk                280 net/bluetooth/rfcomm/sock.c 	if (!sk)
sk                283 net/bluetooth/rfcomm/sock.c 	sock_init_data(sock, sk);
sk                284 net/bluetooth/rfcomm/sock.c 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk                288 net/bluetooth/rfcomm/sock.c 		sk_free(sk);
sk                295 net/bluetooth/rfcomm/sock.c 	rfcomm_pi(sk)->dlc = d;
sk                296 net/bluetooth/rfcomm/sock.c 	d->owner = sk;
sk                298 net/bluetooth/rfcomm/sock.c 	sk->sk_destruct = rfcomm_sock_destruct;
sk                299 net/bluetooth/rfcomm/sock.c 	sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;
sk                301 net/bluetooth/rfcomm/sock.c 	sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sk                302 net/bluetooth/rfcomm/sock.c 	sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
sk                304 net/bluetooth/rfcomm/sock.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                306 net/bluetooth/rfcomm/sock.c 	sk->sk_protocol = proto;
sk                307 net/bluetooth/rfcomm/sock.c 	sk->sk_state    = BT_OPEN;
sk                309 net/bluetooth/rfcomm/sock.c 	bt_sock_link(&rfcomm_sk_list, sk);
sk                311 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p", sk);
sk                312 net/bluetooth/rfcomm/sock.c 	return sk;
sk                318 net/bluetooth/rfcomm/sock.c 	struct sock *sk;
sk                329 net/bluetooth/rfcomm/sock.c 	sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
sk                330 net/bluetooth/rfcomm/sock.c 	if (!sk)
sk                333 net/bluetooth/rfcomm/sock.c 	rfcomm_sock_init(sk, NULL);
sk                340 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                351 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr);
sk                353 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                355 net/bluetooth/rfcomm/sock.c 	if (sk->sk_state != BT_OPEN) {
sk                360 net/bluetooth/rfcomm/sock.c 	if (sk->sk_type != SOCK_STREAM) {
sk                372 net/bluetooth/rfcomm/sock.c 		bacpy(&rfcomm_pi(sk)->src, &sa.rc_bdaddr);
sk                373 net/bluetooth/rfcomm/sock.c 		rfcomm_pi(sk)->channel = sa.rc_channel;
sk                374 net/bluetooth/rfcomm/sock.c 		sk->sk_state = BT_BOUND;
sk                380 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                387 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                388 net/bluetooth/rfcomm/sock.c 	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
sk                391 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p", sk);
sk                397 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                399 net/bluetooth/rfcomm/sock.c 	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
sk                404 net/bluetooth/rfcomm/sock.c 	if (sk->sk_type != SOCK_STREAM) {
sk                409 net/bluetooth/rfcomm/sock.c 	sk->sk_state = BT_CONNECT;
sk                410 net/bluetooth/rfcomm/sock.c 	bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr);
sk                411 net/bluetooth/rfcomm/sock.c 	rfcomm_pi(sk)->channel = sa->rc_channel;
sk                413 net/bluetooth/rfcomm/sock.c 	d->sec_level = rfcomm_pi(sk)->sec_level;
sk                414 net/bluetooth/rfcomm/sock.c 	d->role_switch = rfcomm_pi(sk)->role_switch;
sk                416 net/bluetooth/rfcomm/sock.c 	err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
sk                419 net/bluetooth/rfcomm/sock.c 		err = bt_sock_wait_state(sk, BT_CONNECTED,
sk                420 net/bluetooth/rfcomm/sock.c 				sock_sndtimeo(sk, flags & O_NONBLOCK));
sk                423 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                429 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                432 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p backlog %d", sk, backlog);
sk                434 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                436 net/bluetooth/rfcomm/sock.c 	if (sk->sk_state != BT_BOUND) {
sk                441 net/bluetooth/rfcomm/sock.c 	if (sk->sk_type != SOCK_STREAM) {
sk                446 net/bluetooth/rfcomm/sock.c 	if (!rfcomm_pi(sk)->channel) {
sk                447 net/bluetooth/rfcomm/sock.c 		bdaddr_t *src = &rfcomm_pi(sk)->src;
sk                456 net/bluetooth/rfcomm/sock.c 				rfcomm_pi(sk)->channel = channel;
sk                467 net/bluetooth/rfcomm/sock.c 	sk->sk_max_ack_backlog = backlog;
sk                468 net/bluetooth/rfcomm/sock.c 	sk->sk_ack_backlog = 0;
sk                469 net/bluetooth/rfcomm/sock.c 	sk->sk_state = BT_LISTEN;
sk                472 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                480 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk, *nsk;
sk                484 net/bluetooth/rfcomm/sock.c 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                486 net/bluetooth/rfcomm/sock.c 	if (sk->sk_type != SOCK_STREAM) {
sk                491 net/bluetooth/rfcomm/sock.c 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk                493 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p timeo %ld", sk, timeo);
sk                496 net/bluetooth/rfcomm/sock.c 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
sk                498 net/bluetooth/rfcomm/sock.c 		if (sk->sk_state != BT_LISTEN) {
sk                503 net/bluetooth/rfcomm/sock.c 		nsk = bt_accept_dequeue(sk, newsock);
sk                517 net/bluetooth/rfcomm/sock.c 		release_sock(sk);
sk                521 net/bluetooth/rfcomm/sock.c 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                523 net/bluetooth/rfcomm/sock.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                533 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                540 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                542 net/bluetooth/rfcomm/sock.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                544 net/bluetooth/rfcomm/sock.c 	if (peer && sk->sk_state != BT_CONNECTED &&
sk                545 net/bluetooth/rfcomm/sock.c 	    sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2)
sk                550 net/bluetooth/rfcomm/sock.c 	sa->rc_channel = rfcomm_pi(sk)->channel;
sk                552 net/bluetooth/rfcomm/sock.c 		bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst);
sk                554 net/bluetooth/rfcomm/sock.c 		bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src);
sk                562 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                563 net/bluetooth/rfcomm/sock.c 	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
sk                573 net/bluetooth/rfcomm/sock.c 	if (sk->sk_shutdown & SEND_SHUTDOWN)
sk                576 net/bluetooth/rfcomm/sock.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                578 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                580 net/bluetooth/rfcomm/sock.c 	sent = bt_sock_wait_ready(sk, msg->msg_flags);
sk                588 net/bluetooth/rfcomm/sock.c 		skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
sk                605 net/bluetooth/rfcomm/sock.c 		skb->priority = sk->sk_priority;
sk                620 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                628 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                629 net/bluetooth/rfcomm/sock.c 	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
sk                639 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                641 net/bluetooth/rfcomm/sock.c 		atomic_sub(len, &sk->sk_rmem_alloc);
sk                643 net/bluetooth/rfcomm/sock.c 	if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
sk                644 net/bluetooth/rfcomm/sock.c 		rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
sk                645 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                652 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                656 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p", sk);
sk                658 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                673 net/bluetooth/rfcomm/sock.c 			rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
sk                675 net/bluetooth/rfcomm/sock.c 			rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
sk                677 net/bluetooth/rfcomm/sock.c 			rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;
sk                679 net/bluetooth/rfcomm/sock.c 		rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
sk                687 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                693 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                699 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p", sk);
sk                707 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                711 net/bluetooth/rfcomm/sock.c 		if (sk->sk_type != SOCK_STREAM) {
sk                729 net/bluetooth/rfcomm/sock.c 		rfcomm_pi(sk)->sec_level = sec.level;
sk                733 net/bluetooth/rfcomm/sock.c 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
sk                744 net/bluetooth/rfcomm/sock.c 			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
sk                746 net/bluetooth/rfcomm/sock.c 			clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
sk                755 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                761 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                768 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p", sk);
sk                773 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                777 net/bluetooth/rfcomm/sock.c 		switch (rfcomm_pi(sk)->sec_level) {
sk                797 net/bluetooth/rfcomm/sock.c 		if (rfcomm_pi(sk)->role_switch)
sk                806 net/bluetooth/rfcomm/sock.c 		if (sk->sk_state != BT_CONNECTED &&
sk                807 net/bluetooth/rfcomm/sock.c 					!rfcomm_pi(sk)->dlc->defer_setup) {
sk                812 net/bluetooth/rfcomm/sock.c 		l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
sk                830 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                836 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                840 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p", sk);
sk                851 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                855 net/bluetooth/rfcomm/sock.c 		if (sk->sk_type != SOCK_STREAM) {
sk                860 net/bluetooth/rfcomm/sock.c 		sec.level = rfcomm_pi(sk)->sec_level;
sk                870 net/bluetooth/rfcomm/sock.c 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
sk                875 net/bluetooth/rfcomm/sock.c 		if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
sk                886 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                892 net/bluetooth/rfcomm/sock.c 	struct sock *sk __maybe_unused = sock->sk;
sk                895 net/bluetooth/rfcomm/sock.c 	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
sk                901 net/bluetooth/rfcomm/sock.c 		lock_sock(sk);
sk                902 net/bluetooth/rfcomm/sock.c 		err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg);
sk                903 net/bluetooth/rfcomm/sock.c 		release_sock(sk);
sk                914 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                917 net/bluetooth/rfcomm/sock.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                919 net/bluetooth/rfcomm/sock.c 	if (!sk)
sk                922 net/bluetooth/rfcomm/sock.c 	lock_sock(sk);
sk                923 net/bluetooth/rfcomm/sock.c 	if (!sk->sk_shutdown) {
sk                924 net/bluetooth/rfcomm/sock.c 		sk->sk_shutdown = SHUTDOWN_MASK;
sk                925 net/bluetooth/rfcomm/sock.c 		__rfcomm_sock_close(sk);
sk                927 net/bluetooth/rfcomm/sock.c 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
sk                929 net/bluetooth/rfcomm/sock.c 			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
sk                931 net/bluetooth/rfcomm/sock.c 	release_sock(sk);
sk                937 net/bluetooth/rfcomm/sock.c 	struct sock *sk = sock->sk;
sk                940 net/bluetooth/rfcomm/sock.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                942 net/bluetooth/rfcomm/sock.c 	if (!sk)
sk                947 net/bluetooth/rfcomm/sock.c 	sock_orphan(sk);
sk                948 net/bluetooth/rfcomm/sock.c 	rfcomm_sock_kill(sk);
sk                958 net/bluetooth/rfcomm/sock.c 	struct sock *sk, *parent;
sk                979 net/bluetooth/rfcomm/sock.c 	sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC, 0);
sk                980 net/bluetooth/rfcomm/sock.c 	if (!sk)
sk                983 net/bluetooth/rfcomm/sock.c 	bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
sk                985 net/bluetooth/rfcomm/sock.c 	rfcomm_sock_init(sk, parent);
sk                986 net/bluetooth/rfcomm/sock.c 	bacpy(&rfcomm_pi(sk)->src, &src);
sk                987 net/bluetooth/rfcomm/sock.c 	bacpy(&rfcomm_pi(sk)->dst, &dst);
sk                988 net/bluetooth/rfcomm/sock.c 	rfcomm_pi(sk)->channel = channel;
sk                990 net/bluetooth/rfcomm/sock.c 	sk->sk_state = BT_CONFIG;
sk                991 net/bluetooth/rfcomm/sock.c 	bt_accept_enqueue(parent, sk, true);
sk                994 net/bluetooth/rfcomm/sock.c 	*d = rfcomm_pi(sk)->dlc;
sk               1008 net/bluetooth/rfcomm/sock.c 	struct sock *sk;
sk               1012 net/bluetooth/rfcomm/sock.c 	sk_for_each(sk, &rfcomm_sk_list.head) {
sk               1014 net/bluetooth/rfcomm/sock.c 			   &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst,
sk               1015 net/bluetooth/rfcomm/sock.c 			   sk->sk_state, rfcomm_pi(sk)->channel);
sk                279 net/bluetooth/rfcomm/tty.c 		struct sock *sk = dlc->owner;
sk                282 net/bluetooth/rfcomm/tty.c 		BUG_ON(!sk);
sk                286 net/bluetooth/rfcomm/tty.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
sk                289 net/bluetooth/rfcomm/tty.c 			atomic_sub(skb->len, &sk->sk_rmem_alloc);
sk                363 net/bluetooth/rfcomm/tty.c 	struct rfcomm_dev *dev = (void *) skb->sk;
sk                374 net/bluetooth/rfcomm/tty.c 	skb->sk = (void *) dev;
sk                390 net/bluetooth/rfcomm/tty.c static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
sk                399 net/bluetooth/rfcomm/tty.c 	BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags);
sk                406 net/bluetooth/rfcomm/tty.c 		if (sk->sk_state != BT_CONNECTED)
sk                409 net/bluetooth/rfcomm/tty.c 		dlc = rfcomm_pi(sk)->dlc;
sk                430 net/bluetooth/rfcomm/tty.c 		sk->sk_state = BT_CLOSED;
sk                479 net/bluetooth/rfcomm/tty.c static int rfcomm_create_dev(struct sock *sk, void __user *arg)
sk                484 net/bluetooth/rfcomm/tty.c 	ret = __rfcomm_create_dev(sk, arg);
sk                580 net/bluetooth/rfcomm/tty.c int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
sk                586 net/bluetooth/rfcomm/tty.c 		return rfcomm_create_dev(sk, arg);
sk                 49 net/bluetooth/sco.c 	struct sock	*sk;
sk                 57 net/bluetooth/sco.c static void sco_sock_close(struct sock *sk);
sk                 58 net/bluetooth/sco.c static void sco_sock_kill(struct sock *sk);
sk                 61 net/bluetooth/sco.c #define sco_pi(sk) ((struct sco_pinfo *) sk)
sk                 78 net/bluetooth/sco.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                 80 net/bluetooth/sco.c 	BT_DBG("sock %p state %d", sk, sk->sk_state);
sk                 82 net/bluetooth/sco.c 	bh_lock_sock(sk);
sk                 83 net/bluetooth/sco.c 	sk->sk_err = ETIMEDOUT;
sk                 84 net/bluetooth/sco.c 	sk->sk_state_change(sk);
sk                 85 net/bluetooth/sco.c 	bh_unlock_sock(sk);
sk                 87 net/bluetooth/sco.c 	sco_sock_kill(sk);
sk                 88 net/bluetooth/sco.c 	sock_put(sk);
sk                 91 net/bluetooth/sco.c static void sco_sock_set_timer(struct sock *sk, long timeout)
sk                 93 net/bluetooth/sco.c 	BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
sk                 94 net/bluetooth/sco.c 	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
sk                 97 net/bluetooth/sco.c static void sco_sock_clear_timer(struct sock *sk)
sk                 99 net/bluetooth/sco.c 	BT_DBG("sock %p state %d", sk, sk->sk_state);
sk                100 net/bluetooth/sco.c 	sk_stop_timer(sk, &sk->sk_timer);
sk                133 net/bluetooth/sco.c static void sco_chan_del(struct sock *sk, int err)
sk                137 net/bluetooth/sco.c 	conn = sco_pi(sk)->conn;
sk                139 net/bluetooth/sco.c 	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
sk                143 net/bluetooth/sco.c 		conn->sk = NULL;
sk                144 net/bluetooth/sco.c 		sco_pi(sk)->conn = NULL;
sk                151 net/bluetooth/sco.c 	sk->sk_state = BT_CLOSED;
sk                152 net/bluetooth/sco.c 	sk->sk_err   = err;
sk                153 net/bluetooth/sco.c 	sk->sk_state_change(sk);
sk                155 net/bluetooth/sco.c 	sock_set_flag(sk, SOCK_ZAPPED);
sk                161 net/bluetooth/sco.c 	struct sock *sk;
sk                170 net/bluetooth/sco.c 	sk = conn->sk;
sk                173 net/bluetooth/sco.c 	if (sk) {
sk                174 net/bluetooth/sco.c 		sock_hold(sk);
sk                175 net/bluetooth/sco.c 		bh_lock_sock(sk);
sk                176 net/bluetooth/sco.c 		sco_sock_clear_timer(sk);
sk                177 net/bluetooth/sco.c 		sco_chan_del(sk, err);
sk                178 net/bluetooth/sco.c 		bh_unlock_sock(sk);
sk                179 net/bluetooth/sco.c 		sco_sock_kill(sk);
sk                180 net/bluetooth/sco.c 		sock_put(sk);
sk                187 net/bluetooth/sco.c static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
sk                192 net/bluetooth/sco.c 	sco_pi(sk)->conn = conn;
sk                193 net/bluetooth/sco.c 	conn->sk = sk;
sk                196 net/bluetooth/sco.c 		bt_accept_enqueue(parent, sk, true);
sk                199 net/bluetooth/sco.c static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
sk                205 net/bluetooth/sco.c 	if (conn->sk)
sk                208 net/bluetooth/sco.c 		__sco_chan_add(conn, sk, parent);
sk                214 net/bluetooth/sco.c static int sco_connect(struct sock *sk)
sk                221 net/bluetooth/sco.c 	BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
sk                223 net/bluetooth/sco.c 	hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR);
sk                234 net/bluetooth/sco.c 	if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
sk                240 net/bluetooth/sco.c 	hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
sk                241 net/bluetooth/sco.c 			       sco_pi(sk)->setting);
sk                255 net/bluetooth/sco.c 	bacpy(&sco_pi(sk)->src, &hcon->src);
sk                257 net/bluetooth/sco.c 	err = sco_chan_add(conn, sk, NULL);
sk                262 net/bluetooth/sco.c 		sco_sock_clear_timer(sk);
sk                263 net/bluetooth/sco.c 		sk->sk_state = BT_CONNECTED;
sk                265 net/bluetooth/sco.c 		sk->sk_state = BT_CONNECT;
sk                266 net/bluetooth/sco.c 		sco_sock_set_timer(sk, sk->sk_sndtimeo);
sk                275 net/bluetooth/sco.c static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
sk                277 net/bluetooth/sco.c 	struct sco_conn *conn = sco_pi(sk)->conn;
sk                285 net/bluetooth/sco.c 	BT_DBG("sk %p len %d", sk, len);
sk                287 net/bluetooth/sco.c 	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
sk                303 net/bluetooth/sco.c 	struct sock *sk;
sk                306 net/bluetooth/sco.c 	sk = conn->sk;
sk                309 net/bluetooth/sco.c 	if (!sk)
sk                312 net/bluetooth/sco.c 	BT_DBG("sk %p len %d", sk, skb->len);
sk                314 net/bluetooth/sco.c 	if (sk->sk_state != BT_CONNECTED)
sk                317 net/bluetooth/sco.c 	if (!sock_queue_rcv_skb(sk, skb))
sk                327 net/bluetooth/sco.c 	struct sock *sk;
sk                329 net/bluetooth/sco.c 	sk_for_each(sk, &sco_sk_list.head) {
sk                330 net/bluetooth/sco.c 		if (sk->sk_state != BT_LISTEN)
sk                333 net/bluetooth/sco.c 		if (!bacmp(&sco_pi(sk)->src, ba))
sk                334 net/bluetooth/sco.c 			return sk;
sk                345 net/bluetooth/sco.c 	struct sock *sk = NULL, *sk1 = NULL;
sk                349 net/bluetooth/sco.c 	sk_for_each(sk, &sco_sk_list.head) {
sk                350 net/bluetooth/sco.c 		if (sk->sk_state != BT_LISTEN)
sk                354 net/bluetooth/sco.c 		if (!bacmp(&sco_pi(sk)->src, src))
sk                358 net/bluetooth/sco.c 		if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY))
sk                359 net/bluetooth/sco.c 			sk1 = sk;
sk                364 net/bluetooth/sco.c 	return sk ? sk : sk1;
sk                367 net/bluetooth/sco.c static void sco_sock_destruct(struct sock *sk)
sk                369 net/bluetooth/sco.c 	BT_DBG("sk %p", sk);
sk                371 net/bluetooth/sco.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                372 net/bluetooth/sco.c 	skb_queue_purge(&sk->sk_write_queue);
sk                377 net/bluetooth/sco.c 	struct sock *sk;
sk                382 net/bluetooth/sco.c 	while ((sk = bt_accept_dequeue(parent, NULL))) {
sk                383 net/bluetooth/sco.c 		sco_sock_close(sk);
sk                384 net/bluetooth/sco.c 		sco_sock_kill(sk);
sk                394 net/bluetooth/sco.c static void sco_sock_kill(struct sock *sk)
sk                396 net/bluetooth/sco.c 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
sk                397 net/bluetooth/sco.c 	    sock_flag(sk, SOCK_DEAD))
sk                400 net/bluetooth/sco.c 	BT_DBG("sk %p state %d", sk, sk->sk_state);
sk                403 net/bluetooth/sco.c 	bt_sock_unlink(&sco_sk_list, sk);
sk                404 net/bluetooth/sco.c 	sock_set_flag(sk, SOCK_DEAD);
sk                405 net/bluetooth/sco.c 	sock_put(sk);
sk                408 net/bluetooth/sco.c static void __sco_sock_close(struct sock *sk)
sk                410 net/bluetooth/sco.c 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
sk                412 net/bluetooth/sco.c 	switch (sk->sk_state) {
sk                414 net/bluetooth/sco.c 		sco_sock_cleanup_listen(sk);
sk                419 net/bluetooth/sco.c 		if (sco_pi(sk)->conn->hcon) {
sk                420 net/bluetooth/sco.c 			sk->sk_state = BT_DISCONN;
sk                421 net/bluetooth/sco.c 			sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
sk                422 net/bluetooth/sco.c 			sco_conn_lock(sco_pi(sk)->conn);
sk                423 net/bluetooth/sco.c 			hci_conn_drop(sco_pi(sk)->conn->hcon);
sk                424 net/bluetooth/sco.c 			sco_pi(sk)->conn->hcon = NULL;
sk                425 net/bluetooth/sco.c 			sco_conn_unlock(sco_pi(sk)->conn);
sk                427 net/bluetooth/sco.c 			sco_chan_del(sk, ECONNRESET);
sk                433 net/bluetooth/sco.c 		sco_chan_del(sk, ECONNRESET);
sk                437 net/bluetooth/sco.c 		sock_set_flag(sk, SOCK_ZAPPED);
sk                443 net/bluetooth/sco.c static void sco_sock_close(struct sock *sk)
sk                445 net/bluetooth/sco.c 	sco_sock_clear_timer(sk);
sk                446 net/bluetooth/sco.c 	lock_sock(sk);
sk                447 net/bluetooth/sco.c 	__sco_sock_close(sk);
sk                448 net/bluetooth/sco.c 	release_sock(sk);
sk                449 net/bluetooth/sco.c 	sco_sock_kill(sk);
sk                452 net/bluetooth/sco.c static void sco_sock_init(struct sock *sk, struct sock *parent)
sk                454 net/bluetooth/sco.c 	BT_DBG("sk %p", sk);
sk                457 net/bluetooth/sco.c 		sk->sk_type = parent->sk_type;
sk                458 net/bluetooth/sco.c 		bt_sk(sk)->flags = bt_sk(parent)->flags;
sk                459 net/bluetooth/sco.c 		security_sk_clone(parent, sk);
sk                472 net/bluetooth/sco.c 	struct sock *sk;
sk                474 net/bluetooth/sco.c 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern);
sk                475 net/bluetooth/sco.c 	if (!sk)
sk                478 net/bluetooth/sco.c 	sock_init_data(sock, sk);
sk                479 net/bluetooth/sco.c 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
sk                481 net/bluetooth/sco.c 	sk->sk_destruct = sco_sock_destruct;
sk                482 net/bluetooth/sco.c 	sk->sk_sndtimeo = SCO_CONN_TIMEOUT;
sk                484 net/bluetooth/sco.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                486 net/bluetooth/sco.c 	sk->sk_protocol = proto;
sk                487 net/bluetooth/sco.c 	sk->sk_state    = BT_OPEN;
sk                489 net/bluetooth/sco.c 	sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
sk                491 net/bluetooth/sco.c 	timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
sk                493 net/bluetooth/sco.c 	bt_sock_link(&sco_sk_list, sk);
sk                494 net/bluetooth/sco.c 	return sk;
sk                500 net/bluetooth/sco.c 	struct sock *sk;
sk                511 net/bluetooth/sco.c 	sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
sk                512 net/bluetooth/sco.c 	if (!sk)
sk                515 net/bluetooth/sco.c 	sco_sock_init(sk, NULL);
sk                523 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                530 net/bluetooth/sco.c 	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
sk                532 net/bluetooth/sco.c 	lock_sock(sk);
sk                534 net/bluetooth/sco.c 	if (sk->sk_state != BT_OPEN) {
sk                539 net/bluetooth/sco.c 	if (sk->sk_type != SOCK_SEQPACKET) {
sk                544 net/bluetooth/sco.c 	bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr);
sk                546 net/bluetooth/sco.c 	sk->sk_state = BT_BOUND;
sk                549 net/bluetooth/sco.c 	release_sock(sk);
sk                556 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                559 net/bluetooth/sco.c 	BT_DBG("sk %p", sk);
sk                565 net/bluetooth/sco.c 	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND)
sk                568 net/bluetooth/sco.c 	if (sk->sk_type != SOCK_SEQPACKET)
sk                571 net/bluetooth/sco.c 	lock_sock(sk);
sk                574 net/bluetooth/sco.c 	bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
sk                576 net/bluetooth/sco.c 	err = sco_connect(sk);
sk                580 net/bluetooth/sco.c 	err = bt_sock_wait_state(sk, BT_CONNECTED,
sk                581 net/bluetooth/sco.c 				 sock_sndtimeo(sk, flags & O_NONBLOCK));
sk                584 net/bluetooth/sco.c 	release_sock(sk);
sk                590 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                591 net/bluetooth/sco.c 	bdaddr_t *src = &sco_pi(sk)->src;
sk                594 net/bluetooth/sco.c 	BT_DBG("sk %p backlog %d", sk, backlog);
sk                596 net/bluetooth/sco.c 	lock_sock(sk);
sk                598 net/bluetooth/sco.c 	if (sk->sk_state != BT_BOUND) {
sk                603 net/bluetooth/sco.c 	if (sk->sk_type != SOCK_SEQPACKET) {
sk                615 net/bluetooth/sco.c 	sk->sk_max_ack_backlog = backlog;
sk                616 net/bluetooth/sco.c 	sk->sk_ack_backlog = 0;
sk                618 net/bluetooth/sco.c 	sk->sk_state = BT_LISTEN;
sk                624 net/bluetooth/sco.c 	release_sock(sk);
sk                632 net/bluetooth/sco.c 	struct sock *sk = sock->sk, *ch;
sk                636 net/bluetooth/sco.c 	lock_sock(sk);
sk                638 net/bluetooth/sco.c 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk                640 net/bluetooth/sco.c 	BT_DBG("sk %p timeo %ld", sk, timeo);
sk                643 net/bluetooth/sco.c 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
sk                645 net/bluetooth/sco.c 		if (sk->sk_state != BT_LISTEN) {
sk                650 net/bluetooth/sco.c 		ch = bt_accept_dequeue(sk, newsock);
sk                664 net/bluetooth/sco.c 		release_sock(sk);
sk                667 net/bluetooth/sco.c 		lock_sock(sk);
sk                669 net/bluetooth/sco.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                679 net/bluetooth/sco.c 	release_sock(sk);
sk                687 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                689 net/bluetooth/sco.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                694 net/bluetooth/sco.c 		bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst);
sk                696 net/bluetooth/sco.c 		bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src);
sk                704 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                707 net/bluetooth/sco.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                709 net/bluetooth/sco.c 	err = sock_error(sk);
sk                716 net/bluetooth/sco.c 	lock_sock(sk);
sk                718 net/bluetooth/sco.c 	if (sk->sk_state == BT_CONNECTED)
sk                719 net/bluetooth/sco.c 		err = sco_send_frame(sk, msg, len);
sk                723 net/bluetooth/sco.c 	release_sock(sk);
sk                774 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                775 net/bluetooth/sco.c 	struct sco_pinfo *pi = sco_pi(sk);
sk                777 net/bluetooth/sco.c 	lock_sock(sk);
sk                779 net/bluetooth/sco.c 	if (sk->sk_state == BT_CONNECT2 &&
sk                780 net/bluetooth/sco.c 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
sk                782 net/bluetooth/sco.c 		sk->sk_state = BT_CONFIG;
sk                784 net/bluetooth/sco.c 		release_sock(sk);
sk                788 net/bluetooth/sco.c 	release_sock(sk);
sk                796 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                801 net/bluetooth/sco.c 	BT_DBG("sk %p", sk);
sk                803 net/bluetooth/sco.c 	lock_sock(sk);
sk                808 net/bluetooth/sco.c 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
sk                819 net/bluetooth/sco.c 			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
sk                821 net/bluetooth/sco.c 			clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
sk                825 net/bluetooth/sco.c 		if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
sk                826 net/bluetooth/sco.c 		    sk->sk_state != BT_CONNECT2) {
sk                831 net/bluetooth/sco.c 		voice.setting = sco_pi(sk)->setting;
sk                846 net/bluetooth/sco.c 		sco_pi(sk)->setting = voice.setting;
sk                854 net/bluetooth/sco.c 	release_sock(sk);
sk                861 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                866 net/bluetooth/sco.c 	BT_DBG("sk %p", sk);
sk                871 net/bluetooth/sco.c 	lock_sock(sk);
sk                875 net/bluetooth/sco.c 		if (sk->sk_state != BT_CONNECTED &&
sk                876 net/bluetooth/sco.c 		    !(sk->sk_state == BT_CONNECT2 &&
sk                877 net/bluetooth/sco.c 		      test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
sk                882 net/bluetooth/sco.c 		opts.mtu = sco_pi(sk)->conn->mtu;
sk                893 net/bluetooth/sco.c 		if (sk->sk_state != BT_CONNECTED &&
sk                894 net/bluetooth/sco.c 		    !(sk->sk_state == BT_CONNECT2 &&
sk                895 net/bluetooth/sco.c 		      test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
sk                901 net/bluetooth/sco.c 		cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
sk                902 net/bluetooth/sco.c 		memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
sk                915 net/bluetooth/sco.c 	release_sock(sk);
sk                922 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                926 net/bluetooth/sco.c 	BT_DBG("sk %p", sk);
sk                934 net/bluetooth/sco.c 	lock_sock(sk);
sk                939 net/bluetooth/sco.c 		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
sk                944 net/bluetooth/sco.c 		if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
sk                951 net/bluetooth/sco.c 		voice.setting = sco_pi(sk)->setting;
sk                964 net/bluetooth/sco.c 	release_sock(sk);
sk                970 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk                973 net/bluetooth/sco.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk                975 net/bluetooth/sco.c 	if (!sk)
sk                978 net/bluetooth/sco.c 	sock_hold(sk);
sk                979 net/bluetooth/sco.c 	lock_sock(sk);
sk                981 net/bluetooth/sco.c 	if (!sk->sk_shutdown) {
sk                982 net/bluetooth/sco.c 		sk->sk_shutdown = SHUTDOWN_MASK;
sk                983 net/bluetooth/sco.c 		sco_sock_clear_timer(sk);
sk                984 net/bluetooth/sco.c 		__sco_sock_close(sk);
sk                986 net/bluetooth/sco.c 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
sk                988 net/bluetooth/sco.c 			err = bt_sock_wait_state(sk, BT_CLOSED,
sk                989 net/bluetooth/sco.c 						 sk->sk_lingertime);
sk                992 net/bluetooth/sco.c 	release_sock(sk);
sk                993 net/bluetooth/sco.c 	sock_put(sk);
sk               1000 net/bluetooth/sco.c 	struct sock *sk = sock->sk;
sk               1003 net/bluetooth/sco.c 	BT_DBG("sock %p, sk %p", sock, sk);
sk               1005 net/bluetooth/sco.c 	if (!sk)
sk               1008 net/bluetooth/sco.c 	sco_sock_close(sk);
sk               1010 net/bluetooth/sco.c 	if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
sk               1012 net/bluetooth/sco.c 		lock_sock(sk);
sk               1013 net/bluetooth/sco.c 		err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
sk               1014 net/bluetooth/sco.c 		release_sock(sk);
sk               1017 net/bluetooth/sco.c 	sock_orphan(sk);
sk               1018 net/bluetooth/sco.c 	sco_sock_kill(sk);
sk               1025 net/bluetooth/sco.c 	struct sock *sk = conn->sk;
sk               1029 net/bluetooth/sco.c 	if (sk) {
sk               1030 net/bluetooth/sco.c 		sco_sock_clear_timer(sk);
sk               1031 net/bluetooth/sco.c 		bh_lock_sock(sk);
sk               1032 net/bluetooth/sco.c 		sk->sk_state = BT_CONNECTED;
sk               1033 net/bluetooth/sco.c 		sk->sk_state_change(sk);
sk               1034 net/bluetooth/sco.c 		bh_unlock_sock(sk);
sk               1051 net/bluetooth/sco.c 		sk = sco_sock_alloc(sock_net(parent), NULL,
sk               1053 net/bluetooth/sco.c 		if (!sk) {
sk               1059 net/bluetooth/sco.c 		sco_sock_init(sk, parent);
sk               1061 net/bluetooth/sco.c 		bacpy(&sco_pi(sk)->src, &conn->hcon->src);
sk               1062 net/bluetooth/sco.c 		bacpy(&sco_pi(sk)->dst, &conn->hcon->dst);
sk               1065 net/bluetooth/sco.c 		__sco_chan_add(conn, sk, parent);
sk               1068 net/bluetooth/sco.c 			sk->sk_state = BT_CONNECT2;
sk               1070 net/bluetooth/sco.c 			sk->sk_state = BT_CONNECTED;
sk               1084 net/bluetooth/sco.c 	struct sock *sk;
sk               1091 net/bluetooth/sco.c 	sk_for_each(sk, &sco_sk_list.head) {
sk               1092 net/bluetooth/sco.c 		if (sk->sk_state != BT_LISTEN)
sk               1095 net/bluetooth/sco.c 		if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) ||
sk               1096 net/bluetooth/sco.c 		    !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) {
sk               1099 net/bluetooth/sco.c 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
sk               1162 net/bluetooth/sco.c 	struct sock *sk;
sk               1166 net/bluetooth/sco.c 	sk_for_each(sk, &sco_sk_list.head) {
sk               1167 net/bluetooth/sco.c 		seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src,
sk               1168 net/bluetooth/sco.c 			   &sco_pi(sk)->dst, sk->sk_state);
sk                251 net/bpf/test_run.c 	struct sock *sk;
sk                280 net/bpf/test_run.c 	sk = kzalloc(sizeof(struct sock), GFP_USER);
sk                281 net/bpf/test_run.c 	if (!sk) {
sk                286 net/bpf/test_run.c 	sock_net_set(sk, current->nsproxy->net_ns);
sk                287 net/bpf/test_run.c 	sock_init_data(NULL, sk);
sk                293 net/bpf/test_run.c 		kfree(sk);
sk                296 net/bpf/test_run.c 	skb->sk = sk;
sk                336 net/bpf/test_run.c 	bpf_sk_storage_free(sk);
sk                337 net/bpf/test_run.c 	kfree(sk);
sk                 36 net/bpfilter/bpfilter_kern.c static int __bpfilter_process_sockopt(struct sock *sk, int optname,
sk                 33 net/bridge/br_forward.c int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 62 net/bridge/br_forward.c int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 66 net/bridge/br_forward.c 		       net, sk, skb, NULL, skb->dev,
sk                 26 net/bridge/br_input.c br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 70 net/bridge/br_input.c int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                191 net/bridge/br_input.c static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                211 net/bridge/br_mdb.c 	struct net *net = sock_net(skb->sk);
sk                555 net/bridge/br_mdb.c 	struct net *net = sock_net(skb->sk);
sk                683 net/bridge/br_mdb.c 	struct net *net = sock_net(skb->sk);
sk                788 net/bridge/br_mdb.c 	struct net *net = sock_net(skb->sk);
sk                266 net/bridge/br_netfilter_hooks.c int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                283 net/bridge/br_netfilter_hooks.c 			ret = br_handle_frame_finish(net, sk, skb);
sk                352 net/bridge/br_netfilter_hooks.c static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                402 net/bridge/br_netfilter_hooks.c 						  net, sk, skb, skb->dev,
sk                422 net/bridge/br_netfilter_hooks.c 	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
sk                531 net/bridge/br_netfilter_hooks.c 	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
sk                540 net/bridge/br_netfilter_hooks.c static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                564 net/bridge/br_netfilter_hooks.c 	br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
sk                674 net/bridge/br_netfilter_hooks.c 	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
sk                680 net/bridge/br_netfilter_hooks.c static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                700 net/bridge/br_netfilter_hooks.c 	return br_dev_queue_push_xmit(net, sk, skb);
sk                704 net/bridge/br_netfilter_hooks.c br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                707 net/bridge/br_netfilter_hooks.c 	unsigned int mtu = ip_skb_dst_mtu(sk, skb);
sk                718 net/bridge/br_netfilter_hooks.c 	return ip_do_fragment(net, sk, skb, output);
sk                730 net/bridge/br_netfilter_hooks.c static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                743 net/bridge/br_netfilter_hooks.c 		return br_dev_queue_push_xmit(net, sk, skb);
sk                775 net/bridge/br_netfilter_hooks.c 		return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
sk                797 net/bridge/br_netfilter_hooks.c 			return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
sk                803 net/bridge/br_netfilter_hooks.c 	return br_dev_queue_push_xmit(net, sk, skb);
sk                851 net/bridge/br_netfilter_hooks.c 	NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
sk                870 net/bridge/br_netfilter_hooks.c 		state->okfn(state->net, state->sk, skb);
sk                996 net/bridge/br_netfilter_hooks.c 		      struct sock *sk, struct sk_buff *skb,
sk               1010 net/bridge/br_netfilter_hooks.c 		return okfn(net, sk, skb);
sk               1018 net/bridge/br_netfilter_hooks.c 			   sk, net, okfn);
sk               1022 net/bridge/br_netfilter_hooks.c 		ret = okfn(net, sk, skb);
sk                160 net/bridge/br_netfilter_ipv6.c static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                188 net/bridge/br_netfilter_ipv6.c 					  net, sk, skb, skb->dev, NULL,
sk                206 net/bridge/br_netfilter_ipv6.c 	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb,
sk                236 net/bridge/br_netfilter_ipv6.c 	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
sk                 24 net/bridge/br_nf_core.c static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk                 30 net/bridge/br_nf_core.c static void fake_redirect(struct dst_entry *dst, struct sock *sk,
sk                598 net/bridge/br_private.h int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                601 net/bridge/br_private.h int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                628 net/bridge/br_private.h int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                 29 net/bridge/br_stp_bpdu.c static int br_send_bpdu_finish(struct net *net, struct sock *sk,
sk               1458 net/bridge/netfilter/ebtables.c static int do_ebt_set_ctl(struct sock *sk,
sk               1462 net/bridge/netfilter/ebtables.c 	struct net *net = sock_net(sk);
sk               1480 net/bridge/netfilter/ebtables.c static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
sk               1485 net/bridge/netfilter/ebtables.c 	struct net *net = sock_net(sk);
sk               2321 net/bridge/netfilter/ebtables.c static int compat_do_ebt_set_ctl(struct sock *sk,
sk               2325 net/bridge/netfilter/ebtables.c 	struct net *net = sock_net(sk);
sk               2343 net/bridge/netfilter/ebtables.c static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
sk               2349 net/bridge/netfilter/ebtables.c 	struct net *net = sock_net(sk);
sk               2357 net/bridge/netfilter/ebtables.c 			return do_ebt_get_ctl(sk, cmd, user, len);
sk                 27 net/bridge/netfilter/nf_conntrack_bridge.c static int nf_br_ip_fragment(struct net *net, struct sock *sk,
sk                 30 net/bridge/netfilter/nf_conntrack_bridge.c 			     int (*output)(struct net *, struct sock *sk,
sk                 85 net/bridge/netfilter/nf_conntrack_bridge.c 			err = output(net, sk, data, skb);
sk                110 net/bridge/netfilter/nf_conntrack_bridge.c 		err = output(net, sk, data, skb2);
sk                298 net/bridge/netfilter/nf_conntrack_bridge.c 		    int (*output)(struct net *, struct sock *sk,
sk                310 net/bridge/netfilter/nf_conntrack_bridge.c 		nf_br_ip_fragment(state->net, state->sk, skb, &data, output);
sk                313 net/bridge/netfilter/nf_conntrack_bridge.c 		nf_br_ip6_fragment(state->net, state->sk, skb, &data, output);
sk                345 net/bridge/netfilter/nf_conntrack_bridge.c static int nf_ct_bridge_refrag_post(struct net *net, struct sock *sk,
sk                355 net/bridge/netfilter/nf_conntrack_bridge.c 	return br_dev_queue_push_xmit(net, sk, skb);
sk                 47 net/caif/caif_socket.c 	struct sock sk; /* must be first member */
sk                 92 net/caif/caif_socket.c static void caif_read_lock(struct sock *sk)
sk                 95 net/caif/caif_socket.c 	cf_sk = container_of(sk, struct caifsock, sk);
sk                 99 net/caif/caif_socket.c static void caif_read_unlock(struct sock *sk)
sk                102 net/caif/caif_socket.c 	cf_sk = container_of(sk, struct caifsock, sk);
sk                109 net/caif/caif_socket.c 	return cf_sk->sk.sk_rcvbuf / 4;
sk                112 net/caif/caif_socket.c static void caif_flow_ctrl(struct sock *sk, int mode)
sk                115 net/caif/caif_socket.c 	cf_sk = container_of(sk, struct caifsock, sk);
sk                124 net/caif/caif_socket.c static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                128 net/caif/caif_socket.c 	struct sk_buff_head *list = &sk->sk_receive_queue;
sk                129 net/caif/caif_socket.c 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sk                132 net/caif/caif_socket.c 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
sk                133 net/caif/caif_socket.c 		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
sk                135 net/caif/caif_socket.c 				    atomic_read(&cf_sk->sk.sk_rmem_alloc),
sk                138 net/caif/caif_socket.c 		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
sk                141 net/caif/caif_socket.c 	err = sk_filter(sk, skb);
sk                145 net/caif/caif_socket.c 	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
sk                148 net/caif/caif_socket.c 		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
sk                151 net/caif/caif_socket.c 	skb_set_owner_r(skb, sk);
sk                153 net/caif/caif_socket.c 	queued = !sock_flag(sk, SOCK_DEAD);
sk                159 net/caif/caif_socket.c 		sk->sk_data_ready(sk);
sk                173 net/caif/caif_socket.c 	if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
sk                177 net/caif/caif_socket.c 	caif_queue_rcv_skb(&cf_sk->sk, skb);
sk                184 net/caif/caif_socket.c 	sock_hold(&cf_sk->sk);
sk                190 net/caif/caif_socket.c 	sock_put(&cf_sk->sk);
sk                203 net/caif/caif_socket.c 		cf_sk->sk.sk_state_change(&cf_sk->sk);
sk                209 net/caif/caif_socket.c 		cf_sk->sk.sk_state_change(&cf_sk->sk);
sk                216 net/caif/caif_socket.c 		cf_sk->sk.sk_state = CAIF_CONNECTED;
sk                218 net/caif/caif_socket.c 		cf_sk->sk.sk_shutdown = 0;
sk                219 net/caif/caif_socket.c 		cf_sk->sk.sk_state_change(&cf_sk->sk);
sk                224 net/caif/caif_socket.c 		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
sk                225 net/caif/caif_socket.c 		cf_sk->sk.sk_state_change(&cf_sk->sk);
sk                230 net/caif/caif_socket.c 		cf_sk->sk.sk_err = ECONNREFUSED;
sk                231 net/caif/caif_socket.c 		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
sk                232 net/caif/caif_socket.c 		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
sk                238 net/caif/caif_socket.c 		cf_sk->sk.sk_state_change(&cf_sk->sk);
sk                243 net/caif/caif_socket.c 		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
sk                244 net/caif/caif_socket.c 		cf_sk->sk.sk_err = ECONNRESET;
sk                246 net/caif/caif_socket.c 		cf_sk->sk.sk_error_report(&cf_sk->sk);
sk                254 net/caif/caif_socket.c static void caif_check_flow_release(struct sock *sk)
sk                256 net/caif/caif_socket.c 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sk                261 net/caif/caif_socket.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
sk                263 net/caif/caif_socket.c 			caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
sk                275 net/caif/caif_socket.c 	struct sock *sk = sock->sk;
sk                284 net/caif/caif_socket.c 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
sk                299 net/caif/caif_socket.c 	skb_free_datagram(sk, skb);
sk                300 net/caif/caif_socket.c 	caif_check_flow_release(sk);
sk                309 net/caif/caif_socket.c static long caif_stream_data_wait(struct sock *sk, long timeo)
sk                312 net/caif/caif_socket.c 	lock_sock(sk);
sk                315 net/caif/caif_socket.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                317 net/caif/caif_socket.c 		if (!skb_queue_empty(&sk->sk_receive_queue) ||
sk                318 net/caif/caif_socket.c 			sk->sk_err ||
sk                319 net/caif/caif_socket.c 			sk->sk_state != CAIF_CONNECTED ||
sk                320 net/caif/caif_socket.c 			sock_flag(sk, SOCK_DEAD) ||
sk                321 net/caif/caif_socket.c 			(sk->sk_shutdown & RCV_SHUTDOWN) ||
sk                326 net/caif/caif_socket.c 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                327 net/caif/caif_socket.c 		release_sock(sk);
sk                329 net/caif/caif_socket.c 		lock_sock(sk);
sk                331 net/caif/caif_socket.c 		if (sock_flag(sk, SOCK_DEAD))
sk                334 net/caif/caif_socket.c 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                337 net/caif/caif_socket.c 	finish_wait(sk_sleep(sk), &wait);
sk                338 net/caif/caif_socket.c 	release_sock(sk);
sk                350 net/caif/caif_socket.c 	struct sock *sk = sock->sk;
sk                365 net/caif/caif_socket.c 	if (sk->sk_state == CAIF_CONNECTING)
sk                368 net/caif/caif_socket.c 	caif_read_lock(sk);
sk                369 net/caif/caif_socket.c 	target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
sk                370 net/caif/caif_socket.c 	timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
sk                376 net/caif/caif_socket.c 		lock_sock(sk);
sk                377 net/caif/caif_socket.c 		if (sock_flag(sk, SOCK_DEAD)) {
sk                381 net/caif/caif_socket.c 		skb = skb_dequeue(&sk->sk_receive_queue);
sk                382 net/caif/caif_socket.c 		caif_check_flow_release(sk);
sk                390 net/caif/caif_socket.c 			err = sock_error(sk);
sk                394 net/caif/caif_socket.c 			if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                398 net/caif/caif_socket.c 			if (sk->sk_state != CAIF_CONNECTED)
sk                400 net/caif/caif_socket.c 			if (sock_flag(sk, SOCK_DEAD))
sk                403 net/caif/caif_socket.c 			release_sock(sk);
sk                409 net/caif/caif_socket.c 			caif_read_unlock(sk);
sk                411 net/caif/caif_socket.c 			timeo = caif_stream_data_wait(sk, timeo);
sk                417 net/caif/caif_socket.c 			caif_read_lock(sk);
sk                420 net/caif/caif_socket.c 			release_sock(sk);
sk                423 net/caif/caif_socket.c 		release_sock(sk);
sk                426 net/caif/caif_socket.c 			skb_queue_head(&sk->sk_receive_queue, skb);
sk                440 net/caif/caif_socket.c 				skb_queue_head(&sk->sk_receive_queue, skb);
sk                450 net/caif/caif_socket.c 			skb_queue_head(&sk->sk_receive_queue, skb);
sk                454 net/caif/caif_socket.c 	caif_read_unlock(sk);
sk                467 net/caif/caif_socket.c 	struct sock *sk = &cf_sk->sk;
sk                472 net/caif/caif_socket.c 			(!wait_writeable || sock_writeable(&cf_sk->sk)))
sk                480 net/caif/caif_socket.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                482 net/caif/caif_socket.c 		if (sk->sk_shutdown & SHUTDOWN_MASK)
sk                484 net/caif/caif_socket.c 		*err = -sk->sk_err;
sk                485 net/caif/caif_socket.c 		if (sk->sk_err)
sk                488 net/caif/caif_socket.c 		if (cf_sk->sk.sk_state != CAIF_CONNECTED)
sk                492 net/caif/caif_socket.c 	finish_wait(sk_sleep(sk), &wait);
sk                507 net/caif/caif_socket.c 	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
sk                521 net/caif/caif_socket.c 	struct sock *sk = sock->sk;
sk                522 net/caif/caif_socket.c 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sk                529 net/caif/caif_socket.c 	ret = sock_error(sk);
sk                546 net/caif/caif_socket.c 	timeo = sock_sndtimeo(sk, noblock);
sk                547 net/caif/caif_socket.c 	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
sk                553 net/caif/caif_socket.c 	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
sk                554 net/caif/caif_socket.c 		sock_flag(sk, SOCK_DEAD) ||
sk                555 net/caif/caif_socket.c 		(sk->sk_shutdown & RCV_SHUTDOWN))
sk                560 net/caif/caif_socket.c 	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
sk                566 net/caif/caif_socket.c 	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
sk                596 net/caif/caif_socket.c 	struct sock *sk = sock->sk;
sk                597 net/caif/caif_socket.c 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sk                610 net/caif/caif_socket.c 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
sk                613 net/caif/caif_socket.c 	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
sk                624 net/caif/caif_socket.c 		if (size > ((sk->sk_sndbuf >> 1) - 64))
sk                625 net/caif/caif_socket.c 			size = (sk->sk_sndbuf >> 1) - 64;
sk                630 net/caif/caif_socket.c 		skb = sock_alloc_send_skb(sk,
sk                675 net/caif/caif_socket.c 	struct sock *sk = sock->sk;
sk                676 net/caif/caif_socket.c 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sk                679 net/caif/caif_socket.c 	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
sk                690 net/caif/caif_socket.c 		lock_sock(&(cf_sk->sk));
sk                692 net/caif/caif_socket.c 		release_sock(&cf_sk->sk);
sk                698 net/caif/caif_socket.c 		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
sk                700 net/caif/caif_socket.c 		lock_sock(&(cf_sk->sk));
sk                703 net/caif/caif_socket.c 			release_sock(&cf_sk->sk);
sk                707 net/caif/caif_socket.c 		release_sock(&cf_sk->sk);
sk                747 net/caif/caif_socket.c 	struct sock *sk = sock->sk;
sk                748 net/caif/caif_socket.c 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sk                755 net/caif/caif_socket.c 	lock_sock(sk);
sk                768 net/caif/caif_socket.c 		caif_assert(sk->sk_state == CAIF_DISCONNECTED);
sk                771 net/caif/caif_socket.c 		switch (sk->sk_state) {
sk                787 net/caif/caif_socket.c 		caif_assert(sk->sk_state == CAIF_CONNECTED ||
sk                788 net/caif/caif_socket.c 				sk->sk_state == CAIF_DISCONNECTED);
sk                789 net/caif/caif_socket.c 		if (sk->sk_shutdown & SHUTDOWN_MASK) {
sk                791 net/caif/caif_socket.c 			caif_disconnect_client(sock_net(sk), &cf_sk->layer);
sk                803 net/caif/caif_socket.c 	sk->sk_state = CAIF_DISCONNECTED;
sk                805 net/caif/caif_socket.c 	sk_stream_kill_queues(&cf_sk->sk);
sk                816 net/caif/caif_socket.c 	sk->sk_state = CAIF_CONNECTING;
sk                820 net/caif/caif_socket.c 	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
sk                822 net/caif/caif_socket.c 	else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
sk                825 net/caif/caif_socket.c 		cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
sk                828 net/caif/caif_socket.c 	cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
sk                832 net/caif/caif_socket.c 	err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
sk                836 net/caif/caif_socket.c 		cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
sk                837 net/caif/caif_socket.c 		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
sk                843 net/caif/caif_socket.c 	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
sk                863 net/caif/caif_socket.c 	if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
sk                866 net/caif/caif_socket.c 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
sk                868 net/caif/caif_socket.c 	release_sock(sk);
sk                870 net/caif/caif_socket.c 	timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
sk                871 net/caif/caif_socket.c 			sk->sk_state != CAIF_CONNECTING,
sk                873 net/caif/caif_socket.c 	lock_sock(sk);
sk                878 net/caif/caif_socket.c 	if (timeo == 0 && sk->sk_state != CAIF_CONNECTED)
sk                880 net/caif/caif_socket.c 	if (sk->sk_state != CAIF_CONNECTED) {
sk                882 net/caif/caif_socket.c 		err = sock_error(sk);
sk                890 net/caif/caif_socket.c 	release_sock(sk);
sk                900 net/caif/caif_socket.c 	struct sock *sk = sock->sk;
sk                901 net/caif/caif_socket.c 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sk                903 net/caif/caif_socket.c 	if (!sk)
sk                913 net/caif/caif_socket.c 	spin_lock_bh(&sk->sk_receive_queue.lock);
sk                914 net/caif/caif_socket.c 	sock_set_flag(sk, SOCK_DEAD);
sk                915 net/caif/caif_socket.c 	spin_unlock_bh(&sk->sk_receive_queue.lock);
sk                916 net/caif/caif_socket.c 	sock->sk = NULL;
sk                921 net/caif/caif_socket.c 	lock_sock(&(cf_sk->sk));
sk                922 net/caif/caif_socket.c 	sk->sk_state = CAIF_DISCONNECTED;
sk                923 net/caif/caif_socket.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk                925 net/caif/caif_socket.c 	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
sk                926 net/caif/caif_socket.c 	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
sk                927 net/caif/caif_socket.c 	wake_up_interruptible_poll(sk_sleep(sk), EPOLLERR|EPOLLHUP);
sk                929 net/caif/caif_socket.c 	sock_orphan(sk);
sk                930 net/caif/caif_socket.c 	sk_stream_kill_queues(&cf_sk->sk);
sk                931 net/caif/caif_socket.c 	release_sock(sk);
sk                932 net/caif/caif_socket.c 	sock_put(sk);
sk                940 net/caif/caif_socket.c 	struct sock *sk = sock->sk;
sk                942 net/caif/caif_socket.c 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sk                948 net/caif/caif_socket.c 	if (sk->sk_err)
sk                950 net/caif/caif_socket.c 	if (sk->sk_shutdown == SHUTDOWN_MASK)
sk                952 net/caif/caif_socket.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                956 net/caif/caif_socket.c 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
sk                957 net/caif/caif_socket.c 		(sk->sk_shutdown & RCV_SHUTDOWN))
sk                964 net/caif/caif_socket.c 	if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
sk               1013 net/caif/caif_socket.c static void caif_sock_destructor(struct sock *sk)
sk               1015 net/caif/caif_socket.c 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sk               1016 net/caif/caif_socket.c 	caif_assert(!refcount_read(&sk->sk_wmem_alloc));
sk               1017 net/caif/caif_socket.c 	caif_assert(sk_unhashed(sk));
sk               1018 net/caif/caif_socket.c 	caif_assert(!sk->sk_socket);
sk               1019 net/caif/caif_socket.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk               1020 net/caif/caif_socket.c 		pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
sk               1023 net/caif/caif_socket.c 	sk_stream_kill_queues(&cf_sk->sk);
sk               1030 net/caif/caif_socket.c 	struct sock *sk = NULL;
sk               1061 net/caif/caif_socket.c 	sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot, kern);
sk               1062 net/caif/caif_socket.c 	if (!sk)
sk               1065 net/caif/caif_socket.c 	cf_sk = container_of(sk, struct caifsock, sk);
sk               1068 net/caif/caif_socket.c 	sk->sk_protocol = (unsigned char) protocol;
sk               1073 net/caif/caif_socket.c 		sk->sk_priority = TC_PRIO_CONTROL;
sk               1076 net/caif/caif_socket.c 		sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
sk               1079 net/caif/caif_socket.c 		sk->sk_priority = TC_PRIO_BESTEFFORT;
sk               1086 net/caif/caif_socket.c 	lock_sock(&(cf_sk->sk));
sk               1089 net/caif/caif_socket.c 	sock_init_data(sock, sk);
sk               1090 net/caif/caif_socket.c 	sk->sk_destruct = caif_sock_destructor;
sk               1094 net/caif/caif_socket.c 	cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
sk               1095 net/caif/caif_socket.c 	cf_sk->sk.sk_state = CAIF_DISCONNECTED;
sk               1103 net/caif/caif_socket.c 	release_sock(&cf_sk->sk);
sk                 89 net/can/af_can.c void can_sock_destruct(struct sock *sk)
sk                 91 net/can/af_can.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                 92 net/can/af_can.c 	skb_queue_purge(&sk->sk_error_queue);
sk                117 net/can/af_can.c 	struct sock *sk;
sk                158 net/can/af_can.c 	sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot, kern);
sk                159 net/can/af_can.c 	if (!sk) {
sk                164 net/can/af_can.c 	sock_init_data(sock, sk);
sk                165 net/can/af_can.c 	sk->sk_destruct = can_sock_destruct;
sk                167 net/can/af_can.c 	if (sk->sk_prot->init)
sk                168 net/can/af_can.c 		err = sk->sk_prot->init(sk);
sk                172 net/can/af_can.c 		sock_orphan(sk);
sk                173 net/can/af_can.c 		sock_put(sk);
sk                267 net/can/af_can.c 			can_skb_set_owner(newskb, skb->sk);
sk                443 net/can/af_can.c 		    void *data, char *ident, struct sock *sk)
sk                474 net/can/af_can.c 	rcv->sk = sk;
sk                492 net/can/af_can.c 	struct sock *sk = rcv->sk;
sk                495 net/can/af_can.c 	if (sk)
sk                496 net/can/af_can.c 		sock_put(sk);
sk                560 net/can/af_can.c 		if (rcv->sk)
sk                561 net/can/af_can.c 			sock_hold(rcv->sk);
sk                 59 net/can/af_can.h 	struct sock *sk;
sk                120 net/can/bcm.c  	struct sock *sk;
sk                125 net/can/bcm.c  	struct sock sk;
sk                136 net/can/bcm.c  static inline struct bcm_sock *bcm_sk(const struct sock *sk)
sk                138 net/can/bcm.c  	return (struct bcm_sock *)sk;
sk                192 net/can/bcm.c  	struct sock *sk = (struct sock *)PDE_DATA(m->file->f_inode);
sk                193 net/can/bcm.c  	struct bcm_sock *bo = bcm_sk(sk);
sk                196 net/can/bcm.c  	seq_printf(m, ">>> socket %pK", sk->sk_socket);
sk                197 net/can/bcm.c  	seq_printf(m, " / sk %pK", sk);
sk                277 net/can/bcm.c  	dev = dev_get_by_index(sock_net(op->sk), op->ifindex);
sk                295 net/can/bcm.c  	can_skb_set_owner(skb, op->sk);
sk                319 net/can/bcm.c  	struct sock *sk = op->sk;
sk                363 net/can/bcm.c  	err = sock_queue_rcv_skb(sk, skb);
sk                365 net/can/bcm.c  		struct bcm_sock *bo = bcm_sk(sk);
sk                767 net/can/bcm.c  					dev = dev_get_by_index(sock_net(op->sk),
sk                775 net/can/bcm.c  				can_rx_unregister(sock_net(op->sk), NULL,
sk                836 net/can/bcm.c  			int ifindex, struct sock *sk)
sk                838 net/can/bcm.c  	struct bcm_sock *bo = bcm_sk(sk);
sk                946 net/can/bcm.c  		op->sk = sk;
sk               1011 net/can/bcm.c  			int ifindex, struct sock *sk)
sk               1013 net/can/bcm.c  	struct bcm_sock *bo = bcm_sk(sk);
sk               1118 net/can/bcm.c  		op->sk = sk;
sk               1191 net/can/bcm.c  			dev = dev_get_by_index(sock_net(sk), ifindex);
sk               1193 net/can/bcm.c  				err = can_rx_register(sock_net(sk), dev,
sk               1197 net/can/bcm.c  						      "bcm", sk);
sk               1204 net/can/bcm.c  			err = can_rx_register(sock_net(sk), NULL, op->can_id,
sk               1206 net/can/bcm.c  					      bcm_rx_handler, op, "bcm", sk);
sk               1221 net/can/bcm.c  static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk,
sk               1244 net/can/bcm.c  	dev = dev_get_by_index(sock_net(sk), ifindex);
sk               1253 net/can/bcm.c  	can_skb_set_owner(skb, sk);
sk               1268 net/can/bcm.c  	struct sock *sk = sock->sk;
sk               1269 net/can/bcm.c  	struct bcm_sock *bo = bcm_sk(sk);
sk               1309 net/can/bcm.c  			dev = dev_get_by_index(sock_net(sk), ifindex);
sk               1322 net/can/bcm.c  	lock_sock(sk);
sk               1327 net/can/bcm.c  		ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
sk               1331 net/can/bcm.c  		ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
sk               1365 net/can/bcm.c  			ret = bcm_tx_send(msg, ifindex, sk, cfsiz);
sk               1373 net/can/bcm.c  	release_sock(sk);
sk               1386 net/can/bcm.c  	struct sock *sk = &bo->sk;
sk               1390 net/can/bcm.c  	if (!net_eq(dev_net(dev), sock_net(sk)))
sk               1399 net/can/bcm.c  		lock_sock(sk);
sk               1413 net/can/bcm.c  		release_sock(sk);
sk               1416 net/can/bcm.c  			sk->sk_err = ENODEV;
sk               1417 net/can/bcm.c  			if (!sock_flag(sk, SOCK_DEAD))
sk               1418 net/can/bcm.c  				sk->sk_error_report(sk);
sk               1424 net/can/bcm.c  			sk->sk_err = ENETDOWN;
sk               1425 net/can/bcm.c  			if (!sock_flag(sk, SOCK_DEAD))
sk               1426 net/can/bcm.c  				sk->sk_error_report(sk);
sk               1436 net/can/bcm.c  static int bcm_init(struct sock *sk)
sk               1438 net/can/bcm.c  	struct bcm_sock *bo = bcm_sk(sk);
sk               1461 net/can/bcm.c  	struct sock *sk = sock->sk;
sk               1466 net/can/bcm.c  	if (!sk)
sk               1469 net/can/bcm.c  	net = sock_net(sk);
sk               1470 net/can/bcm.c  	bo = bcm_sk(sk);
sk               1476 net/can/bcm.c  	lock_sock(sk);
sk               1521 net/can/bcm.c  	sock_orphan(sk);
sk               1522 net/can/bcm.c  	sock->sk = NULL;
sk               1524 net/can/bcm.c  	release_sock(sk);
sk               1525 net/can/bcm.c  	sock_put(sk);
sk               1534 net/can/bcm.c  	struct sock *sk = sock->sk;
sk               1535 net/can/bcm.c  	struct bcm_sock *bo = bcm_sk(sk);
sk               1536 net/can/bcm.c  	struct net *net = sock_net(sk);
sk               1542 net/can/bcm.c  	lock_sock(sk);
sk               1575 net/can/bcm.c  		sprintf(bo->procname, "%lu", sock_i_ino(sk));
sk               1578 net/can/bcm.c  						     bcm_proc_show, sk);
sk               1589 net/can/bcm.c  	release_sock(sk);
sk               1597 net/can/bcm.c  	struct sock *sk = sock->sk;
sk               1605 net/can/bcm.c  	skb = skb_recv_datagram(sk, flags, noblock, &error);
sk               1614 net/can/bcm.c  		skb_free_datagram(sk, skb);
sk               1618 net/can/bcm.c  	sock_recv_ts_and_drops(msg, sk, skb);
sk               1626 net/can/bcm.c  	skb_free_datagram(sk, skb);
sk                689 net/can/gw.c   	struct net *net = sock_net(skb->sk);
sk                991 net/can/gw.c   	struct net *net = sock_net(skb->sk);
sk               1103 net/can/gw.c   	struct net *net = sock_net(skb->sk);
sk                183 net/can/j1939/j1939-priv.h void j1939_sk_send_loop_abort(struct sock *sk, int err);
sk                211 net/can/j1939/j1939-priv.h int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk);
sk                215 net/can/j1939/j1939-priv.h void j1939_sock_pending_del(struct sock *sk);
sk                231 net/can/j1939/j1939-priv.h 	struct sock *sk;
sk                288 net/can/j1939/j1939-priv.h 	struct sock sk; /* must be first to skip with memset */
sk                316 net/can/j1939/j1939-priv.h static inline struct j1939_sock *j1939_sk(const struct sock *sk)
sk                318 net/can/j1939/j1939-priv.h 	return container_of(sk, struct j1939_sock, sk);
sk                 55 net/can/j1939/main.c 	can_skb_set_owner(skb, iskb->sk);
sk                 54 net/can/j1939/socket.c static inline void j1939_sock_pending_add(struct sock *sk)
sk                 56 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk                 61 net/can/j1939/socket.c static int j1939_sock_pending_get(struct sock *sk)
sk                 63 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk                 68 net/can/j1939/socket.c void j1939_sock_pending_del(struct sock *sk)
sk                 70 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk                 99 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(session->sk);
sk                107 net/can/j1939/socket.c 	j1939_sock_pending_add(&jsk->sk);
sk                155 net/can/j1939/socket.c 	if (!session->sk)
sk                158 net/can/j1939/socket.c 	jsk = j1939_sk(session->sk);
sk                198 net/can/j1939/socket.c 	if (!session->sk)
sk                201 net/can/j1939/socket.c 	jsk = j1939_sk(session->sk);
sk                227 net/can/j1939/socket.c 		} else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
sk                301 net/can/j1939/socket.c 	if (oskb->sk == &jsk->sk)
sk                312 net/can/j1939/socket.c 	can_skb_set_owner(skb, oskb->sk);
sk                316 net/can/j1939/socket.c 	if (skb->sk)
sk                319 net/can/j1939/socket.c 	if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
sk                350 net/can/j1939/socket.c static void j1939_sk_sock_destruct(struct sock *sk)
sk                352 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk                375 net/can/j1939/socket.c 	can_sock_destruct(sk);
sk                378 net/can/j1939/socket.c static int j1939_sk_init(struct sock *sk)
sk                380 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk                385 net/can/j1939/socket.c 	BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
sk                386 net/can/j1939/socket.c 	memset((void *)jsk + sizeof(jsk->sk), 0x0,
sk                387 net/can/j1939/socket.c 	       sizeof(*jsk) - sizeof(jsk->sk));
sk                391 net/can/j1939/socket.c 	jsk->sk.sk_priority = j1939_to_sk_priority(6);
sk                392 net/can/j1939/socket.c 	jsk->sk.sk_reuse = 1; /* per default */
sk                400 net/can/j1939/socket.c 	sk->sk_destruct = j1939_sk_sock_destruct;
sk                425 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sock->sk);
sk                427 net/can/j1939/socket.c 	struct sock *sk;
sk                435 net/can/j1939/socket.c 	lock_sock(sock->sk);
sk                438 net/can/j1939/socket.c 	sk = sock->sk;
sk                439 net/can/j1939/socket.c 	net = sock_net(sk);
sk                501 net/can/j1939/socket.c 	release_sock(sock->sk);
sk                510 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sock->sk);
sk                517 net/can/j1939/socket.c 	lock_sock(sock->sk);
sk                533 net/can/j1939/socket.c 	    !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
sk                548 net/can/j1939/socket.c 	release_sock(sock->sk);
sk                572 net/can/j1939/socket.c 	struct sock *sk = sock->sk;
sk                573 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk                576 net/can/j1939/socket.c 	lock_sock(sk);
sk                587 net/can/j1939/socket.c 	release_sock(sk);
sk                594 net/can/j1939/socket.c 	struct sock *sk = sock->sk;
sk                597 net/can/j1939/socket.c 	if (!sk)
sk                600 net/can/j1939/socket.c 	lock_sock(sk);
sk                601 net/can/j1939/socket.c 	jsk = j1939_sk(sk);
sk                607 net/can/j1939/socket.c 					     !j1939_sock_pending_get(&jsk->sk))) {
sk                608 net/can/j1939/socket.c 			j1939_cancel_active_session(priv, sk);
sk                621 net/can/j1939/socket.c 	sock_orphan(sk);
sk                622 net/can/j1939/socket.c 	sock->sk = NULL;
sk                624 net/can/j1939/socket.c 	release_sock(sk);
sk                625 net/can/j1939/socket.c 	sock_put(sk);
sk                639 net/can/j1939/socket.c 	lock_sock(&jsk->sk);
sk                644 net/can/j1939/socket.c 	release_sock(&jsk->sk);
sk                651 net/can/j1939/socket.c 	struct sock *sk = sock->sk;
sk                652 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk                684 net/can/j1939/socket.c 		lock_sock(&jsk->sk);
sk                688 net/can/j1939/socket.c 		release_sock(&jsk->sk);
sk                701 net/can/j1939/socket.c 			skb_queue_purge(&sk->sk_error_queue);
sk                712 net/can/j1939/socket.c 		lock_sock(&jsk->sk);
sk                713 net/can/j1939/socket.c 		jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
sk                714 net/can/j1939/socket.c 		release_sock(&jsk->sk);
sk                724 net/can/j1939/socket.c 	struct sock *sk = sock->sk;
sk                725 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk                739 net/can/j1939/socket.c 	lock_sock(&jsk->sk);
sk                748 net/can/j1939/socket.c 		tmp = j1939_prio(jsk->sk.sk_priority);
sk                768 net/can/j1939/socket.c 	release_sock(&jsk->sk);
sk                775 net/can/j1939/socket.c 	struct sock *sk = sock->sk;
sk                784 net/can/j1939/socket.c 		return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
sk                787 net/can/j1939/socket.c 	skb = skb_recv_datagram(sk, flags, 0, &ret);
sk                798 net/can/j1939/socket.c 		skb_free_datagram(sk, skb);
sk                826 net/can/j1939/socket.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk                828 net/can/j1939/socket.c 	skb_free_datagram(sk, skb);
sk                834 net/can/j1939/socket.c 					  struct sock *sk,
sk                838 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk                843 net/can/j1939/socket.c 	skb = sock_alloc_send_skb(sk,
sk                866 net/can/j1939/socket.c 	skcb->priority = j1939_prio(sk->sk_priority);
sk                922 net/can/j1939/socket.c 	struct sock *sk = session->sk;
sk                930 net/can/j1939/socket.c 	if (!sk)
sk                933 net/can/j1939/socket.c 	jsk = j1939_sk(sk);
sk                950 net/can/j1939/socket.c 		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) {
sk                961 net/can/j1939/socket.c 		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) {
sk                982 net/can/j1939/socket.c 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
sk                987 net/can/j1939/socket.c 	err = sock_queue_err_skb(sk, skb);
sk                993 net/can/j1939/socket.c void j1939_sk_send_loop_abort(struct sock *sk, int err)
sk                995 net/can/j1939/socket.c 	sk->sk_err = err;
sk                997 net/can/j1939/socket.c 	sk->sk_error_report(sk);
sk               1000 net/can/j1939/socket.c static int j1939_sk_send_loop(struct j1939_priv *priv,  struct sock *sk,
sk               1004 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk               1025 net/can/j1939/socket.c 		skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
sk               1097 net/can/j1939/socket.c 	struct sock *sk = sock->sk;
sk               1098 net/can/j1939/socket.c 	struct j1939_sock *jsk = j1939_sk(sk);
sk               1103 net/can/j1939/socket.c 	lock_sock(sock->sk);
sk               1146 net/can/j1939/socket.c 		    !sock_flag(sk, SOCK_BROADCAST)) {
sk               1153 net/can/j1939/socket.c 		    !sock_flag(sk, SOCK_BROADCAST)) {
sk               1160 net/can/j1939/socket.c 	ret = j1939_sk_send_loop(priv, sk, msg, size);
sk               1163 net/can/j1939/socket.c 	release_sock(sock->sk);
sk               1175 net/can/j1939/socket.c 		jsk->sk.sk_err = error_code;
sk               1176 net/can/j1939/socket.c 		if (!sock_flag(&jsk->sk, SOCK_DEAD))
sk               1177 net/can/j1939/socket.c 			jsk->sk.sk_error_report(&jsk->sk);
sk                257 net/can/j1939/transport.c 	j1939_sock_pending_del(session->sk);
sk                258 net/can/j1939/transport.c 	sock_put(session->sk);
sk                519 net/can/j1939/transport.c 		    session->tskey == skcb->tskey && session->sk == skb->sk)
sk                999 net/can/j1939/transport.c 	can_skb_set_owner(skb, se_skb->sk);
sk               1066 net/can/j1939/transport.c 	if (session->sk)
sk               1067 net/can/j1939/transport.c 		j1939_sk_send_loop_abort(session->sk, session->err);
sk               1273 net/can/j1939/transport.c 	if (session->sk)
sk               1274 net/can/j1939/transport.c 		j1939_sk_send_loop_abort(session->sk, session->err);
sk               1882 net/can/j1939/transport.c 	sock_hold(skb->sk);
sk               1883 net/can/j1939/transport.c 	session->sk = skb->sk;
sk               1893 net/can/j1939/transport.c 	skcb->tskey = session->sk->sk_tskey++;
sk               2017 net/can/j1939/transport.c 	if (!skb->sk)
sk               2035 net/can/j1939/transport.c int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk)
sk               2039 net/can/j1939/transport.c 	netdev_dbg(priv->ndev, "%s, sk: %p\n", __func__, sk);
sk               2044 net/can/j1939/transport.c 		if (!sk || sk == session->sk) {
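For context, the j1939 socket and transport paths listed above (j1939_sk_bind, j1939_sk_sendmsg, the SOCK_BROADCAST checks, session setup in transport.c) are driven from user space through an ordinary CAN_J1939 datagram socket. A hedged sketch of that usage, based on the documented SocketCAN J1939 API, is below; the interface name, addresses and PGN are placeholders and error handling is omitted.

#include <linux/can.h>
#include <linux/can/j1939.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
	struct sockaddr_can baddr, peer;
	unsigned char data[8] = { 0 };
	int one = 1;

	memset(&baddr, 0, sizeof(baddr));
	baddr.can_family = AF_CAN;
	baddr.can_ifindex = if_nametoindex("can0");   /* placeholder ifname */
	baddr.can_addr.j1939.name = J1939_NO_NAME;
	baddr.can_addr.j1939.addr = 0x20;             /* our source address */
	baddr.can_addr.j1939.pgn = J1939_NO_PGN;
	bind(s, (struct sockaddr *)&baddr, sizeof(baddr));  /* j1939_sk_bind() */

	/* Only needed for sends to the broadcast address; the sendmsg path
	 * rejects those unless SOCK_BROADCAST is set (see the checks above). */
	setsockopt(s, SOL_SOCKET, SO_BROADCAST, &one, sizeof(one));

	memset(&peer, 0, sizeof(peer));
	peer.can_family = AF_CAN;
	peer.can_addr.j1939.name = J1939_NO_NAME;
	peer.can_addr.j1939.addr = 0x30;              /* placeholder peer address */
	peer.can_addr.j1939.pgn = 0x12300;            /* placeholder PGN */
	sendto(s, data, sizeof(data), 0,
	       (struct sockaddr *)&peer, sizeof(peer));  /* j1939_sk_sendmsg() */

	close(s);
	return 0;
}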
sk                 83 net/can/raw.c  	struct sock sk;
sk                111 net/can/raw.c  static inline struct raw_sock *raw_sk(const struct sock *sk)
sk                113 net/can/raw.c  	return (struct raw_sock *)sk;
sk                118 net/can/raw.c  	struct sock *sk = (struct sock *)data;
sk                119 net/can/raw.c  	struct raw_sock *ro = raw_sk(sk);
sk                125 net/can/raw.c  	if (!ro->recv_own_msgs && oskb->sk == sk)
sk                172 net/can/raw.c  	if (oskb->sk)
sk                174 net/can/raw.c  	if (oskb->sk == sk)
sk                177 net/can/raw.c  	if (sock_queue_rcv_skb(sk, skb) < 0)
sk                182 net/can/raw.c  			      struct sock *sk, struct can_filter *filter,
sk                191 net/can/raw.c  				      raw_rcv, sk, "raw", sk);
sk                197 net/can/raw.c  						  raw_rcv, sk);
sk                206 net/can/raw.c  				struct sock *sk, can_err_mask_t err_mask)
sk                212 net/can/raw.c  				      raw_rcv, sk, "raw", sk);
sk                218 net/can/raw.c  				struct sock *sk, struct can_filter *filter,
sk                225 net/can/raw.c  				  filter[i].can_mask, raw_rcv, sk);
sk                230 net/can/raw.c  					 struct sock *sk,
sk                236 net/can/raw.c  				  raw_rcv, sk);
sk                241 net/can/raw.c  					  struct sock *sk)
sk                243 net/can/raw.c  	struct raw_sock *ro = raw_sk(sk);
sk                245 net/can/raw.c  	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
sk                246 net/can/raw.c  	raw_disable_errfilter(net, dev, sk, ro->err_mask);
sk                250 net/can/raw.c  				 struct sock *sk)
sk                252 net/can/raw.c  	struct raw_sock *ro = raw_sk(sk);
sk                255 net/can/raw.c  	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
sk                257 net/can/raw.c  		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
sk                259 net/can/raw.c  			raw_disable_filters(net, dev, sk, ro->filter,
sk                271 net/can/raw.c  	struct sock *sk = &ro->sk;
sk                273 net/can/raw.c  	if (!net_eq(dev_net(dev), sock_net(sk)))
sk                284 net/can/raw.c  		lock_sock(sk);
sk                287 net/can/raw.c  			raw_disable_allfilters(dev_net(dev), dev, sk);
sk                295 net/can/raw.c  		release_sock(sk);
sk                297 net/can/raw.c  		sk->sk_err = ENODEV;
sk                298 net/can/raw.c  		if (!sock_flag(sk, SOCK_DEAD))
sk                299 net/can/raw.c  			sk->sk_error_report(sk);
sk                303 net/can/raw.c  		sk->sk_err = ENETDOWN;
sk                304 net/can/raw.c  		if (!sock_flag(sk, SOCK_DEAD))
sk                305 net/can/raw.c  			sk->sk_error_report(sk);
sk                312 net/can/raw.c  static int raw_init(struct sock *sk)
sk                314 net/can/raw.c  	struct raw_sock *ro = raw_sk(sk);
sk                346 net/can/raw.c  	struct sock *sk = sock->sk;
sk                349 net/can/raw.c  	if (!sk)
sk                352 net/can/raw.c  	ro = raw_sk(sk);
sk                356 net/can/raw.c  	lock_sock(sk);
sk                363 net/can/raw.c  			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
sk                365 net/can/raw.c  				raw_disable_allfilters(dev_net(dev), dev, sk);
sk                369 net/can/raw.c  			raw_disable_allfilters(sock_net(sk), NULL, sk);
sk                381 net/can/raw.c  	sock_orphan(sk);
sk                382 net/can/raw.c  	sock->sk = NULL;
sk                384 net/can/raw.c  	release_sock(sk);
sk                385 net/can/raw.c  	sock_put(sk);
sk                393 net/can/raw.c  	struct sock *sk = sock->sk;
sk                394 net/can/raw.c  	struct raw_sock *ro = raw_sk(sk);
sk                404 net/can/raw.c  	lock_sock(sk);
sk                412 net/can/raw.c  		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
sk                428 net/can/raw.c  		err = raw_enable_allfilters(sock_net(sk), dev, sk);
sk                434 net/can/raw.c  		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
sk                443 net/can/raw.c  				dev = dev_get_by_index(sock_net(sk),
sk                447 net/can/raw.c  							       dev, sk);
sk                451 net/can/raw.c  				raw_disable_allfilters(sock_net(sk), NULL, sk);
sk                459 net/can/raw.c  	release_sock(sk);
sk                462 net/can/raw.c  		sk->sk_err = ENETDOWN;
sk                463 net/can/raw.c  		if (!sock_flag(sk, SOCK_DEAD))
sk                464 net/can/raw.c  			sk->sk_error_report(sk);
sk                474 net/can/raw.c  	struct sock *sk = sock->sk;
sk                475 net/can/raw.c  	struct raw_sock *ro = raw_sk(sk);
sk                490 net/can/raw.c  	struct sock *sk = sock->sk;
sk                491 net/can/raw.c  	struct raw_sock *ro = raw_sk(sk);
sk                522 net/can/raw.c  		lock_sock(sk);
sk                525 net/can/raw.c  			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
sk                530 net/can/raw.c  				err = raw_enable_filters(sock_net(sk), dev, sk,
sk                533 net/can/raw.c  				err = raw_enable_filters(sock_net(sk), dev, sk,
sk                542 net/can/raw.c  			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
sk                563 net/can/raw.c  		release_sock(sk);
sk                576 net/can/raw.c  		lock_sock(sk);
sk                579 net/can/raw.c  			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
sk                584 net/can/raw.c  			err = raw_enable_errfilter(sock_net(sk), dev, sk,
sk                591 net/can/raw.c  			raw_disable_errfilter(sock_net(sk), dev, sk,
sk                602 net/can/raw.c  		release_sock(sk);
sk                651 net/can/raw.c  	struct sock *sk = sock->sk;
sk                652 net/can/raw.c  	struct raw_sock *ro = raw_sk(sk);
sk                666 net/can/raw.c  		lock_sock(sk);
sk                677 net/can/raw.c  		release_sock(sk);
sk                726 net/can/raw.c  	struct sock *sk = sock->sk;
sk                727 net/can/raw.c  	struct raw_sock *ro = raw_sk(sk);
sk                747 net/can/raw.c  	dev = dev_get_by_index(sock_net(sk), ifindex);
sk                760 net/can/raw.c  	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
sk                773 net/can/raw.c  	skb_setup_tx_timestamp(skb, sk->sk_tsflags);
sk                776 net/can/raw.c  	skb->sk  = sk;
sk                777 net/can/raw.c  	skb->priority = sk->sk_priority;
sk                799 net/can/raw.c  	struct sock *sk = sock->sk;
sk                807 net/can/raw.c  	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                818 net/can/raw.c  		skb_free_datagram(sk, skb);
sk                822 net/can/raw.c  	sock_recv_ts_and_drops(msg, sk, skb);
sk                833 net/can/raw.c  	skb_free_datagram(sk, skb);
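The raw.c hits above (raw_bind, raw_setsockopt with CAN_RAW_FILTER reaching raw_enable_filters, raw_recvmsg via skb_recv_datagram) correspond to the documented CAN_RAW userspace API. A minimal sketch, with a placeholder interface name and filter and no error handling:

#include <linux/can.h>
#include <linux/can/raw.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	struct sockaddr_can addr;
	struct can_filter rfilter[1];
	struct can_frame frame;

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = if_nametoindex("can0");        /* placeholder ifname */
	bind(s, (struct sockaddr *)&addr, sizeof(addr));  /* raw_bind() */

	/* Installed via raw_setsockopt(CAN_RAW_FILTER), which ends up in
	 * raw_enable_filters() above. */
	rfilter[0].can_id = 0x123;                        /* placeholder ID */
	rfilter[0].can_mask = CAN_SFF_MASK;
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));

	/* Blocking read of one frame, serviced by raw_recvmsg() ->
	 * skb_recv_datagram(). */
	read(s, &frame, sizeof(frame));

	close(s);
	return 0;
}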
sk                367 net/ceph/messenger.c static void ceph_sock_data_ready(struct sock *sk)
sk                369 net/ceph/messenger.c 	struct ceph_connection *con = sk->sk_user_data;
sk                374 net/ceph/messenger.c 	if (sk->sk_state != TCP_CLOSE_WAIT) {
sk                382 net/ceph/messenger.c static void ceph_sock_write_space(struct sock *sk)
sk                384 net/ceph/messenger.c 	struct ceph_connection *con = sk->sk_user_data;
sk                394 net/ceph/messenger.c 		if (sk_stream_is_writeable(sk)) {
sk                396 net/ceph/messenger.c 			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                405 net/ceph/messenger.c static void ceph_sock_state_change(struct sock *sk)
sk                407 net/ceph/messenger.c 	struct ceph_connection *con = sk->sk_user_data;
sk                410 net/ceph/messenger.c 	     con, con->state, sk->sk_state);
sk                412 net/ceph/messenger.c 	switch (sk->sk_state) {
sk                438 net/ceph/messenger.c 	struct sock *sk = sock->sk;
sk                439 net/ceph/messenger.c 	sk->sk_user_data = con;
sk                440 net/ceph/messenger.c 	sk->sk_data_ready = ceph_sock_data_ready;
sk                441 net/ceph/messenger.c 	sk->sk_write_space = ceph_sock_write_space;
sk                442 net/ceph/messenger.c 	sk->sk_state_change = ceph_sock_state_change;
sk                469 net/ceph/messenger.c 	sock->sk->sk_allocation = GFP_NOFS;
sk                472 net/ceph/messenger.c 	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
sk                485 net/ceph/messenger.c 		     sock->sk->sk_state);
sk                124 net/compat.c   int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
sk                161 net/compat.c   		kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
sk                206 net/compat.c   		sock_kfree_s(sk, kcmsg_base, kcmlen);
sk                 86 net/core/bpf_sk_storage.c 	struct sock *sk;	/* The sk that owns the above "list" of
sk                 99 net/core/bpf_sk_storage.c static int omem_charge(struct sock *sk, unsigned int size)
sk                103 net/core/bpf_sk_storage.c 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
sk                104 net/core/bpf_sk_storage.c 		atomic_add(size, &sk->sk_omem_alloc);
sk                122 net/core/bpf_sk_storage.c 					       struct sock *sk, void *value,
sk                127 net/core/bpf_sk_storage.c 	if (charge_omem && omem_charge(sk, smap->elem_size))
sk                138 net/core/bpf_sk_storage.c 		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
sk                153 net/core/bpf_sk_storage.c 	struct sock *sk;
sk                156 net/core/bpf_sk_storage.c 	sk = sk_storage->sk;
sk                162 net/core/bpf_sk_storage.c 		atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
sk                167 net/core/bpf_sk_storage.c 		atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc);
sk                168 net/core/bpf_sk_storage.c 		sk_storage->sk = NULL;
sk                170 net/core/bpf_sk_storage.c 		RCU_INIT_POINTER(sk->sk_bpf_storage, NULL);
sk                299 net/core/bpf_sk_storage.c sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
sk                304 net/core/bpf_sk_storage.c 	sk_storage = rcu_dereference(sk->sk_bpf_storage);
sk                326 net/core/bpf_sk_storage.c static int sk_storage_alloc(struct sock *sk,
sk                333 net/core/bpf_sk_storage.c 	err = omem_charge(sk, sizeof(*sk_storage));
sk                344 net/core/bpf_sk_storage.c 	sk_storage->sk = sk;
sk                357 net/core/bpf_sk_storage.c 	prev_sk_storage = cmpxchg((struct bpf_sk_storage **)&sk->sk_bpf_storage,
sk                378 net/core/bpf_sk_storage.c 	atomic_sub(sizeof(*sk_storage), &sk->sk_omem_alloc);
sk                387 net/core/bpf_sk_storage.c static struct bpf_sk_storage_data *sk_storage_update(struct sock *sk,
sk                405 net/core/bpf_sk_storage.c 	sk_storage = rcu_dereference(sk->sk_bpf_storage);
sk                412 net/core/bpf_sk_storage.c 		selem = selem_alloc(smap, sk, value, true);
sk                416 net/core/bpf_sk_storage.c 		err = sk_storage_alloc(sk, smap, selem);
sk                419 net/core/bpf_sk_storage.c 			atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
sk                474 net/core/bpf_sk_storage.c 	selem = selem_alloc(smap, sk, value, !old_sdata);
sk                501 net/core/bpf_sk_storage.c static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
sk                505 net/core/bpf_sk_storage.c 	sdata = sk_storage_lookup(sk, map, false);
sk                515 net/core/bpf_sk_storage.c void bpf_sk_storage_free(struct sock *sk)
sk                523 net/core/bpf_sk_storage.c 	sk_storage = rcu_dereference(sk->sk_bpf_storage);
sk                710 net/core/bpf_sk_storage.c 		sdata = sk_storage_lookup(sock->sk, map, true);
sk                728 net/core/bpf_sk_storage.c 		sdata = sk_storage_update(sock->sk, map, value, map_flags);
sk                744 net/core/bpf_sk_storage.c 		err = sk_storage_delete(sock->sk, map);
sk                773 net/core/bpf_sk_storage.c int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
sk                783 net/core/bpf_sk_storage.c 	sk_storage = rcu_dereference(sk->sk_bpf_storage);
sk                841 net/core/bpf_sk_storage.c BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
sk                849 net/core/bpf_sk_storage.c 	sdata = sk_storage_lookup(sk, map, true);
sk                859 net/core/bpf_sk_storage.c 	    refcount_inc_not_zero(&sk->sk_refcnt)) {
sk                860 net/core/bpf_sk_storage.c 		sdata = sk_storage_update(sk, map, value, BPF_NOEXIST);
sk                864 net/core/bpf_sk_storage.c 		sock_put(sk);
sk                872 net/core/bpf_sk_storage.c BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
sk                874 net/core/bpf_sk_storage.c 	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
sk                877 net/core/bpf_sk_storage.c 		err = sk_storage_delete(sk, map);
sk                878 net/core/bpf_sk_storage.c 		sock_put(sk);
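bpf_sk_storage_get()/bpf_sk_storage_delete() above back the BPF_MAP_TYPE_SK_STORAGE map type. From the BPF program side the usual pattern looks roughly like the sketch below (libbpf-style BTF map definition assumed; the map and value names are made up): the per-socket element is created on first access with BPF_SK_STORAGE_GET_F_CREATE, and its memory is charged against the socket via omem_charge() as shown above.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct sk_val {
	__u32 pkts;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);   /* required for sk_storage maps */
	__type(key, int);
	__type(value, struct sk_val);
} sk_pkts SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct sk_val *val;

	if (!sk)
		return 1;
	sk = bpf_sk_fullsock(sk);   /* bpf_sk_fullsock(), listed above */
	if (!sk)
		return 1;

	/* Creates the element on first use; later lookups hit the
	 * sk_storage_lookup() fast path. */
	val = bpf_sk_storage_get(&sk_pkts, sk, NULL,
				 BPF_SK_STORAGE_GET_F_CREATE);
	if (val)
		__sync_fetch_and_add(&val->pkts, 1);
	return 1;
}

char _license[] SEC("license") = "GPL";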
sk                 70 net/core/datagram.c static inline int connection_based(struct sock *sk)
sk                 72 net/core/datagram.c 	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
sk                 88 net/core/datagram.c int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
sk                 94 net/core/datagram.c 	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                 97 net/core/datagram.c 	error = sock_error(sk);
sk                101 net/core/datagram.c 	if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
sk                105 net/core/datagram.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                112 net/core/datagram.c 	if (connection_based(sk) &&
sk                113 net/core/datagram.c 	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
sk                123 net/core/datagram.c 	finish_wait(sk_sleep(sk), &wait);
sk                166 net/core/datagram.c struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
sk                169 net/core/datagram.c 					  void (*destructor)(struct sock *sk,
sk                202 net/core/datagram.c 				destructor(sk, skb);
sk                245 net/core/datagram.c struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
sk                246 net/core/datagram.c 					void (*destructor)(struct sock *sk,
sk                251 net/core/datagram.c 	struct sk_buff_head *queue = &sk->sk_receive_queue;
sk                257 net/core/datagram.c 	int error = sock_error(sk);
sk                270 net/core/datagram.c 		skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
sk                278 net/core/datagram.c 		if (!sk_can_busy_loop(sk))
sk                281 net/core/datagram.c 		sk_busy_loop(sk, flags & MSG_DONTWAIT);
sk                282 net/core/datagram.c 	} while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
sk                292 net/core/datagram.c struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
sk                293 net/core/datagram.c 				    void (*destructor)(struct sock *sk,
sk                300 net/core/datagram.c 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk                303 net/core/datagram.c 		skb = __skb_try_recv_datagram(sk, flags, destructor, off, err,
sk                311 net/core/datagram.c 		!__skb_wait_for_more_packets(sk, err, &timeo, last));
sk                317 net/core/datagram.c struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
sk                322 net/core/datagram.c 	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
sk                327 net/core/datagram.c void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
sk                330 net/core/datagram.c 	sk_mem_reclaim_partial(sk);
sk                334 net/core/datagram.c void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
sk                339 net/core/datagram.c 		sk_peek_offset_bwd(sk, len);
sk                343 net/core/datagram.c 	slow = lock_sock_fast(sk);
sk                344 net/core/datagram.c 	sk_peek_offset_bwd(sk, len);
sk                346 net/core/datagram.c 	sk_mem_reclaim_partial(sk);
sk                347 net/core/datagram.c 	unlock_sock_fast(sk, slow);
sk                354 net/core/datagram.c int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
sk                356 net/core/datagram.c 			void (*destructor)(struct sock *sk,
sk                368 net/core/datagram.c 				destructor(sk, skb);
sk                374 net/core/datagram.c 	atomic_inc(&sk->sk_drops);
sk                400 net/core/datagram.c int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
sk                402 net/core/datagram.c 	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
sk                406 net/core/datagram.c 	sk_mem_reclaim_partial(sk);
sk                623 net/core/datagram.c int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
sk                650 net/core/datagram.c 		if (sk && sk->sk_type == SOCK_STREAM) {
sk                651 net/core/datagram.c 			sk_wmem_queued_add(sk, truesize);
sk                652 net/core/datagram.c 			sk_mem_charge(sk, truesize);
sk                654 net/core/datagram.c 			refcount_add(truesize, &skb->sk->sk_wmem_alloc);
sk                771 net/core/datagram.c 	struct sock *sk = sock->sk;
sk                778 net/core/datagram.c 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
sk                780 net/core/datagram.c 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
sk                782 net/core/datagram.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                784 net/core/datagram.c 	if (sk->sk_shutdown == SHUTDOWN_MASK)
sk                788 net/core/datagram.c 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk                792 net/core/datagram.c 	if (connection_based(sk)) {
sk                793 net/core/datagram.c 		if (sk->sk_state == TCP_CLOSE)
sk                796 net/core/datagram.c 		if (sk->sk_state == TCP_SYN_SENT)
sk                801 net/core/datagram.c 	if (sock_writeable(sk))
sk                804 net/core/datagram.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                 12 net/core/datagram.h int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
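The datagram.c helpers above implement the generic blocking receive path: sock_rcvtimeo() picks the timeout, MSG_DONTWAIT skips __skb_wait_for_more_packets() entirely, and an empty queue surfaces as -EAGAIN. A small userspace illustration on a UDP socket (the port number is a placeholder):

#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr;
	struct timeval tv = { .tv_sec = 2 };
	char buf[1500];
	ssize_t n;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(9000);            /* placeholder port */
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	/* Non-blocking attempt: MSG_DONTWAIT means the wait loop above is
	 * never entered and an empty queue returns -EAGAIN. */
	n = recv(s, buf, sizeof(buf), MSG_DONTWAIT);
	if (n < 0 && errno == EAGAIN)
		puts("queue empty, not waiting");

	/* Bounded blocking attempt: SO_RCVTIMEO becomes the timeo that
	 * sock_rcvtimeo() hands to __skb_wait_for_more_packets(). */
	setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
	n = recv(s, buf, sizeof(buf), 0);
	if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
		puts("timed out after 2s");

	close(s);
	return 0;
}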
sk               1910 net/core/dev.c 	if (!ptype->af_packet_priv || !skb->sk)
sk               1914 net/core/dev.c 		return ptype->id_match(ptype, skb->sk);
sk               1915 net/core/dev.c 	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
sk               2826 net/core/dev.c 	     skb->sk ? &skb->sk->sk_route_caps : &null_features);
sk               3455 net/core/dev.c 	const struct sock *sk;
sk               3463 net/core/dev.c 	sk = skb_to_full_sk(skb);
sk               3464 net/core/dev.c 	if (!sk)
sk               3467 net/core/dev.c 	prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
sk               3482 net/core/dev.c int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
sk               3568 net/core/dev.c 	struct sock *sk = skb->sk;
sk               3580 net/core/dev.c 		int tci = sk_rx_queue_get(sk);
sk               3622 net/core/dev.c 	struct sock *sk = skb->sk;
sk               3623 net/core/dev.c 	int queue_index = sk_tx_queue_get(sk);
sk               3634 net/core/dev.c 		if (queue_index != new_index && sk &&
sk               3635 net/core/dev.c 		    sk_fullsock(sk) &&
sk               3636 net/core/dev.c 		    rcu_access_pointer(sk->sk_dst_cache))
sk               3637 net/core/dev.c 			sk_tx_queue_set(sk, new_index);
sk               3711 net/core/dev.c 		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
sk                660 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk                714 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk                902 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk               1042 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
sk               1240 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
sk               1467 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)) ||
sk               3162 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk               3425 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk               3691 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk               3852 net/core/devlink.c 	devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
sk               4091 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk               4907 net/core/devlink.c 	devlink = devlink_get_from_attrs(sock_net(cb->skb->sk), attrs);
sk               5036 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk               5481 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk               5687 net/core/devlink.c 		if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
sk                 30 net/core/dst.c int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                244 net/core/dst.c static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                493 net/core/fib_rules.c 	struct net *net = sock_net(skb->sk);
sk                726 net/core/fib_rules.c 	struct net *net = sock_net(skb->sk);
sk                834 net/core/fib_rules.c 	struct net *net = sock_net(skb->sk);
sk               1094 net/core/fib_rules.c 	struct net *net = sock_net(skb->sk);
sk                 90 net/core/filter.c int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
sk                100 net/core/filter.c 	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
sk                101 net/core/filter.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
sk                104 net/core/filter.c 	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
sk                108 net/core/filter.c 	err = security_sock_rcv_skb(sk, skb);
sk                113 net/core/filter.c 	filter = rcu_dereference(sk->sk_filter);
sk                115 net/core/filter.c 		struct sock *save_sk = skb->sk;
sk                118 net/core/filter.c 		skb->sk = sk;
sk                120 net/core/filter.c 		skb->sk = save_sk;
sk               1182 net/core/filter.c void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
sk               1186 net/core/filter.c 	atomic_sub(filter_size, &sk->sk_omem_alloc);
sk               1193 net/core/filter.c static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
sk               1199 net/core/filter.c 	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
sk               1200 net/core/filter.c 		atomic_add(filter_size, &sk->sk_omem_alloc);
sk               1206 net/core/filter.c bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
sk               1211 net/core/filter.c 	if (!__sk_filter_charge(sk, fp)) {
sk               1432 net/core/filter.c static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
sk               1442 net/core/filter.c 	if (!__sk_filter_charge(sk, fp)) {
sk               1448 net/core/filter.c 	old_fp = rcu_dereference_protected(sk->sk_filter,
sk               1449 net/core/filter.c 					   lockdep_sock_is_held(sk));
sk               1450 net/core/filter.c 	rcu_assign_pointer(sk->sk_filter, fp);
sk               1453 net/core/filter.c 		sk_filter_uncharge(sk, old_fp);
sk               1459 net/core/filter.c struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
sk               1465 net/core/filter.c 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
sk               1505 net/core/filter.c int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
sk               1507 net/core/filter.c 	struct bpf_prog *prog = __get_filter(fprog, sk);
sk               1513 net/core/filter.c 	err = __sk_attach_prog(prog, sk);
sk               1523 net/core/filter.c int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
sk               1525 net/core/filter.c 	struct bpf_prog *prog = __get_filter(fprog, sk);
sk               1534 net/core/filter.c 		err = reuseport_attach_prog(sk, prog);
sk               1542 net/core/filter.c static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
sk               1544 net/core/filter.c 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
sk               1550 net/core/filter.c int sk_attach_bpf(u32 ufd, struct sock *sk)
sk               1552 net/core/filter.c 	struct bpf_prog *prog = __get_bpf(ufd, sk);
sk               1558 net/core/filter.c 	err = __sk_attach_prog(prog, sk);
sk               1567 net/core/filter.c int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
sk               1572 net/core/filter.c 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
sk               1587 net/core/filter.c 		if ((sk->sk_type != SOCK_STREAM &&
sk               1588 net/core/filter.c 		     sk->sk_type != SOCK_DGRAM) ||
sk               1589 net/core/filter.c 		    (sk->sk_protocol != IPPROTO_UDP &&
sk               1590 net/core/filter.c 		     sk->sk_protocol != IPPROTO_TCP) ||
sk               1591 net/core/filter.c 		    (sk->sk_family != AF_INET &&
sk               1592 net/core/filter.c 		     sk->sk_family != AF_INET6)) {
sk               1604 net/core/filter.c 	err = reuseport_attach_prog(sk, prog);
sk               1830 net/core/filter.c BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
sk               1832 net/core/filter.c 	return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
sk               2453 net/core/filter.c 	sk_mem_charge(msg->sk, len);
sk               2629 net/core/filter.c 	sk_mem_uncharge(msg->sk, len - pop);
sk               4060 net/core/filter.c 	struct sock *sk;
sk               4062 net/core/filter.c 	sk = skb_to_full_sk(skb);
sk               4063 net/core/filter.c 	if (!sk || !sk_fullsock(sk))
sk               4072 net/core/filter.c 	return sk_under_cgroup_hierarchy(sk, cgrp);
sk               4087 net/core/filter.c 	struct sock *sk = skb_to_full_sk(skb);
sk               4090 net/core/filter.c 	if (!sk || !sk_fullsock(sk))
sk               4093 net/core/filter.c 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
sk               4107 net/core/filter.c 	struct sock *sk = skb_to_full_sk(skb);
sk               4111 net/core/filter.c 	if (!sk || !sk_fullsock(sk))
sk               4114 net/core/filter.c 	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
sk               4165 net/core/filter.c 	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
sk               4177 net/core/filter.c 	return sock_gen_cookie(ctx->sk);
sk               4189 net/core/filter.c 	return sock_gen_cookie(ctx->sk);
sk               4201 net/core/filter.c 	struct sock *sk = sk_to_full_sk(skb->sk);
sk               4204 net/core/filter.c 	if (!sk || !sk_fullsock(sk))
sk               4206 net/core/filter.c 	kuid = sock_net_uid(sock_net(sk), sk);
sk               4207 net/core/filter.c 	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
sk               4240 net/core/filter.c 	struct sock *sk = bpf_sock->sk;
sk               4244 net/core/filter.c 	if (!sk_fullsock(sk))
sk               4256 net/core/filter.c 			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk               4257 net/core/filter.c 			WRITE_ONCE(sk->sk_rcvbuf,
sk               4262 net/core/filter.c 			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk               4263 net/core/filter.c 			WRITE_ONCE(sk->sk_sndbuf,
sk               4268 net/core/filter.c 				cmpxchg(&sk->sk_pacing_status,
sk               4271 net/core/filter.c 			sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
sk               4272 net/core/filter.c 			sk->sk_pacing_rate = min(sk->sk_pacing_rate,
sk               4273 net/core/filter.c 						 sk->sk_max_pacing_rate);
sk               4276 net/core/filter.c 			sk->sk_priority = val;
sk               4281 net/core/filter.c 			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
sk               4284 net/core/filter.c 			if (sk->sk_mark != val) {
sk               4285 net/core/filter.c 				sk->sk_mark = val;
sk               4286 net/core/filter.c 				sk_dst_reset(sk);
sk               4294 net/core/filter.c 		if (optlen != sizeof(int) || sk->sk_family != AF_INET)
sk               4304 net/core/filter.c 				struct inet_sock *inet = inet_sk(sk);
sk               4316 net/core/filter.c 		if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
sk               4326 net/core/filter.c 				struct ipv6_pinfo *np = inet6_sk(sk);
sk               4338 net/core/filter.c 		   sk->sk_prot->setsockopt == tcp_setsockopt) {
sk               4346 net/core/filter.c 			ret = tcp_set_congestion_control(sk, name, false,
sk               4349 net/core/filter.c 			struct tcp_sock *tp = tcp_sk(sk);
sk               4402 net/core/filter.c 	struct sock *sk = bpf_sock->sk;
sk               4404 net/core/filter.c 	if (!sk_fullsock(sk))
sk               4407 net/core/filter.c 	if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
sk               4413 net/core/filter.c 			icsk = inet_csk(sk);
sk               4421 net/core/filter.c 			tp = tcp_sk(sk);
sk               4432 net/core/filter.c 		struct inet_sock *inet = inet_sk(sk);
sk               4434 net/core/filter.c 		if (optlen != sizeof(int) || sk->sk_family != AF_INET)
sk               4447 net/core/filter.c 		struct ipv6_pinfo *np = inet6_sk(sk);
sk               4449 net/core/filter.c 		if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
sk               4485 net/core/filter.c 	struct sock *sk = bpf_sock->sk;
sk               4488 net/core/filter.c 	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
sk               4491 net/core/filter.c 	tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
sk               4511 net/core/filter.c 	struct sock *sk = ctx->sk;
sk               4525 net/core/filter.c 		return __inet_bind(sk, addr, addr_len, true, false);
sk               4535 net/core/filter.c 		return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
sk               5211 net/core/filter.c 	struct sock *sk = NULL;
sk               5218 net/core/filter.c 			sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
sk               5223 net/core/filter.c 			sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
sk               5232 net/core/filter.c 			sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
sk               5237 net/core/filter.c 			sk = ipv6_bpf_stub->udp6_lib_lookup(net,
sk               5245 net/core/filter.c 	if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
sk               5247 net/core/filter.c 		sk = NULL;
sk               5249 net/core/filter.c 	return sk;
sk               5262 net/core/filter.c 	struct sock *sk = NULL;
sk               5285 net/core/filter.c 		sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
sk               5290 net/core/filter.c 		sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
sk               5295 net/core/filter.c 	return sk;
sk               5303 net/core/filter.c 	struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
sk               5306 net/core/filter.c 	if (sk) {
sk               5307 net/core/filter.c 		sk = sk_to_full_sk(sk);
sk               5308 net/core/filter.c 		if (!sk_fullsock(sk)) {
sk               5309 net/core/filter.c 			sock_gen_put(sk);
sk               5314 net/core/filter.c 	return sk;
sk               5328 net/core/filter.c 		caller_net = sock_net(skb->sk);
sk               5340 net/core/filter.c 	struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
sk               5343 net/core/filter.c 	if (sk) {
sk               5344 net/core/filter.c 		sk = sk_to_full_sk(sk);
sk               5345 net/core/filter.c 		if (!sk_fullsock(sk)) {
sk               5346 net/core/filter.c 			sock_gen_put(sk);
sk               5351 net/core/filter.c 	return sk;
sk               5411 net/core/filter.c BPF_CALL_1(bpf_sk_release, struct sock *, sk)
sk               5414 net/core/filter.c 	if (!sk_fullsock(sk) || !sock_flag(sk, SOCK_RCU_FREE))
sk               5415 net/core/filter.c 		sock_gen_put(sk);
sk               5499 net/core/filter.c 					       sock_net(ctx->sk), 0,
sk               5518 net/core/filter.c 					      sock_net(ctx->sk), 0, IPPROTO_TCP,
sk               5537 net/core/filter.c 					      sock_net(ctx->sk), 0, IPPROTO_UDP,
sk               5695 net/core/filter.c BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
sk               5697 net/core/filter.c 	if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
sk               5698 net/core/filter.c 		return (unsigned long)sk;
sk               5710 net/core/filter.c BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
sk               5712 net/core/filter.c 	sk = sk_to_full_sk(sk);
sk               5714 net/core/filter.c 	if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
sk               5715 net/core/filter.c 		return (unsigned long)sk;
sk               5794 net/core/filter.c BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
sk               5805 net/core/filter.c 	if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
sk               5808 net/core/filter.c 	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
sk               5814 net/core/filter.c 	if (tcp_synq_no_recent_overflow(sk))
sk               5819 net/core/filter.c 	switch (sk->sk_family) {
sk               5861 net/core/filter.c BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
sk               5871 net/core/filter.c 	if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
sk               5874 net/core/filter.c 	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
sk               5888 net/core/filter.c 		if (sk->sk_family == AF_INET6 && sk->sk_ipv6only)
sk               5891 net/core/filter.c 		mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);
sk               5899 net/core/filter.c 		if (sk->sk_family != AF_INET6)
sk               5902 net/core/filter.c 		mss = tcp_v6_get_syncookie(sk, iph, th, &cookie);
sk               6509 net/core/filter.c 	case offsetof(struct __sk_buff, sk):
sk               7008 net/core/filter.c 	case offsetof(struct bpf_sock_addr, sk):
sk               7058 net/core/filter.c 		case offsetof(struct bpf_sock_ops, sk):
sk               7427 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
sk               7429 net/core/filter.c 				      offsetof(struct sk_buff, sk));
sk               7438 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
sk               7440 net/core/filter.c 				      offsetof(struct sk_buff, sk));
sk               7450 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
sk               7452 net/core/filter.c 				      offsetof(struct sk_buff, sk));
sk               7467 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
sk               7469 net/core/filter.c 				      offsetof(struct sk_buff, sk));
sk               7487 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
sk               7489 net/core/filter.c 				      offsetof(struct sk_buff, sk));
sk               7502 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
sk               7504 net/core/filter.c 				      offsetof(struct sk_buff, sk));
sk               7517 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
sk               7519 net/core/filter.c 				      offsetof(struct sk_buff, sk));
sk               7574 net/core/filter.c 	case offsetof(struct __sk_buff, sk):
sk               7575 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
sk               7577 net/core/filter.c 				      offsetof(struct sk_buff, sk));
sk               7930 net/core/filter.c 					    struct sock, sk, sk_family);
sk               7935 net/core/filter.c 			struct bpf_sock_addr_kern, struct sock, sk,
sk               7943 net/core/filter.c 			struct bpf_sock_addr_kern, struct sock, sk,
sk               7966 net/core/filter.c 	case offsetof(struct bpf_sock_addr, sk):
sk               7967 net/core/filter.c 		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
sk               7969 net/core/filter.c 				      offsetof(struct bpf_sock_addr_kern, sk));
sk               7998 net/core/filter.c 						struct bpf_sock_ops_kern, sk),\
sk               8000 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));\
sk               8039 net/core/filter.c 						struct bpf_sock_ops_kern, sk),\
sk               8041 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));\
sk               8085 net/core/filter.c 					      struct bpf_sock_ops_kern, sk),
sk               8087 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8096 net/core/filter.c 						struct bpf_sock_ops_kern, sk),
sk               8098 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8108 net/core/filter.c 					      struct bpf_sock_ops_kern, sk),
sk               8110 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8125 net/core/filter.c 						struct bpf_sock_ops_kern, sk),
sk               8127 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8146 net/core/filter.c 						struct bpf_sock_ops_kern, sk),
sk               8148 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8162 net/core/filter.c 						struct bpf_sock_ops_kern, sk),
sk               8164 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8176 net/core/filter.c 						struct bpf_sock_ops_kern, sk),
sk               8178 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8196 net/core/filter.c 						struct bpf_sock_ops_kern, sk),
sk               8198 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8210 net/core/filter.c 						struct bpf_sock_ops_kern, sk),
sk               8212 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8290 net/core/filter.c 	case offsetof(struct bpf_sock_ops, sk):
sk               8299 net/core/filter.c 						struct bpf_sock_ops_kern, sk),
sk               8301 net/core/filter.c 				      offsetof(struct bpf_sock_ops_kern, sk));
sk               8360 net/core/filter.c 					      struct sk_msg, sk),
sk               8362 net/core/filter.c 				      offsetof(struct sk_msg, sk));
sk               8371 net/core/filter.c 						struct sk_msg, sk),
sk               8373 net/core/filter.c 				      offsetof(struct sk_msg, sk));
sk               8383 net/core/filter.c 					      struct sk_msg, sk),
sk               8385 net/core/filter.c 				      offsetof(struct sk_msg, sk));
sk               8400 net/core/filter.c 						struct sk_msg, sk),
sk               8402 net/core/filter.c 				      offsetof(struct sk_msg, sk));
sk               8421 net/core/filter.c 						struct sk_msg, sk),
sk               8423 net/core/filter.c 				      offsetof(struct sk_msg, sk));
sk               8437 net/core/filter.c 						struct sk_msg, sk),
sk               8439 net/core/filter.c 				      offsetof(struct sk_msg, sk));
sk               8451 net/core/filter.c 						struct sk_msg, sk),
sk               8453 net/core/filter.c 				      offsetof(struct sk_msg, sk));
sk               8610 net/core/filter.c int sk_detach_filter(struct sock *sk)
sk               8615 net/core/filter.c 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
sk               8618 net/core/filter.c 	filter = rcu_dereference_protected(sk->sk_filter,
sk               8619 net/core/filter.c 					   lockdep_sock_is_held(sk));
sk               8621 net/core/filter.c 		RCU_INIT_POINTER(sk->sk_filter, NULL);
sk               8622 net/core/filter.c 		sk_filter_uncharge(sk, filter);
sk               8630 net/core/filter.c int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
sk               8637 net/core/filter.c 	lock_sock(sk);
sk               8638 net/core/filter.c 	filter = rcu_dereference_protected(sk->sk_filter,
sk               8639 net/core/filter.c 					   lockdep_sock_is_held(sk));
sk               8670 net/core/filter.c 	release_sock(sk);
sk               8677 net/core/filter.c 	struct sock *sk;
sk               8687 net/core/filter.c 				    struct sock *sk, struct sk_buff *skb,
sk               8691 net/core/filter.c 	reuse_kern->sk = sk;
sk               8699 net/core/filter.c struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
sk               8706 net/core/filter.c 	bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
sk               8734 net/core/filter.c 		struct sock *sk;
sk               8745 net/core/filter.c 		sk = reuse_kern->sk;
sk               8746 net/core/filter.c 		if (sk->sk_protocol != selected_sk->sk_protocol)
sk               8748 net/core/filter.c 		else if (sk->sk_family != selected_sk->sk_family)
sk               8879 net/core/filter.c 					     sk,			\
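sk_attach_filter(), sk_detach_filter() and sk_filter_trim_cap() above are the kernel side of the classic BPF socket-filter API reached through setsockopt(). A hedged userspace sketch with a trivial accept-everything filter (the single instruction is a placeholder for a real program):

#include <linux/filter.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
	int dummy = 0;

	/* One-instruction cBPF program: return 0xffff, i.e. accept the whole
	 * packet.  sk_filter_trim_cap() applies this verdict on receive. */
	struct sock_filter code[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	/* Ends up in sk_attach_filter() via sock_setsockopt(). */
	setsockopt(s, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));

	/* ... receive traffic through the filter ... */

	/* Ends up in sk_detach_filter(); the option value is ignored but
	 * sock_setsockopt() still wants an int's worth of optval. */
	setsockopt(s, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));

	close(s);
	return 0;
}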
sk                953 net/core/flow_dissector.c 			else if (skb->sk)
sk                954 net/core/flow_dissector.c 				net = sock_net(skb->sk);
sk                138 net/core/lwt_bpf.c static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                158 net/core/lwt_bpf.c 	return dst->lwtstate->orig_output(net, sk, skb);
sk                181 net/core/lwt_bpf.c 	struct sock *sk;
sk                192 net/core/lwt_bpf.c 	sk = sk_to_full_sk(skb->sk);
sk                193 net/core/lwt_bpf.c 	if (sk) {
sk                194 net/core/lwt_bpf.c 		if (sk->sk_bound_dev_if)
sk                195 net/core/lwt_bpf.c 			oif = sk->sk_bound_dev_if;
sk                196 net/core/lwt_bpf.c 		net = sock_net(sk);
sk                208 net/core/lwt_bpf.c 		fl4.flowi4_uid = sock_net_uid(net, sk);
sk                227 net/core/lwt_bpf.c 		fl6.flowi6_uid = sock_net_uid(net, sk);
sk                233 net/core/lwt_bpf.c 		dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL);
sk                257 net/core/lwt_bpf.c 	err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
sk                312 net/core/lwtunnel.c int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                331 net/core/lwtunnel.c 		ret = ops->output(net, sk, skb);
sk               1787 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
sk               1854 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
sk               2178 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
sk               2364 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
sk               2549 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
sk               2597 net/core/neighbour.c 	struct net *net = sock_net(skb->sk);
sk               2866 net/core/neighbour.c 	struct net *net = sock_net(in_skb->sk);
sk               1595 net/core/net-sysfs.c static const void *net_netlink_ns(struct sock *sk)
sk               1597 net/core/net-sysfs.c 	return sock_net(sk);
sk                725 net/core/net_namespace.c 	struct net *net = sock_net(skb->sk);
sk                869 net/core/net_namespace.c 	struct net *net = sock_net(skb->sk);
sk                909 net/core/net_namespace.c 		target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
sk                973 net/core/net_namespace.c static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
sk                994 net/core/net_namespace.c 			net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
sk               1018 net/core/net_namespace.c 		.tgt_net = sock_net(skb->sk),
sk               1032 net/core/net_namespace.c 		err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
sk                 77 net/core/netclassid_cgroup.c 		sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
sk                227 net/core/netprio_cgroup.c 		sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
sk                 91 net/core/request_sock.c void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
sk                 99 net/core/request_sock.c 	RCU_INIT_POINTER(tcp_sk(sk)->fastopen_rsk, NULL);
sk                103 net/core/request_sock.c 	if (req->sk)	/* the child socket hasn't been accepted yet */
sk               1864 net/core/rtnetlink.c struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid)
sk               1868 net/core/rtnetlink.c 	net = get_net_ns_by_id(sock_net(sk), netnsid);
sk               1875 net/core/rtnetlink.c 	if (!sk_ns_capable(sk, net->user_ns, CAP_NET_ADMIN)) {
sk               1931 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               1964 net/core/rtnetlink.c 			tgt_net = rtnl_get_net_ns_capable(skb->sk, netnsid);
sk               2750 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               2845 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               2868 net/core/rtnetlink.c 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
sk               3026 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               3340 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               3361 net/core/rtnetlink.c 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, netnsid);
sk               3409 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               3685 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               3793 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               4029 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               4191 net/core/rtnetlink.c 	struct net *net = sock_net(in_skb->sk);
sk               4446 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               4553 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               4630 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               5045 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               5093 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               5158 net/core/rtnetlink.c 	struct net *net = sock_net(skb->sk);
sk               5321 net/core/rtnetlink.c 	struct sock *sk;
sk               5330 net/core/rtnetlink.c 	sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
sk               5331 net/core/rtnetlink.c 	if (!sk)
sk               5333 net/core/rtnetlink.c 	net->rtnl = sk;
sk                322 net/core/scm.c 			sock_update_netprioidx(&sock->sk->sk_cgrp_data);
sk                323 net/core/scm.c 			sock_update_classid(&sock->sk->sk_cgrp_data);
sk                723 net/core/skbuff.c 	struct sock *sk = skb->sk;
sk                764 net/core/skbuff.c 	if (sk)
sk                766 net/core/skbuff.c 		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);
sk                986 net/core/skbuff.c 	n->sk = NULL;
sk               1095 net/core/skbuff.c struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
sk               1102 net/core/skbuff.c 	skb = sock_omalloc(sk, 0, GFP_KERNEL);
sk               1116 net/core/skbuff.c 	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
sk               1121 net/core/skbuff.c 	sock_hold(sk);
sk               1132 net/core/skbuff.c struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
sk               1142 net/core/skbuff.c 		if (!sock_owned_by_user(sk)) {
sk               1150 net/core/skbuff.c 			if (sk->sk_type == SOCK_STREAM)
sk               1155 net/core/skbuff.c 		next = (u32)atomic_read(&sk->sk_zckey);
sk               1161 net/core/skbuff.c 			atomic_set(&sk->sk_zckey, ++next);
sk               1164 net/core/skbuff.c 			if (sk->sk_type == SOCK_STREAM)
sk               1172 net/core/skbuff.c 	return sock_zerocopy_alloc(sk, size);
sk               1200 net/core/skbuff.c 	struct sock *sk = skb->sk;
sk               1211 net/core/skbuff.c 	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
sk               1227 net/core/skbuff.c 	q = &sk->sk_error_queue;
sk               1237 net/core/skbuff.c 	sk->sk_error_report(sk);
sk               1241 net/core/skbuff.c 	sock_put(sk);
sk               1259 net/core/skbuff.c 		struct sock *sk = skb_from_uarg(uarg)->sk;
sk               1261 net/core/skbuff.c 		atomic_dec(&sk->sk_zckey);
sk               1272 net/core/skbuff.c 	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
sk               1276 net/core/skbuff.c int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
sk               1290 net/core/skbuff.c 	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
sk               1292 net/core/skbuff.c 		struct sock *save_sk = skb->sk;
sk               1296 net/core/skbuff.c 		skb->sk = sk;
sk               1298 net/core/skbuff.c 		skb->sk = save_sk;
sk               1687 net/core/skbuff.c 	if (!skb->sk || skb->destructor == sock_edemux)
sk               2004 net/core/skbuff.c 	if (!skb->sk || skb->destructor == sock_edemux)
sk               2277 net/core/skbuff.c 				   struct sock *sk)
sk               2279 net/core/skbuff.c 	struct page_frag *pfrag = sk_page_frag(sk);
sk               2281 net/core/skbuff.c 	if (!sk_page_frag_refill(sk, pfrag))
sk               2311 net/core/skbuff.c 			  struct sock *sk)
sk               2317 net/core/skbuff.c 		page = linear_to_page(page, len, &offset, sk);
sk               2338 net/core/skbuff.c 			     struct sock *sk,
sk               2359 net/core/skbuff.c 				  linear, sk))
sk               2375 net/core/skbuff.c 			      struct splice_pipe_desc *spd, struct sock *sk)
sk               2390 net/core/skbuff.c 			     sk, pipe))
sk               2401 net/core/skbuff.c 				     offset, len, spd, false, sk, pipe))
sk               2414 net/core/skbuff.c 		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
sk               2425 net/core/skbuff.c int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
sk               2440 net/core/skbuff.c 	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
sk               2450 net/core/skbuff.c int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
sk               2471 net/core/skbuff.c 		ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
sk               2502 net/core/skbuff.c 			ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
sk               3976 net/core/skbuff.c 		swap(tail->sk, head_skb->sk);
sk               4376 net/core/skbuff.c 			if (skb1->sk)
sk               4377 net/core/skbuff.c 				skb_set_owner_w(skb2, skb1->sk);
sk               4398 net/core/skbuff.c 	struct sock *sk = skb->sk;
sk               4400 net/core/skbuff.c 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
sk               4415 net/core/skbuff.c int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
sk               4417 net/core/skbuff.c 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
sk               4418 net/core/skbuff.c 	    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
sk               4422 net/core/skbuff.c 	skb->sk = sk;
sk               4424 net/core/skbuff.c 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk               4430 net/core/skbuff.c 	skb_queue_tail(&sk->sk_error_queue, skb);
sk               4431 net/core/skbuff.c 	if (!sock_flag(sk, SOCK_DEAD))
sk               4432 net/core/skbuff.c 		sk->sk_error_report(sk);
sk               4443 net/core/skbuff.c struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
sk               4445 net/core/skbuff.c 	struct sk_buff_head *q = &sk->sk_error_queue;
sk               4455 net/core/skbuff.c 			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
sk               4460 net/core/skbuff.c 		sk->sk_err = 0;
sk               4463 net/core/skbuff.c 		sk->sk_error_report(sk);
sk               4484 net/core/skbuff.c 	struct sock *sk = skb->sk;
sk               4487 net/core/skbuff.c 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
sk               4492 net/core/skbuff.c 		sock_put(sk);
sk               4496 net/core/skbuff.c 	clone->sk = sk;
sk               4504 net/core/skbuff.c 					struct sock *sk,
sk               4520 net/core/skbuff.c 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
sk               4522 net/core/skbuff.c 		if (sk->sk_protocol == IPPROTO_TCP &&
sk               4523 net/core/skbuff.c 		    sk->sk_type == SOCK_STREAM)
sk               4524 net/core/skbuff.c 			serr->ee.ee_data -= sk->sk_tskey;
sk               4527 net/core/skbuff.c 	err = sock_queue_err_skb(sk, skb);
sk               4533 net/core/skbuff.c static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
sk               4540 net/core/skbuff.c 	read_lock_bh(&sk->sk_callback_lock);
sk               4541 net/core/skbuff.c 	ret = sk->sk_socket && sk->sk_socket->file &&
sk               4542 net/core/skbuff.c 	      file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
sk               4543 net/core/skbuff.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               4550 net/core/skbuff.c 	struct sock *sk = skb->sk;
sk               4552 net/core/skbuff.c 	if (!skb_may_tx_timestamp(sk, false))
sk               4558 net/core/skbuff.c 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
sk               4560 net/core/skbuff.c 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
sk               4561 net/core/skbuff.c 		sock_put(sk);
sk               4572 net/core/skbuff.c 		     struct sock *sk, int tstype)
sk               4577 net/core/skbuff.c 	if (!sk)
sk               4580 net/core/skbuff.c 	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
sk               4584 net/core/skbuff.c 	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
sk               4585 net/core/skbuff.c 	if (!skb_may_tx_timestamp(sk, tsonly))
sk               4590 net/core/skbuff.c 		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
sk               4591 net/core/skbuff.c 		    sk->sk_protocol == IPPROTO_TCP &&
sk               4592 net/core/skbuff.c 		    sk->sk_type == SOCK_STREAM) {
sk               4593 net/core/skbuff.c 			skb = tcp_get_timestamping_opt_stats(sk);
sk               4615 net/core/skbuff.c 	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
sk               4622 net/core/skbuff.c 	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
sk               4629 net/core/skbuff.c 	struct sock *sk = skb->sk;
sk               4644 net/core/skbuff.c 	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
sk               4645 net/core/skbuff.c 		err = sock_queue_err_skb(sk, skb);
sk               4646 net/core/skbuff.c 		sock_put(sk);
sk                 25 net/core/skmsg.c int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
sk                 28 net/core/skmsg.c 	struct page_frag *pfrag = sk_page_frag(sk);
sk                 37 net/core/skmsg.c 		if (!sk_page_frag_refill(sk, pfrag))
sk                 42 net/core/skmsg.c 		if (!sk_wmem_schedule(sk, use))
sk                 66 net/core/skmsg.c 		sk_mem_charge(sk, use);
sk                 76 net/core/skmsg.c int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
sk                116 net/core/skmsg.c 		sk_mem_charge(sk, sge_len);
sk                127 net/core/skmsg.c void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
sk                137 net/core/skmsg.c 			sk_mem_uncharge(sk, bytes);
sk                141 net/core/skmsg.c 		sk_mem_uncharge(sk, sge->length);
sk                151 net/core/skmsg.c void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
sk                159 net/core/skmsg.c 		sk_mem_uncharge(sk, uncharge);
sk                166 net/core/skmsg.c static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
sk                173 net/core/skmsg.c 		sk_mem_uncharge(sk, len);
sk                180 net/core/skmsg.c static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
sk                188 net/core/skmsg.c 		freed += sk_msg_free_elem(sk, msg, i, charge);
sk                198 net/core/skmsg.c int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
sk                200 net/core/skmsg.c 	return __sk_msg_free(sk, msg, msg->sg.start, false);
sk                204 net/core/skmsg.c int sk_msg_free(struct sock *sk, struct sk_msg *msg)
sk                206 net/core/skmsg.c 	return __sk_msg_free(sk, msg, msg->sg.start, true);
sk                210 net/core/skmsg.c static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
sk                222 net/core/skmsg.c 				sk_mem_uncharge(sk, bytes);
sk                231 net/core/skmsg.c 		sk_msg_free_elem(sk, msg, i, charge);
sk                238 net/core/skmsg.c void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
sk                240 net/core/skmsg.c 	__sk_msg_free_partial(sk, msg, bytes, true);
sk                244 net/core/skmsg.c void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
sk                247 net/core/skmsg.c 	__sk_msg_free_partial(sk, msg, bytes, false);
sk                250 net/core/skmsg.c void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
sk                265 net/core/skmsg.c 		sk_msg_free_elem(sk, msg, i, true);
sk                272 net/core/skmsg.c 	sk_mem_uncharge(sk, trim);
sk                298 net/core/skmsg.c int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
sk                331 net/core/skmsg.c 			sk_mem_charge(sk, use);
sk                356 net/core/skmsg.c int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
sk                379 net/core/skmsg.c 		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
sk                401 net/core/skmsg.c 	struct sock *sk = psock->sk;
sk                408 net/core/skmsg.c 	if (!sk_rmem_schedule(sk, skb, skb->len)) {
sk                420 net/core/skmsg.c 	sk_mem_charge(sk, skb->len);
sk                428 net/core/skmsg.c 	sk_psock_data_ready(sk, psock);
sk                438 net/core/skmsg.c 		return skb_send_sock_locked(psock->sk, skb, off, len);
sk                451 net/core/skmsg.c 	lock_sock(psock->sk);
sk                467 net/core/skmsg.c 			if (likely(psock->sk->sk_socket))
sk                491 net/core/skmsg.c 	release_sock(psock->sk);
sk                494 net/core/skmsg.c struct sk_psock *sk_psock_init(struct sock *sk, int node)
sk                502 net/core/skmsg.c 	psock->sk = sk;
sk                515 net/core/skmsg.c 	rcu_assign_sk_user_data(sk, psock);
sk                516 net/core/skmsg.c 	sock_hold(sk);
sk                541 net/core/skmsg.c 		sk_msg_free(psock->sk, msg);
sk                582 net/core/skmsg.c 	sock_put(psock->sk);
sk                595 net/core/skmsg.c void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
sk                600 net/core/skmsg.c 	write_lock_bh(&sk->sk_callback_lock);
sk                601 net/core/skmsg.c 	sk_psock_restore_proto(sk, psock);
sk                602 net/core/skmsg.c 	rcu_assign_sk_user_data(sk, NULL);
sk                604 net/core/skmsg.c 		sk_psock_stop_strp(sk, psock);
sk                605 net/core/skmsg.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                625 net/core/skmsg.c int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
sk                640 net/core/skmsg.c 	msg->sk = sk;
sk                666 net/core/skmsg.c 	skb->sk = psock->sk;
sk                677 net/core/skmsg.c 	skb->sk = NULL;
sk                698 net/core/skmsg.c 		sk_other = psock->sk;
sk                779 net/core/skmsg.c static void sk_psock_strp_data_ready(struct sock *sk)
sk                784 net/core/skmsg.c 	psock = sk_psock(sk);
sk                786 net/core/skmsg.c 		write_lock_bh(&sk->sk_callback_lock);
sk                788 net/core/skmsg.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                793 net/core/skmsg.c static void sk_psock_write_space(struct sock *sk)
sk                796 net/core/skmsg.c 	void (*write_space)(struct sock *sk) = NULL;
sk                799 net/core/skmsg.c 	psock = sk_psock(sk);
sk                807 net/core/skmsg.c 		write_space(sk);
sk                810 net/core/skmsg.c int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
sk                819 net/core/skmsg.c 	return strp_init(&psock->parser.strp, sk, &cb);
sk                822 net/core/skmsg.c void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
sk                829 net/core/skmsg.c 	parser->saved_data_ready = sk->sk_data_ready;
sk                830 net/core/skmsg.c 	sk->sk_data_ready = sk_psock_strp_data_ready;
sk                831 net/core/skmsg.c 	sk->sk_write_space = sk_psock_write_space;
sk                835 net/core/skmsg.c void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
sk                842 net/core/skmsg.c 	sk->sk_data_ready = parser->saved_data_ready;
sk                156 net/core/sock.c bool sk_ns_capable(const struct sock *sk,
sk                159 net/core/sock.c 	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
sk                173 net/core/sock.c bool sk_capable(const struct sock *sk, int cap)
sk                175 net/core/sock.c 	return sk_ns_capable(sk, &init_user_ns, cap);
sk                188 net/core/sock.c bool sk_net_capable(const struct sock *sk, int cap)
sk                190 net/core/sock.c 	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
sk                292 net/core/sock.c void sk_set_memalloc(struct sock *sk)
sk                294 net/core/sock.c 	sock_set_flag(sk, SOCK_MEMALLOC);
sk                295 net/core/sock.c 	sk->sk_allocation |= __GFP_MEMALLOC;
sk                300 net/core/sock.c void sk_clear_memalloc(struct sock *sk)
sk                302 net/core/sock.c 	sock_reset_flag(sk, SOCK_MEMALLOC);
sk                303 net/core/sock.c 	sk->sk_allocation &= ~__GFP_MEMALLOC;
sk                313 net/core/sock.c 	sk_mem_reclaim(sk);
sk                317 net/core/sock.c int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk                323 net/core/sock.c 	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
sk                326 net/core/sock.c 	ret = sk->sk_backlog_rcv(sk, skb);
sk                429 net/core/sock.c static bool sock_needs_netstamp(const struct sock *sk)
sk                431 net/core/sock.c 	switch (sk->sk_family) {
sk                440 net/core/sock.c static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
sk                442 net/core/sock.c 	if (sk->sk_flags & flags) {
sk                443 net/core/sock.c 		sk->sk_flags &= ~flags;
sk                444 net/core/sock.c 		if (sock_needs_netstamp(sk) &&
sk                445 net/core/sock.c 		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
sk                451 net/core/sock.c int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                454 net/core/sock.c 	struct sk_buff_head *list = &sk->sk_receive_queue;
sk                456 net/core/sock.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
sk                457 net/core/sock.c 		atomic_inc(&sk->sk_drops);
sk                458 net/core/sock.c 		trace_sock_rcvqueue_full(sk, skb);
sk                462 net/core/sock.c 	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
sk                463 net/core/sock.c 		atomic_inc(&sk->sk_drops);
sk                468 net/core/sock.c 	skb_set_owner_r(skb, sk);
sk                476 net/core/sock.c 	sock_skb_set_dropcount(sk, skb);
sk                480 net/core/sock.c 	if (!sock_flag(sk, SOCK_DEAD))
sk                481 net/core/sock.c 		sk->sk_data_ready(sk);
sk                486 net/core/sock.c int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                490 net/core/sock.c 	err = sk_filter(sk, skb);
sk                494 net/core/sock.c 	return __sock_queue_rcv_skb(sk, skb);
sk                498 net/core/sock.c int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
sk                503 net/core/sock.c 	if (sk_filter_trim_cap(sk, skb, trim_cap))
sk                508 net/core/sock.c 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
sk                509 net/core/sock.c 		atomic_inc(&sk->sk_drops);
sk                513 net/core/sock.c 		bh_lock_sock_nested(sk);
sk                515 net/core/sock.c 		bh_lock_sock(sk);
sk                516 net/core/sock.c 	if (!sock_owned_by_user(sk)) {
sk                520 net/core/sock.c 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
sk                522 net/core/sock.c 		rc = sk_backlog_rcv(sk, skb);
sk                524 net/core/sock.c 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
sk                525 net/core/sock.c 	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
sk                526 net/core/sock.c 		bh_unlock_sock(sk);
sk                527 net/core/sock.c 		atomic_inc(&sk->sk_drops);
sk                531 net/core/sock.c 	bh_unlock_sock(sk);
sk                534 net/core/sock.c 		sock_put(sk);
sk                542 net/core/sock.c struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
sk                544 net/core/sock.c 	struct dst_entry *dst = __sk_dst_get(sk);
sk                547 net/core/sock.c 		sk_tx_queue_clear(sk);
sk                548 net/core/sock.c 		sk->sk_dst_pending_confirm = 0;
sk                549 net/core/sock.c 		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
sk                558 net/core/sock.c struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
sk                560 net/core/sock.c 	struct dst_entry *dst = sk_dst_get(sk);
sk                563 net/core/sock.c 		sk_dst_reset(sk);
sk                572 net/core/sock.c static int sock_setbindtodevice_locked(struct sock *sk, int ifindex)
sk                576 net/core/sock.c 	struct net *net = sock_net(sk);
sk                587 net/core/sock.c 	sk->sk_bound_dev_if = ifindex;
sk                588 net/core/sock.c 	if (sk->sk_prot->rehash)
sk                589 net/core/sock.c 		sk->sk_prot->rehash(sk);
sk                590 net/core/sock.c 	sk_dst_reset(sk);
sk                600 net/core/sock.c static int sock_setbindtodevice(struct sock *sk, char __user *optval,
sk                605 net/core/sock.c 	struct net *net = sock_net(sk);
sk                640 net/core/sock.c 	lock_sock(sk);
sk                641 net/core/sock.c 	ret = sock_setbindtodevice_locked(sk, index);
sk                642 net/core/sock.c 	release_sock(sk);
sk                650 net/core/sock.c static int sock_getbindtodevice(struct sock *sk, char __user *optval,
sk                655 net/core/sock.c 	struct net *net = sock_net(sk);
sk                658 net/core/sock.c 	if (sk->sk_bound_dev_if == 0) {
sk                667 net/core/sock.c 	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
sk                690 net/core/sock.c static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
sk                693 net/core/sock.c 		sock_set_flag(sk, bit);
sk                695 net/core/sock.c 		sock_reset_flag(sk, bit);
sk                698 net/core/sock.c bool sk_mc_loop(struct sock *sk)
sk                702 net/core/sock.c 	if (!sk)
sk                704 net/core/sock.c 	switch (sk->sk_family) {
sk                706 net/core/sock.c 		return inet_sk(sk)->mc_loop;
sk                709 net/core/sock.c 		return inet6_sk(sk)->mc_loop;
sk                726 net/core/sock.c 	struct sock *sk = sock->sk;
sk                737 net/core/sock.c 		return sock_setbindtodevice(sk, optval, optlen);
sk                747 net/core/sock.c 	lock_sock(sk);
sk                754 net/core/sock.c 			sock_valbool_flag(sk, SOCK_DBG, valbool);
sk                757 net/core/sock.c 		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
sk                760 net/core/sock.c 		sk->sk_reuseport = valbool;
sk                769 net/core/sock.c 		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
sk                770 net/core/sock.c 		sk_dst_reset(sk);
sk                773 net/core/sock.c 		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
sk                787 net/core/sock.c 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk                788 net/core/sock.c 		WRITE_ONCE(sk->sk_sndbuf,
sk                791 net/core/sock.c 		sk->sk_write_space(sk);
sk                819 net/core/sock.c 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk                835 net/core/sock.c 		WRITE_ONCE(sk->sk_rcvbuf,
sk                853 net/core/sock.c 		if (sk->sk_prot->keepalive)
sk                854 net/core/sock.c 			sk->sk_prot->keepalive(sk, valbool);
sk                855 net/core/sock.c 		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
sk                859 net/core/sock.c 		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
sk                863 net/core/sock.c 		sk->sk_no_check_tx = valbool;
sk                868 net/core/sock.c 		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk                869 net/core/sock.c 			sk->sk_priority = val;
sk                884 net/core/sock.c 			sock_reset_flag(sk, SOCK_LINGER);
sk                888 net/core/sock.c 				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
sk                891 net/core/sock.c 				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
sk                892 net/core/sock.c 			sock_set_flag(sk, SOCK_LINGER);
sk                913 net/core/sock.c 				sock_set_flag(sk, SOCK_TSTAMP_NEW);
sk                915 net/core/sock.c 				sock_reset_flag(sk, SOCK_TSTAMP_NEW);
sk                918 net/core/sock.c 				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
sk                920 net/core/sock.c 				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
sk                921 net/core/sock.c 			sock_set_flag(sk, SOCK_RCVTSTAMP);
sk                922 net/core/sock.c 			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
sk                924 net/core/sock.c 			sock_reset_flag(sk, SOCK_RCVTSTAMP);
sk                925 net/core/sock.c 			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
sk                926 net/core/sock.c 			sock_reset_flag(sk, SOCK_TSTAMP_NEW);
sk                931 net/core/sock.c 		sock_set_flag(sk, SOCK_TSTAMP_NEW);
sk                940 net/core/sock.c 		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
sk                941 net/core/sock.c 			if (sk->sk_protocol == IPPROTO_TCP &&
sk                942 net/core/sock.c 			    sk->sk_type == SOCK_STREAM) {
sk                943 net/core/sock.c 				if ((1 << sk->sk_state) &
sk                948 net/core/sock.c 				sk->sk_tskey = tcp_sk(sk)->snd_una;
sk                950 net/core/sock.c 				sk->sk_tskey = 0;
sk                960 net/core/sock.c 		sk->sk_tsflags = val;
sk                962 net/core/sock.c 			sock_enable_timestamp(sk,
sk                966 net/core/sock.c 				sock_reset_flag(sk, SOCK_TSTAMP_NEW);
sk                968 net/core/sock.c 			sock_disable_timestamp(sk,
sk                977 net/core/sock.c 			ret = sock->ops->set_rcvlowat(sk, val);
sk                979 net/core/sock.c 			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
sk                984 net/core/sock.c 		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD);
sk                989 net/core/sock.c 		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD);
sk               1001 net/core/sock.c 			ret = sk_attach_filter(&fprog, sk);
sk               1014 net/core/sock.c 			ret = sk_attach_bpf(ufd, sk);
sk               1027 net/core/sock.c 			ret = sk_reuseport_attach_filter(&fprog, sk);
sk               1040 net/core/sock.c 			ret = sk_reuseport_attach_bpf(ufd, sk);
sk               1045 net/core/sock.c 		ret = reuseport_detach_prog(sk);
sk               1049 net/core/sock.c 		ret = sk_detach_filter(sk);
sk               1053 net/core/sock.c 		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
sk               1056 net/core/sock.c 			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
sk               1066 net/core/sock.c 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
sk               1068 net/core/sock.c 		} else if (val != sk->sk_mark) {
sk               1069 net/core/sock.c 			sk->sk_mark = val;
sk               1070 net/core/sock.c 			sk_dst_reset(sk);
sk               1075 net/core/sock.c 		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
sk               1079 net/core/sock.c 		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
sk               1084 net/core/sock.c 			ret = sock->ops->set_peek_off(sk, val);
sk               1090 net/core/sock.c 		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
sk               1094 net/core/sock.c 		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
sk               1100 net/core/sock.c 		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
sk               1106 net/core/sock.c 				sk->sk_ll_usec = val;
sk               1122 net/core/sock.c 			cmpxchg(&sk->sk_pacing_status,
sk               1125 net/core/sock.c 		sk->sk_max_pacing_rate = ulval;
sk               1126 net/core/sock.c 		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
sk               1130 net/core/sock.c 		WRITE_ONCE(sk->sk_incoming_cpu, val);
sk               1135 net/core/sock.c 			dst_negative_advice(sk);
sk               1139 net/core/sock.c 		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
sk               1140 net/core/sock.c 			if (!((sk->sk_type == SOCK_STREAM &&
sk               1141 net/core/sock.c 			       sk->sk_protocol == IPPROTO_TCP) ||
sk               1142 net/core/sock.c 			      (sk->sk_type == SOCK_DGRAM &&
sk               1143 net/core/sock.c 			       sk->sk_protocol == IPPROTO_UDP)))
sk               1145 net/core/sock.c 		} else if (sk->sk_family != PF_RDS) {
sk               1152 net/core/sock.c 				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
sk               1157 net/core/sock.c 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
sk               1167 net/core/sock.c 			sock_valbool_flag(sk, SOCK_TXTIME, true);
sk               1168 net/core/sock.c 			sk->sk_clockid = sk_txtime.clockid;
sk               1169 net/core/sock.c 			sk->sk_txtime_deadline_mode =
sk               1171 net/core/sock.c 			sk->sk_txtime_report_errors =
sk               1177 net/core/sock.c 		ret = sock_setbindtodevice_locked(sk, val);
sk               1184 net/core/sock.c 	release_sock(sk);
sk               1218 net/core/sock.c 	struct sock *sk = sock->sk;
sk               1243 net/core/sock.c 		v.val = sock_flag(sk, SOCK_DBG);
sk               1247 net/core/sock.c 		v.val = sock_flag(sk, SOCK_LOCALROUTE);
sk               1251 net/core/sock.c 		v.val = sock_flag(sk, SOCK_BROADCAST);
sk               1255 net/core/sock.c 		v.val = sk->sk_sndbuf;
sk               1259 net/core/sock.c 		v.val = sk->sk_rcvbuf;
sk               1263 net/core/sock.c 		v.val = sk->sk_reuse;
sk               1267 net/core/sock.c 		v.val = sk->sk_reuseport;
sk               1271 net/core/sock.c 		v.val = sock_flag(sk, SOCK_KEEPOPEN);
sk               1275 net/core/sock.c 		v.val = sk->sk_type;
sk               1279 net/core/sock.c 		v.val = sk->sk_protocol;
sk               1283 net/core/sock.c 		v.val = sk->sk_family;
sk               1287 net/core/sock.c 		v.val = -sock_error(sk);
sk               1289 net/core/sock.c 			v.val = xchg(&sk->sk_err_soft, 0);
sk               1293 net/core/sock.c 		v.val = sock_flag(sk, SOCK_URGINLINE);
sk               1297 net/core/sock.c 		v.val = sk->sk_no_check_tx;
sk               1301 net/core/sock.c 		v.val = sk->sk_priority;
sk               1306 net/core/sock.c 		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
sk               1307 net/core/sock.c 		v.ling.l_linger	= sk->sk_lingertime / HZ;
sk               1315 net/core/sock.c 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
sk               1316 net/core/sock.c 				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
sk               1317 net/core/sock.c 				!sock_flag(sk, SOCK_RCVTSTAMPNS);
sk               1321 net/core/sock.c 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
sk               1325 net/core/sock.c 		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
sk               1329 net/core/sock.c 		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
sk               1333 net/core/sock.c 		v.val = sk->sk_tsflags;
sk               1338 net/core/sock.c 		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
sk               1343 net/core/sock.c 		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
sk               1347 net/core/sock.c 		v.val = sk->sk_rcvlowat;
sk               1363 net/core/sock.c 		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
sk               1373 net/core/sock.c 		if (!sk->sk_peer_cred)
sk               1376 net/core/sock.c 		n = sk->sk_peer_cred->group_info->ngroups;
sk               1384 net/core/sock.c 				     sk->sk_peer_cred->group_info);
sk               1408 net/core/sock.c 		v.val = sk->sk_state == TCP_LISTEN;
sk               1419 net/core/sock.c 		v.val = sk->sk_mark;
sk               1423 net/core/sock.c 		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
sk               1427 net/core/sock.c 		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
sk               1434 net/core/sock.c 		v.val = sk->sk_peek_off;
sk               1437 net/core/sock.c 		v.val = sock_flag(sk, SOCK_NOFCS);
sk               1441 net/core/sock.c 		return sock_getbindtodevice(sk, optval, optlen, len);
sk               1444 net/core/sock.c 		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
sk               1451 net/core/sock.c 		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
sk               1459 net/core/sock.c 		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
sk               1464 net/core/sock.c 		v.val = sk->sk_ll_usec;
sk               1471 net/core/sock.c 			v.ulval = sk->sk_max_pacing_rate;
sk               1474 net/core/sock.c 			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
sk               1479 net/core/sock.c 		v.val = READ_ONCE(sk->sk_incoming_cpu);
sk               1486 net/core/sock.c 		sk_get_meminfo(sk, meminfo);
sk               1497 net/core/sock.c 		v.val = READ_ONCE(sk->sk_napi_id);
sk               1510 net/core/sock.c 		v.val64 = sock_gen_cookie(sk);
sk               1514 net/core/sock.c 		v.val = sock_flag(sk, SOCK_ZEROCOPY);
sk               1519 net/core/sock.c 		v.txtime.clockid = sk->sk_clockid;
sk               1520 net/core/sock.c 		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
sk               1522 net/core/sock.c 		v.txtime.flags |= sk->sk_txtime_report_errors ?
sk               1527 net/core/sock.c 		v.val = sk->sk_bound_dev_if;
sk               1552 net/core/sock.c static inline void sock_lock_init(struct sock *sk)
sk               1554 net/core/sock.c 	if (sk->sk_kern_sock)
sk               1556 net/core/sock.c 			sk,
sk               1557 net/core/sock.c 			af_family_kern_slock_key_strings[sk->sk_family],
sk               1558 net/core/sock.c 			af_family_kern_slock_keys + sk->sk_family,
sk               1559 net/core/sock.c 			af_family_kern_key_strings[sk->sk_family],
sk               1560 net/core/sock.c 			af_family_kern_keys + sk->sk_family);
sk               1563 net/core/sock.c 			sk,
sk               1564 net/core/sock.c 			af_family_slock_key_strings[sk->sk_family],
sk               1565 net/core/sock.c 			af_family_slock_keys + sk->sk_family,
sk               1566 net/core/sock.c 			af_family_key_strings[sk->sk_family],
sk               1567 net/core/sock.c 			af_family_keys + sk->sk_family);
sk               1594 net/core/sock.c 	struct sock *sk;
sk               1599 net/core/sock.c 		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
sk               1600 net/core/sock.c 		if (!sk)
sk               1601 net/core/sock.c 			return sk;
sk               1603 net/core/sock.c 			sk_prot_clear_nulls(sk, prot->obj_size);
sk               1605 net/core/sock.c 		sk = kmalloc(prot->obj_size, priority);
sk               1607 net/core/sock.c 	if (sk != NULL) {
sk               1608 net/core/sock.c 		if (security_sk_alloc(sk, family, priority))
sk               1613 net/core/sock.c 		sk_tx_queue_clear(sk);
sk               1616 net/core/sock.c 	return sk;
sk               1619 net/core/sock.c 	security_sk_free(sk);
sk               1622 net/core/sock.c 		kmem_cache_free(slab, sk);
sk               1624 net/core/sock.c 		kfree(sk);
sk               1628 net/core/sock.c static void sk_prot_free(struct proto *prot, struct sock *sk)
sk               1636 net/core/sock.c 	cgroup_sk_free(&sk->sk_cgrp_data);
sk               1637 net/core/sock.c 	mem_cgroup_sk_free(sk);
sk               1638 net/core/sock.c 	security_sk_free(sk);
sk               1640 net/core/sock.c 		kmem_cache_free(slab, sk);
sk               1642 net/core/sock.c 		kfree(sk);
sk               1657 net/core/sock.c 	struct sock *sk;
sk               1659 net/core/sock.c 	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
sk               1660 net/core/sock.c 	if (sk) {
sk               1661 net/core/sock.c 		sk->sk_family = family;
sk               1666 net/core/sock.c 		sk->sk_prot = sk->sk_prot_creator = prot;
sk               1667 net/core/sock.c 		sk->sk_kern_sock = kern;
sk               1668 net/core/sock.c 		sock_lock_init(sk);
sk               1669 net/core/sock.c 		sk->sk_net_refcnt = kern ? 0 : 1;
sk               1670 net/core/sock.c 		if (likely(sk->sk_net_refcnt)) {
sk               1675 net/core/sock.c 		sock_net_set(sk, net);
sk               1676 net/core/sock.c 		refcount_set(&sk->sk_wmem_alloc, 1);
sk               1678 net/core/sock.c 		mem_cgroup_sk_alloc(sk);
sk               1679 net/core/sock.c 		cgroup_sk_alloc(&sk->sk_cgrp_data);
sk               1680 net/core/sock.c 		sock_update_classid(&sk->sk_cgrp_data);
sk               1681 net/core/sock.c 		sock_update_netprioidx(&sk->sk_cgrp_data);
sk               1684 net/core/sock.c 	return sk;
sk               1693 net/core/sock.c 	struct sock *sk = container_of(head, struct sock, sk_rcu);
sk               1696 net/core/sock.c 	if (sk->sk_destruct)
sk               1697 net/core/sock.c 		sk->sk_destruct(sk);
sk               1699 net/core/sock.c 	filter = rcu_dereference_check(sk->sk_filter,
sk               1700 net/core/sock.c 				       refcount_read(&sk->sk_wmem_alloc) == 0);
sk               1702 net/core/sock.c 		sk_filter_uncharge(sk, filter);
sk               1703 net/core/sock.c 		RCU_INIT_POINTER(sk->sk_filter, NULL);
sk               1706 net/core/sock.c 	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
sk               1709 net/core/sock.c 	bpf_sk_storage_free(sk);
sk               1712 net/core/sock.c 	if (atomic_read(&sk->sk_omem_alloc))
sk               1714 net/core/sock.c 			 __func__, atomic_read(&sk->sk_omem_alloc));
sk               1716 net/core/sock.c 	if (sk->sk_frag.page) {
sk               1717 net/core/sock.c 		put_page(sk->sk_frag.page);
sk               1718 net/core/sock.c 		sk->sk_frag.page = NULL;
sk               1721 net/core/sock.c 	if (sk->sk_peer_cred)
sk               1722 net/core/sock.c 		put_cred(sk->sk_peer_cred);
sk               1723 net/core/sock.c 	put_pid(sk->sk_peer_pid);
sk               1724 net/core/sock.c 	if (likely(sk->sk_net_refcnt))
sk               1725 net/core/sock.c 		put_net(sock_net(sk));
sk               1726 net/core/sock.c 	sk_prot_free(sk->sk_prot_creator, sk);
sk               1729 net/core/sock.c void sk_destruct(struct sock *sk)
sk               1731 net/core/sock.c 	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
sk               1733 net/core/sock.c 	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
sk               1734 net/core/sock.c 		reuseport_detach_sock(sk);
sk               1739 net/core/sock.c 		call_rcu(&sk->sk_rcu, __sk_destruct);
sk               1741 net/core/sock.c 		__sk_destruct(&sk->sk_rcu);
sk               1744 net/core/sock.c static void __sk_free(struct sock *sk)
sk               1746 net/core/sock.c 	if (likely(sk->sk_net_refcnt))
sk               1747 net/core/sock.c 		sock_inuse_add(sock_net(sk), -1);
sk               1749 net/core/sock.c 	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
sk               1750 net/core/sock.c 		sock_diag_broadcast_destroy(sk);
sk               1752 net/core/sock.c 		sk_destruct(sk);
sk               1755 net/core/sock.c void sk_free(struct sock *sk)
sk               1762 net/core/sock.c 	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
sk               1763 net/core/sock.c 		__sk_free(sk);
sk               1767 net/core/sock.c static void sk_init_common(struct sock *sk)
sk               1769 net/core/sock.c 	skb_queue_head_init(&sk->sk_receive_queue);
sk               1770 net/core/sock.c 	skb_queue_head_init(&sk->sk_write_queue);
sk               1771 net/core/sock.c 	skb_queue_head_init(&sk->sk_error_queue);
sk               1773 net/core/sock.c 	rwlock_init(&sk->sk_callback_lock);
sk               1774 net/core/sock.c 	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
sk               1775 net/core/sock.c 			af_rlock_keys + sk->sk_family,
sk               1776 net/core/sock.c 			af_family_rlock_key_strings[sk->sk_family]);
sk               1777 net/core/sock.c 	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
sk               1778 net/core/sock.c 			af_wlock_keys + sk->sk_family,
sk               1779 net/core/sock.c 			af_family_wlock_key_strings[sk->sk_family]);
sk               1780 net/core/sock.c 	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
sk               1781 net/core/sock.c 			af_elock_keys + sk->sk_family,
sk               1782 net/core/sock.c 			af_family_elock_key_strings[sk->sk_family]);
sk               1783 net/core/sock.c 	lockdep_set_class_and_name(&sk->sk_callback_lock,
sk               1784 net/core/sock.c 			af_callback_keys + sk->sk_family,
sk               1785 net/core/sock.c 			af_family_clock_key_strings[sk->sk_family]);
sk               1795 net/core/sock.c struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
sk               1800 net/core/sock.c 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
sk               1804 net/core/sock.c 		sock_copy(newsk, sk);
sk               1806 net/core/sock.c 		newsk->sk_prot_creator = sk->sk_prot;
sk               1831 net/core/sock.c 		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
sk               1842 net/core/sock.c 		filter = rcu_dereference(sk->sk_filter);
sk               1852 net/core/sock.c 		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
sk               1865 net/core/sock.c 		if (bpf_sk_storage_clone(sk, newsk)) {
sk               1903 net/core/sock.c 		if (sock_needs_netstamp(sk) &&
sk               1912 net/core/sock.c void sk_free_unlock_clone(struct sock *sk)
sk               1916 net/core/sock.c 	sk->sk_destruct = NULL;
sk               1917 net/core/sock.c 	bh_unlock_sock(sk);
sk               1918 net/core/sock.c 	sk_free(sk);
sk               1922 net/core/sock.c void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
sk               1926 net/core/sock.c 	sk_dst_set(sk, dst);
sk               1927 net/core/sock.c 	sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
sk               1928 net/core/sock.c 	if (sk->sk_route_caps & NETIF_F_GSO)
sk               1929 net/core/sock.c 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
sk               1930 net/core/sock.c 	sk->sk_route_caps &= ~sk->sk_route_nocaps;
sk               1931 net/core/sock.c 	if (sk_can_gso(sk)) {
sk               1933 net/core/sock.c 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
sk               1935 net/core/sock.c 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
sk               1936 net/core/sock.c 			sk->sk_gso_max_size = dst->dev->gso_max_size;
sk               1940 net/core/sock.c 	sk->sk_gso_max_segs = max_segs;
sk               1954 net/core/sock.c 	struct sock *sk = skb->sk;
sk               1957 net/core/sock.c 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
sk               1962 net/core/sock.c 		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
sk               1963 net/core/sock.c 		sk->sk_write_space(sk);
sk               1970 net/core/sock.c 	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
sk               1971 net/core/sock.c 		__sk_free(sk);
sk               1980 net/core/sock.c 	struct sock *sk = skb->sk;
sk               1982 net/core/sock.c 	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
sk               1983 net/core/sock.c 		__sk_free(sk);
sk               1986 net/core/sock.c void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
sk               1989 net/core/sock.c 	skb->sk = sk;
sk               1991 net/core/sock.c 	if (unlikely(!sk_fullsock(sk))) {
sk               1993 net/core/sock.c 		sock_hold(sk);
sk               1998 net/core/sock.c 	skb_set_hash_from_sk(skb, sk);
sk               2004 net/core/sock.c 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
sk               2033 net/core/sock.c 		struct sock *sk = skb->sk;
sk               2035 net/core/sock.c 		if (refcount_inc_not_zero(&sk->sk_refcnt)) {
sk               2036 net/core/sock.c 			WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
sk               2050 net/core/sock.c 	struct sock *sk = skb->sk;
sk               2053 net/core/sock.c 	atomic_sub(len, &sk->sk_rmem_alloc);
sk               2054 net/core/sock.c 	sk_mem_uncharge(sk, len);
sk               2064 net/core/sock.c 	sock_put(skb->sk);
sk               2068 net/core/sock.c kuid_t sock_i_uid(struct sock *sk)
sk               2072 net/core/sock.c 	read_lock_bh(&sk->sk_callback_lock);
sk               2073 net/core/sock.c 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
sk               2074 net/core/sock.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               2079 net/core/sock.c unsigned long sock_i_ino(struct sock *sk)
sk               2083 net/core/sock.c 	read_lock_bh(&sk->sk_callback_lock);
sk               2084 net/core/sock.c 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
sk               2085 net/core/sock.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               2093 net/core/sock.c struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
sk               2097 net/core/sock.c 	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
sk               2101 net/core/sock.c 			skb_set_owner_w(skb, sk);
sk               2111 net/core/sock.c 	struct sock *sk = skb->sk;
sk               2113 net/core/sock.c 	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
sk               2116 net/core/sock.c struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
sk               2122 net/core/sock.c 	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
sk               2130 net/core/sock.c 	atomic_add(skb->truesize, &sk->sk_omem_alloc);
sk               2131 net/core/sock.c 	skb->sk = sk;
sk               2139 net/core/sock.c void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
sk               2142 net/core/sock.c 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
sk               2147 net/core/sock.c 		atomic_add(size, &sk->sk_omem_alloc);
sk               2151 net/core/sock.c 		atomic_sub(size, &sk->sk_omem_alloc);
sk               2161 net/core/sock.c static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
sk               2170 net/core/sock.c 	atomic_sub(size, &sk->sk_omem_alloc);
sk               2173 net/core/sock.c void sock_kfree_s(struct sock *sk, void *mem, int size)
sk               2175 net/core/sock.c 	__sock_kfree_s(sk, mem, size, false);
sk               2179 net/core/sock.c void sock_kzfree_s(struct sock *sk, void *mem, int size)
sk               2181 net/core/sock.c 	__sock_kfree_s(sk, mem, size, true);
sk               2188 net/core/sock.c static long sock_wait_for_wmem(struct sock *sk, long timeo)
sk               2192 net/core/sock.c 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk               2198 net/core/sock.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               2199 net/core/sock.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               2200 net/core/sock.c 		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
sk               2202 net/core/sock.c 		if (sk->sk_shutdown & SEND_SHUTDOWN)
sk               2204 net/core/sock.c 		if (sk->sk_err)
sk               2208 net/core/sock.c 	finish_wait(sk_sleep(sk), &wait);
sk               2217 net/core/sock.c struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
sk               2225 net/core/sock.c 	timeo = sock_sndtimeo(sk, noblock);
sk               2227 net/core/sock.c 		err = sock_error(sk);
sk               2232 net/core/sock.c 		if (sk->sk_shutdown & SEND_SHUTDOWN)
sk               2235 net/core/sock.c 		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
sk               2238 net/core/sock.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk               2239 net/core/sock.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               2245 net/core/sock.c 		timeo = sock_wait_for_wmem(sk, timeo);
sk               2248 net/core/sock.c 				   errcode, sk->sk_allocation);
sk               2250 net/core/sock.c 		skb_set_owner_w(skb, sk);
sk               2261 net/core/sock.c struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
sk               2264 net/core/sock.c 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
sk               2268 net/core/sock.c int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
sk               2275 net/core/sock.c 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               2293 net/core/sock.c 		if (!sock_flag(sk, SOCK_TXTIME))
sk               2310 net/core/sock.c int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
sk               2321 net/core/sock.c 		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
sk               2329 net/core/sock.c static void sk_enter_memory_pressure(struct sock *sk)
sk               2331 net/core/sock.c 	if (!sk->sk_prot->enter_memory_pressure)
sk               2334 net/core/sock.c 	sk->sk_prot->enter_memory_pressure(sk);
sk               2337 net/core/sock.c static void sk_leave_memory_pressure(struct sock *sk)
sk               2339 net/core/sock.c 	if (sk->sk_prot->leave_memory_pressure) {
sk               2340 net/core/sock.c 		sk->sk_prot->leave_memory_pressure(sk);
sk               2342 net/core/sock.c 		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
sk               2397 net/core/sock.c bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
sk               2399 net/core/sock.c 	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
sk               2402 net/core/sock.c 	sk_enter_memory_pressure(sk);
sk               2403 net/core/sock.c 	sk_stream_moderate_sndbuf(sk);
sk               2408 net/core/sock.c static void __lock_sock(struct sock *sk)
sk               2409 net/core/sock.c 	__releases(&sk->sk_lock.slock)
sk               2410 net/core/sock.c 	__acquires(&sk->sk_lock.slock)
sk               2415 net/core/sock.c 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
sk               2417 net/core/sock.c 		spin_unlock_bh(&sk->sk_lock.slock);
sk               2419 net/core/sock.c 		spin_lock_bh(&sk->sk_lock.slock);
sk               2420 net/core/sock.c 		if (!sock_owned_by_user(sk))
sk               2423 net/core/sock.c 	finish_wait(&sk->sk_lock.wq, &wait);
sk               2426 net/core/sock.c void __release_sock(struct sock *sk)
sk               2427 net/core/sock.c 	__releases(&sk->sk_lock.slock)
sk               2428 net/core/sock.c 	__acquires(&sk->sk_lock.slock)
sk               2432 net/core/sock.c 	while ((skb = sk->sk_backlog.head) != NULL) {
sk               2433 net/core/sock.c 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
sk               2435 net/core/sock.c 		spin_unlock_bh(&sk->sk_lock.slock);
sk               2442 net/core/sock.c 			sk_backlog_rcv(sk, skb);
sk               2449 net/core/sock.c 		spin_lock_bh(&sk->sk_lock.slock);
sk               2456 net/core/sock.c 	sk->sk_backlog.len = 0;
sk               2459 net/core/sock.c void __sk_flush_backlog(struct sock *sk)
sk               2461 net/core/sock.c 	spin_lock_bh(&sk->sk_lock.slock);
sk               2462 net/core/sock.c 	__release_sock(sk);
sk               2463 net/core/sock.c 	spin_unlock_bh(&sk->sk_lock.slock);
sk               2477 net/core/sock.c int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
sk               2482 net/core/sock.c 	add_wait_queue(sk_sleep(sk), &wait);
sk               2483 net/core/sock.c 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               2484 net/core/sock.c 	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
sk               2485 net/core/sock.c 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               2486 net/core/sock.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk               2500 net/core/sock.c int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
sk               2502 net/core/sock.c 	struct proto *prot = sk->sk_prot;
sk               2503 net/core/sock.c 	long allocated = sk_memory_allocated_add(sk, amt);
sk               2506 net/core/sock.c 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
sk               2507 net/core/sock.c 	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
sk               2511 net/core/sock.c 	if (allocated <= sk_prot_mem_limits(sk, 0)) {
sk               2512 net/core/sock.c 		sk_leave_memory_pressure(sk);
sk               2517 net/core/sock.c 	if (allocated > sk_prot_mem_limits(sk, 1))
sk               2518 net/core/sock.c 		sk_enter_memory_pressure(sk);
sk               2521 net/core/sock.c 	if (allocated > sk_prot_mem_limits(sk, 2))
sk               2526 net/core/sock.c 		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
sk               2530 net/core/sock.c 		int wmem0 = sk_get_wmem0(sk, prot);
sk               2532 net/core/sock.c 		if (sk->sk_type == SOCK_STREAM) {
sk               2533 net/core/sock.c 			if (sk->sk_wmem_queued < wmem0)
sk               2535 net/core/sock.c 		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
sk               2540 net/core/sock.c 	if (sk_has_memory_pressure(sk)) {
sk               2543 net/core/sock.c 		if (!sk_under_memory_pressure(sk))
sk               2545 net/core/sock.c 		alloc = sk_sockets_allocated_read_positive(sk);
sk               2546 net/core/sock.c 		if (sk_prot_mem_limits(sk, 2) > alloc *
sk               2547 net/core/sock.c 		    sk_mem_pages(sk->sk_wmem_queued +
sk               2548 net/core/sock.c 				 atomic_read(&sk->sk_rmem_alloc) +
sk               2549 net/core/sock.c 				 sk->sk_forward_alloc))
sk               2555 net/core/sock.c 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
sk               2556 net/core/sock.c 		sk_stream_moderate_sndbuf(sk);
sk               2561 net/core/sock.c 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
sk               2566 net/core/sock.c 		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
sk               2568 net/core/sock.c 	sk_memory_allocated_sub(sk, amt);
sk               2570 net/core/sock.c 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
sk               2571 net/core/sock.c 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
sk               2587 net/core/sock.c int __sk_mem_schedule(struct sock *sk, int size, int kind)
sk               2591 net/core/sock.c 	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
sk               2592 net/core/sock.c 	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
sk               2594 net/core/sock.c 		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
sk               2606 net/core/sock.c void __sk_mem_reduce_allocated(struct sock *sk, int amount)
sk               2608 net/core/sock.c 	sk_memory_allocated_sub(sk, amount);
sk               2610 net/core/sock.c 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
sk               2611 net/core/sock.c 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
sk               2613 net/core/sock.c 	if (sk_under_memory_pressure(sk) &&
sk               2614 net/core/sock.c 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
sk               2615 net/core/sock.c 		sk_leave_memory_pressure(sk);
sk               2624 net/core/sock.c void __sk_mem_reclaim(struct sock *sk, int amount)
sk               2627 net/core/sock.c 	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
sk               2628 net/core/sock.c 	__sk_mem_reduce_allocated(sk, amount);
sk               2632 net/core/sock.c int sk_set_peek_off(struct sock *sk, int val)
sk               2634 net/core/sock.c 	sk->sk_peek_off = val;
sk               2717 net/core/sock.c int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
sk               2751 net/core/sock.c ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
sk               2761 net/core/sock.c 	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
sk               2771 net/core/sock.c static void sock_def_wakeup(struct sock *sk)
sk               2776 net/core/sock.c 	wq = rcu_dereference(sk->sk_wq);
sk               2782 net/core/sock.c static void sock_def_error_report(struct sock *sk)
sk               2787 net/core/sock.c 	wq = rcu_dereference(sk->sk_wq);
sk               2790 net/core/sock.c 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
sk               2794 net/core/sock.c static void sock_def_readable(struct sock *sk)
sk               2799 net/core/sock.c 	wq = rcu_dereference(sk->sk_wq);
sk               2803 net/core/sock.c 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
sk               2807 net/core/sock.c static void sock_def_write_space(struct sock *sk)
sk               2816 net/core/sock.c 	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
sk               2817 net/core/sock.c 		wq = rcu_dereference(sk->sk_wq);
sk               2823 net/core/sock.c 		if (sock_writeable(sk))
sk               2824 net/core/sock.c 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
sk               2830 net/core/sock.c static void sock_def_destruct(struct sock *sk)
sk               2834 net/core/sock.c void sk_send_sigurg(struct sock *sk)
sk               2836 net/core/sock.c 	if (sk->sk_socket && sk->sk_socket->file)
sk               2837 net/core/sock.c 		if (send_sigurg(&sk->sk_socket->file->f_owner))
sk               2838 net/core/sock.c 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
sk               2842 net/core/sock.c void sk_reset_timer(struct sock *sk, struct timer_list* timer,
sk               2846 net/core/sock.c 		sock_hold(sk);
sk               2850 net/core/sock.c void sk_stop_timer(struct sock *sk, struct timer_list* timer)
sk               2853 net/core/sock.c 		__sock_put(sk);
sk               2857 net/core/sock.c void sock_init_data(struct socket *sock, struct sock *sk)
sk               2859 net/core/sock.c 	sk_init_common(sk);
sk               2860 net/core/sock.c 	sk->sk_send_head	=	NULL;
sk               2862 net/core/sock.c 	timer_setup(&sk->sk_timer, NULL, 0);
sk               2864 net/core/sock.c 	sk->sk_allocation	=	GFP_KERNEL;
sk               2865 net/core/sock.c 	sk->sk_rcvbuf		=	sysctl_rmem_default;
sk               2866 net/core/sock.c 	sk->sk_sndbuf		=	sysctl_wmem_default;
sk               2867 net/core/sock.c 	sk->sk_state		=	TCP_CLOSE;
sk               2868 net/core/sock.c 	sk_set_socket(sk, sock);
sk               2870 net/core/sock.c 	sock_set_flag(sk, SOCK_ZAPPED);
sk               2873 net/core/sock.c 		sk->sk_type	=	sock->type;
sk               2874 net/core/sock.c 		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
sk               2875 net/core/sock.c 		sock->sk	=	sk;
sk               2876 net/core/sock.c 		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
sk               2878 net/core/sock.c 		RCU_INIT_POINTER(sk->sk_wq, NULL);
sk               2879 net/core/sock.c 		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
sk               2882 net/core/sock.c 	rwlock_init(&sk->sk_callback_lock);
sk               2883 net/core/sock.c 	if (sk->sk_kern_sock)
sk               2885 net/core/sock.c 			&sk->sk_callback_lock,
sk               2886 net/core/sock.c 			af_kern_callback_keys + sk->sk_family,
sk               2887 net/core/sock.c 			af_family_kern_clock_key_strings[sk->sk_family]);
sk               2890 net/core/sock.c 			&sk->sk_callback_lock,
sk               2891 net/core/sock.c 			af_callback_keys + sk->sk_family,
sk               2892 net/core/sock.c 			af_family_clock_key_strings[sk->sk_family]);
sk               2894 net/core/sock.c 	sk->sk_state_change	=	sock_def_wakeup;
sk               2895 net/core/sock.c 	sk->sk_data_ready	=	sock_def_readable;
sk               2896 net/core/sock.c 	sk->sk_write_space	=	sock_def_write_space;
sk               2897 net/core/sock.c 	sk->sk_error_report	=	sock_def_error_report;
sk               2898 net/core/sock.c 	sk->sk_destruct		=	sock_def_destruct;
sk               2900 net/core/sock.c 	sk->sk_frag.page	=	NULL;
sk               2901 net/core/sock.c 	sk->sk_frag.offset	=	0;
sk               2902 net/core/sock.c 	sk->sk_peek_off		=	-1;
sk               2904 net/core/sock.c 	sk->sk_peer_pid 	=	NULL;
sk               2905 net/core/sock.c 	sk->sk_peer_cred	=	NULL;
sk               2906 net/core/sock.c 	sk->sk_write_pending	=	0;
sk               2907 net/core/sock.c 	sk->sk_rcvlowat		=	1;
sk               2908 net/core/sock.c 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
sk               2909 net/core/sock.c 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
sk               2911 net/core/sock.c 	sk->sk_stamp = SK_DEFAULT_STAMP;
sk               2913 net/core/sock.c 	seqlock_init(&sk->sk_stamp_seq);
sk               2915 net/core/sock.c 	atomic_set(&sk->sk_zckey, 0);
sk               2918 net/core/sock.c 	sk->sk_napi_id		=	0;
sk               2919 net/core/sock.c 	sk->sk_ll_usec		=	sysctl_net_busy_read;
sk               2922 net/core/sock.c 	sk->sk_max_pacing_rate = ~0UL;
sk               2923 net/core/sock.c 	sk->sk_pacing_rate = ~0UL;
sk               2924 net/core/sock.c 	WRITE_ONCE(sk->sk_pacing_shift, 10);
sk               2925 net/core/sock.c 	sk->sk_incoming_cpu = -1;
sk               2927 net/core/sock.c 	sk_rx_queue_clear(sk);
sk               2933 net/core/sock.c 	refcount_set(&sk->sk_refcnt, 1);
sk               2934 net/core/sock.c 	atomic_set(&sk->sk_drops, 0);
sk               2938 net/core/sock.c void lock_sock_nested(struct sock *sk, int subclass)
sk               2941 net/core/sock.c 	spin_lock_bh(&sk->sk_lock.slock);
sk               2942 net/core/sock.c 	if (sk->sk_lock.owned)
sk               2943 net/core/sock.c 		__lock_sock(sk);
sk               2944 net/core/sock.c 	sk->sk_lock.owned = 1;
sk               2945 net/core/sock.c 	spin_unlock(&sk->sk_lock.slock);
sk               2949 net/core/sock.c 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
sk               2954 net/core/sock.c void release_sock(struct sock *sk)
sk               2956 net/core/sock.c 	spin_lock_bh(&sk->sk_lock.slock);
sk               2957 net/core/sock.c 	if (sk->sk_backlog.tail)
sk               2958 net/core/sock.c 		__release_sock(sk);
sk               2963 net/core/sock.c 	if (sk->sk_prot->release_cb)
sk               2964 net/core/sock.c 		sk->sk_prot->release_cb(sk);
sk               2966 net/core/sock.c 	sock_release_ownership(sk);
sk               2967 net/core/sock.c 	if (waitqueue_active(&sk->sk_lock.wq))
sk               2968 net/core/sock.c 		wake_up(&sk->sk_lock.wq);
sk               2969 net/core/sock.c 	spin_unlock_bh(&sk->sk_lock.slock);
sk               2986 net/core/sock.c bool lock_sock_fast(struct sock *sk)
sk               2989 net/core/sock.c 	spin_lock_bh(&sk->sk_lock.slock);
sk               2991 net/core/sock.c 	if (!sk->sk_lock.owned)
sk               2997 net/core/sock.c 	__lock_sock(sk);
sk               2998 net/core/sock.c 	sk->sk_lock.owned = 1;
sk               2999 net/core/sock.c 	spin_unlock(&sk->sk_lock.slock);
sk               3003 net/core/sock.c 	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
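
The three helpers above, lock_sock_nested(), release_sock() and lock_sock_fast(), implement the socket owner lock: process context takes ownership, packets arriving in softirq context are parked on sk_backlog, and release_sock() replays that backlog before waking other lockers. A minimal sketch of the usual calling pattern, with my_proto_set_option() as an illustrative name:

	/* Sketch only: assumes process context; TCP_ESTABLISHED and
	 * sk_rcvlowat are just example state touched under the lock. */
	static int my_proto_set_option(struct sock *sk, int val)
	{
		int err = 0;

		lock_sock(sk);			/* may sleep; caller becomes owner */
		if (sk->sk_state != TCP_ESTABLISHED)
			err = -ENOTCONN;
		else
			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
		release_sock(sk);		/* runs backlog, wakes lock waiters */
		return err;
	}

lock_sock_fast() is the cheap variant: it returns whether the slow path had to be taken, and callers pass that value back to unlock_sock_fast().
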
sk               3012 net/core/sock.c 	struct sock *sk = sock->sk;
sk               3015 net/core/sock.c 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
sk               3016 net/core/sock.c 	ts = ktime_to_timespec64(sock_read_timestamp(sk));
sk               3021 net/core/sock.c 		sock_write_timestamp(sk, kt);
sk               3048 net/core/sock.c void sock_enable_timestamp(struct sock *sk, int flag)
sk               3050 net/core/sock.c 	if (!sock_flag(sk, flag)) {
sk               3051 net/core/sock.c 		unsigned long previous_flags = sk->sk_flags;
sk               3053 net/core/sock.c 		sock_set_flag(sk, flag);
sk               3059 net/core/sock.c 		if (sock_needs_netstamp(sk) &&
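
SOCK_TIMESTAMP is enabled lazily: the read-out path above calls sock_enable_timestamp() the first time userspace asks for a packet timestamp, typically through the SIOCGSTAMP ioctl. A hedged userspace sketch (Linux-specific headers assumed):

	/* Sketch: fetch the receive time of the last packet read on fd. */
	#include <sys/ioctl.h>
	#include <sys/time.h>
	#include <linux/sockios.h>
	#include <stdio.h>

	static void print_last_rx_stamp(int fd)
	{
		struct timeval tv;

		if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
			printf("last packet at %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
	}
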
sk               3065 net/core/sock.c int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
sk               3073 net/core/sock.c 	skb = sock_dequeue_err_skb(sk);
sk               3086 net/core/sock.c 	sock_recv_timestamp(msg, sk, skb);
sk               3111 net/core/sock.c 	struct sock *sk = sock->sk;
sk               3113 net/core/sock.c 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
sk               3121 net/core/sock.c 	struct sock *sk = sock->sk;
sk               3123 net/core/sock.c 	if (sk->sk_prot->compat_getsockopt != NULL)
sk               3124 net/core/sock.c 		return sk->sk_prot->compat_getsockopt(sk, level, optname,
sk               3126 net/core/sock.c 	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
sk               3134 net/core/sock.c 	struct sock *sk = sock->sk;
sk               3138 net/core/sock.c 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
sk               3152 net/core/sock.c 	struct sock *sk = sock->sk;
sk               3154 net/core/sock.c 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
sk               3162 net/core/sock.c 	struct sock *sk = sock->sk;
sk               3164 net/core/sock.c 	if (sk->sk_prot->compat_setsockopt != NULL)
sk               3165 net/core/sock.c 		return sk->sk_prot->compat_setsockopt(sk, level, optname,
sk               3167 net/core/sock.c 	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
sk               3172 net/core/sock.c void sk_common_release(struct sock *sk)
sk               3174 net/core/sock.c 	if (sk->sk_prot->destroy)
sk               3175 net/core/sock.c 		sk->sk_prot->destroy(sk);
sk               3185 net/core/sock.c 	sk->sk_prot->unhash(sk);
sk               3199 net/core/sock.c 	sock_orphan(sk);
sk               3201 net/core/sock.c 	xfrm_sk_free_policy(sk);
sk               3203 net/core/sock.c 	sk_refcnt_debug_release(sk);
sk               3205 net/core/sock.c 	sock_put(sk);
sk               3209 net/core/sock.c void sk_get_meminfo(const struct sock *sk, u32 *mem)
sk               3213 net/core/sock.c 	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
sk               3214 net/core/sock.c 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
sk               3215 net/core/sock.c 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
sk               3216 net/core/sock.c 	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
sk               3217 net/core/sock.c 	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
sk               3218 net/core/sock.c 	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
sk               3219 net/core/sock.c 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
sk               3220 net/core/sock.c 	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
sk               3221 net/core/sock.c 	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
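
sk_get_meminfo() fills a fixed-size array indexed by the SK_MEMINFO_* constants from <linux/sock_diag.h>; sock_diag_put_meminfo() further down simply copies that array into a netlink attribute. A small in-kernel sketch of a caller (the pr_info() formatting is illustrative):

	static void my_dump_meminfo(const struct sock *sk)
	{
		u32 mem[SK_MEMINFO_VARS];

		sk_get_meminfo(sk, mem);
		pr_info("rmem=%u rcvbuf=%u wmem=%u sndbuf=%u drops=%u\n",
			mem[SK_MEMINFO_RMEM_ALLOC], mem[SK_MEMINFO_RCVBUF],
			mem[SK_MEMINFO_WMEM_ALLOC], mem[SK_MEMINFO_SNDBUF],
			mem[SK_MEMINFO_DROPS]);
	}
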
sk               3604 net/core/sock.c 	struct sock *sk = p;
sk               3606 net/core/sock.c 	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
sk               3607 net/core/sock.c 	       sk_busy_loop_timeout(sk, start_time);
sk                 24 net/core/sock_diag.c u64 sock_gen_cookie(struct sock *sk)
sk                 27 net/core/sock_diag.c 		u64 res = atomic64_read(&sk->sk_cookie);
sk                 32 net/core/sock_diag.c 		atomic64_cmpxchg(&sk->sk_cookie, 0, res);
sk                 36 net/core/sock_diag.c int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
sk                 43 net/core/sock_diag.c 	res = sock_gen_cookie(sk);
sk                 51 net/core/sock_diag.c void sock_diag_save_cookie(struct sock *sk, __u32 *cookie)
sk                 53 net/core/sock_diag.c 	u64 res = sock_gen_cookie(sk);
sk                 60 net/core/sock_diag.c int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
sk                 64 net/core/sock_diag.c 	sk_get_meminfo(sk, mem);
sk                 70 net/core/sock_diag.c int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
sk                 85 net/core/sock_diag.c 	filter = rcu_dereference(sk->sk_filter);
sk                109 net/core/sock_diag.c 	struct sock *sk;
sk                124 net/core/sock_diag.c 	struct sock *sk = bsk->sk;
sk                127 net/core/sock_diag.c 	const enum sknetlink_groups group = sock_diag_destroy_group(sk);
sk                137 net/core/sock_diag.c 	hndl = sock_diag_handlers[sk->sk_family];
sk                139 net/core/sock_diag.c 		err = hndl->get_info(skb, sk);
sk                143 net/core/sock_diag.c 		nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group,
sk                148 net/core/sock_diag.c 	sk_destruct(sk);
sk                152 net/core/sock_diag.c void sock_diag_broadcast_destroy(struct sock *sk)
sk                158 net/core/sock_diag.c 		return sk_destruct(sk);
sk                159 net/core/sock_diag.c 	bsk->sk = sk;
sk                296 net/core/sock_diag.c int sock_diag_destroy(struct sock *sk, int err)
sk                298 net/core/sock_diag.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk                301 net/core/sock_diag.c 	if (!sk->sk_prot->diag_destroy)
sk                304 net/core/sock_diag.c 	return sk->sk_prot->diag_destroy(sk, err);
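
sock_gen_cookie() above uses a generate-on-first-read idiom: read the 64-bit cookie, and if it is still zero, publish a freshly allocated value with a cmpxchg so every racing caller ends up returning whichever value won. A minimal sketch of the idiom; alloc_fresh_id() is a hypothetical allocator standing in for the per-netns counter the real code uses:

	static u64 get_or_make_id(atomic64_t *slot)
	{
		u64 id = atomic64_read(slot);

		if (!id) {
			/* alloc_fresh_id() is hypothetical */
			atomic64_cmpxchg(slot, 0, alloc_fresh_id());
			id = atomic64_read(slot);	/* re-read the winner */
		}
		return id;
	}
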
sk                 79 net/core/sock_map.c static void sock_map_sk_acquire(struct sock *sk)
sk                 80 net/core/sock_map.c 	__acquires(&sk->sk_lock.slock)
sk                 82 net/core/sock_map.c 	lock_sock(sk);
sk                 87 net/core/sock_map.c static void sock_map_sk_release(struct sock *sk)
sk                 88 net/core/sock_map.c 	__releases(&sk->sk_lock.slock)
sk                 92 net/core/sock_map.c 	release_sock(sk);
sk                106 net/core/sock_map.c static void sock_map_del_link(struct sock *sk,
sk                126 net/core/sock_map.c 		write_lock_bh(&sk->sk_callback_lock);
sk                127 net/core/sock_map.c 		sk_psock_stop_strp(sk, psock);
sk                128 net/core/sock_map.c 		write_unlock_bh(&sk->sk_callback_lock);
sk                132 net/core/sock_map.c static void sock_map_unref(struct sock *sk, void *link_raw)
sk                134 net/core/sock_map.c 	struct sk_psock *psock = sk_psock(sk);
sk                137 net/core/sock_map.c 		sock_map_del_link(sk, psock, link_raw);
sk                138 net/core/sock_map.c 		sk_psock_put(sk, psock);
sk                143 net/core/sock_map.c 			 struct sock *sk)
sk                173 net/core/sock_map.c 	psock = sk_psock_get_checked(sk);
sk                182 net/core/sock_map.c 			sk_psock_put(sk, psock);
sk                187 net/core/sock_map.c 		psock = sk_psock_init(sk, map->numa_node);
sk                198 net/core/sock_map.c 		ret = tcp_bpf_init(sk);
sk                202 net/core/sock_map.c 		tcp_bpf_reinit(sk);
sk                205 net/core/sock_map.c 	write_lock_bh(&sk->sk_callback_lock);
sk                207 net/core/sock_map.c 		ret = sk_psock_init_strp(sk, psock);
sk                209 net/core/sock_map.c 			write_unlock_bh(&sk->sk_callback_lock);
sk                214 net/core/sock_map.c 		sk_psock_start_strp(sk, psock);
sk                216 net/core/sock_map.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                219 net/core/sock_map.c 	sk_psock_put(sk, psock);
sk                243 net/core/sock_map.c 		struct sock *sk;
sk                245 net/core/sock_map.c 		sk = xchg(psk, NULL);
sk                246 net/core/sock_map.c 		if (sk) {
sk                247 net/core/sock_map.c 			lock_sock(sk);
sk                249 net/core/sock_map.c 			sock_map_unref(sk, psk);
sk                251 net/core/sock_map.c 			release_sock(sk);
sk                286 net/core/sock_map.c 	struct sock *sk;
sk                290 net/core/sock_map.c 	sk = *psk;
sk                291 net/core/sock_map.c 	if (!sk_test || sk_test == sk)
sk                292 net/core/sock_map.c 		sk = xchg(psk, NULL);
sk                294 net/core/sock_map.c 	if (likely(sk))
sk                295 net/core/sock_map.c 		sock_map_unref(sk, psk);
sk                303 net/core/sock_map.c static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
sk                308 net/core/sock_map.c 	__sock_map_delete(stab, sk, link_raw);
sk                340 net/core/sock_map.c 				  struct sock *sk, u64 flags)
sk                343 net/core/sock_map.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                361 net/core/sock_map.c 	ret = sock_map_link(map, &stab->progs, sk);
sk                365 net/core/sock_map.c 	psock = sk_psock(sk);
sk                379 net/core/sock_map.c 	stab->sks[idx] = sk;
sk                387 net/core/sock_map.c 		sk_psock_put(sk, psock);
sk                399 net/core/sock_map.c static bool sock_map_sk_is_suitable(const struct sock *sk)
sk                401 net/core/sock_map.c 	return sk->sk_type == SOCK_STREAM &&
sk                402 net/core/sock_map.c 	       sk->sk_protocol == IPPROTO_TCP;
sk                411 net/core/sock_map.c 	struct sock *sk;
sk                417 net/core/sock_map.c 	sk = sock->sk;
sk                418 net/core/sock_map.c 	if (!sk) {
sk                422 net/core/sock_map.c 	if (!sock_map_sk_is_suitable(sk)) {
sk                427 net/core/sock_map.c 	sock_map_sk_acquire(sk);
sk                428 net/core/sock_map.c 	if (sk->sk_state != TCP_ESTABLISHED)
sk                431 net/core/sock_map.c 		ret = sock_map_update_common(map, idx, sk, flags);
sk                432 net/core/sock_map.c 	sock_map_sk_release(sk);
sk                443 net/core/sock_map.c 	if (likely(sock_map_sk_is_suitable(sops->sk) &&
sk                445 net/core/sock_map.c 		return sock_map_update_common(map, *(u32 *)key, sops->sk,
sk                521 net/core/sock_map.c 	struct sock *sk;
sk                579 net/core/sock_map.c 	return elem ? elem->sk : NULL;
sk                589 net/core/sock_map.c static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
sk                608 net/core/sock_map.c 		sock_map_unref(elem->sk, elem);
sk                629 net/core/sock_map.c 		sock_map_unref(elem->sk, elem);
sk                639 net/core/sock_map.c 						  u32 hash, struct sock *sk,
sk                658 net/core/sock_map.c 	new->sk = sk;
sk                664 net/core/sock_map.c 				   struct sock *sk, u64 flags)
sk                667 net/core/sock_map.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                685 net/core/sock_map.c 	ret = sock_map_link(map, &htab->progs, sk);
sk                689 net/core/sock_map.c 	psock = sk_psock(sk);
sk                705 net/core/sock_map.c 	elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
sk                718 net/core/sock_map.c 		sock_map_unref(elem->sk, elem);
sk                725 net/core/sock_map.c 	sk_psock_put(sk, psock);
sk                736 net/core/sock_map.c 	struct sock *sk;
sk                742 net/core/sock_map.c 	sk = sock->sk;
sk                743 net/core/sock_map.c 	if (!sk) {
sk                747 net/core/sock_map.c 	if (!sock_map_sk_is_suitable(sk)) {
sk                752 net/core/sock_map.c 	sock_map_sk_acquire(sk);
sk                753 net/core/sock_map.c 	if (sk->sk_state != TCP_ESTABLISHED)
sk                756 net/core/sock_map.c 		ret = sock_hash_update_common(map, key, sk, flags);
sk                757 net/core/sock_map.c 	sock_map_sk_release(sk);
sk                877 net/core/sock_map.c 			lock_sock(elem->sk);
sk                879 net/core/sock_map.c 			sock_map_unref(elem->sk, elem);
sk                881 net/core/sock_map.c 			release_sock(elem->sk);
sk                905 net/core/sock_map.c 	if (likely(sock_map_sk_is_suitable(sops->sk) &&
sk                907 net/core/sock_map.c 		return sock_hash_update_common(map, key, sops->sk, flags);
sk               1018 net/core/sock_map.c void sk_psock_unlink(struct sock *sk, struct sk_psock_link *link)
sk               1022 net/core/sock_map.c 		return sock_map_delete_from_link(link->map, sk,
sk               1025 net/core/sock_map.c 		return sock_hash_delete_from_link(link->map, sk,
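
sock_map_update_elem() and sock_hash_update_common() above only accept established TCP sockets (sock_map_sk_is_suitable() plus the TCP_ESTABLISHED check). A hedged userspace sketch of feeding such a socket into a sockmap with libbpf, assuming map_fd already refers to a BPF_MAP_TYPE_SOCKMAP created elsewhere:

	#include <bpf/bpf.h>

	/* tcp_fd must be a connected (established) TCP socket. */
	static int add_to_sockmap(int map_fd, int tcp_fd, __u32 slot)
	{
		/* value is the socket fd; BPF_ANY means create-or-update */
		return bpf_map_update_elem(map_fd, &slot, &tcp_fd, BPF_ANY);
	}
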
sk                 55 net/core/sock_reuseport.c int reuseport_alloc(struct sock *sk, bool bind_inany)
sk                 67 net/core/sock_reuseport.c 	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
sk                 85 net/core/sock_reuseport.c 	reuse->socks[0] = sk;
sk                 88 net/core/sock_reuseport.c 	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
sk                151 net/core/sock_reuseport.c int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
sk                165 net/core/sock_reuseport.c 	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
sk                180 net/core/sock_reuseport.c 	reuse->socks[reuse->num_socks] = sk;
sk                184 net/core/sock_reuseport.c 	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
sk                194 net/core/sock_reuseport.c void reuseport_detach_sock(struct sock *sk)
sk                200 net/core/sock_reuseport.c 	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
sk                208 net/core/sock_reuseport.c 		bpf_sk_reuseport_detach(sk);
sk                210 net/core/sock_reuseport.c 	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);
sk                213 net/core/sock_reuseport.c 		if (reuse->socks[i] == sk) {
sk                265 net/core/sock_reuseport.c struct sock *reuseport_select_sock(struct sock *sk,
sk                276 net/core/sock_reuseport.c 	reuse = rcu_dereference(sk->sk_reuseport_cb);
sk                292 net/core/sock_reuseport.c 			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
sk                319 net/core/sock_reuseport.c int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
sk                324 net/core/sock_reuseport.c 	if (sk_unhashed(sk) && sk->sk_reuseport) {
sk                325 net/core/sock_reuseport.c 		int err = reuseport_alloc(sk, false);
sk                329 net/core/sock_reuseport.c 	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
sk                335 net/core/sock_reuseport.c 	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
sk                347 net/core/sock_reuseport.c int reuseport_detach_prog(struct sock *sk)
sk                352 net/core/sock_reuseport.c 	if (!rcu_access_pointer(sk->sk_reuseport_cb))
sk                353 net/core/sock_reuseport.c 		return sk->sk_reuseport ? -ENOENT : -EINVAL;
sk                357 net/core/sock_reuseport.c 	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
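
reuseport_alloc()/reuseport_add_sock() above maintain the group that reuseport_select_sock() later picks from. From userspace the group is created simply by having every listener set SO_REUSEPORT before bind(); a sketch with error handling trimmed:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <string.h>
	#include <unistd.h>

	static int reuseport_listener(unsigned short port)
	{
		struct sockaddr_in addr;
		int one = 1;
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;
		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

		memset(&addr, 0, sizeof(addr));
		addr.sin_family = AF_INET;
		addr.sin_port = htons(port);
		addr.sin_addr.s_addr = htonl(INADDR_ANY);
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
		    listen(fd, 128)) {
			close(fd);
			return -1;
		}
		return fd;
	}
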
sk                 30 net/core/stream.c void sk_stream_write_space(struct sock *sk)
sk                 32 net/core/stream.c 	struct socket *sock = sk->sk_socket;
sk                 35 net/core/stream.c 	if (__sk_stream_is_writeable(sk, 1) && sock) {
sk                 39 net/core/stream.c 		wq = rcu_dereference(sk->sk_wq);
sk                 43 net/core/stream.c 		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sk                 56 net/core/stream.c int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
sk                 63 net/core/stream.c 		int err = sock_error(sk);
sk                 66 net/core/stream.c 		if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
sk                 73 net/core/stream.c 		add_wait_queue(sk_sleep(sk), &wait);
sk                 74 net/core/stream.c 		sk->sk_write_pending++;
sk                 75 net/core/stream.c 		done = sk_wait_event(sk, timeo_p,
sk                 76 net/core/stream.c 				     !sk->sk_err &&
sk                 77 net/core/stream.c 				     !((1 << sk->sk_state) &
sk                 79 net/core/stream.c 		remove_wait_queue(sk_sleep(sk), &wait);
sk                 80 net/core/stream.c 		sk->sk_write_pending--;
sk                 90 net/core/stream.c static inline int sk_stream_closing(struct sock *sk)
sk                 92 net/core/stream.c 	return (1 << sk->sk_state) &
sk                 96 net/core/stream.c void sk_stream_wait_close(struct sock *sk, long timeout)
sk                101 net/core/stream.c 		add_wait_queue(sk_sleep(sk), &wait);
sk                104 net/core/stream.c 			if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk), &wait))
sk                108 net/core/stream.c 		remove_wait_queue(sk_sleep(sk), &wait);
sk                118 net/core/stream.c int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
sk                125 net/core/stream.c 	if (sk_stream_memory_free(sk))
sk                128 net/core/stream.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                131 net/core/stream.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                133 net/core/stream.c 		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
sk                139 net/core/stream.c 		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                140 net/core/stream.c 		if (sk_stream_memory_free(sk) && !vm_wait)
sk                143 net/core/stream.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                144 net/core/stream.c 		sk->sk_write_pending++;
sk                145 net/core/stream.c 		sk_wait_event(sk, &current_timeo, sk->sk_err ||
sk                146 net/core/stream.c 						  (sk->sk_shutdown & SEND_SHUTDOWN) ||
sk                147 net/core/stream.c 						  (sk_stream_memory_free(sk) &&
sk                149 net/core/stream.c 		sk->sk_write_pending--;
sk                162 net/core/stream.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                174 net/core/stream.c 	set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                183 net/core/stream.c int sk_stream_error(struct sock *sk, int flags, int err)
sk                186 net/core/stream.c 		err = sock_error(sk) ? : -EPIPE;
sk                193 net/core/stream.c void sk_stream_kill_queues(struct sock *sk)
sk                196 net/core/stream.c 	__skb_queue_purge(&sk->sk_receive_queue);
sk                199 net/core/stream.c 	__skb_queue_purge(&sk->sk_error_queue);
sk                202 net/core/stream.c 	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
sk                205 net/core/stream.c 	sk_mem_reclaim(sk);
sk                207 net/core/stream.c 	WARN_ON(sk->sk_wmem_queued);
sk                208 net/core/stream.c 	WARN_ON(sk->sk_forward_alloc);
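
The sk_stream_wait_*() helpers above are the building blocks of a stream protocol's blocking send path. A kernel-internal sketch of how a sendmsg implementation typically strings them together (my_stream_sendmsg() is illustrative; a real one would also copy the payload and return the byte count):

	static int my_stream_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
	{
		long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
		int err;

		lock_sock(sk);
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out;

		while (!sk_stream_memory_free(sk)) {
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out;
		}
		/* ... copy data from msg and queue skbs here ... */
	out:
		release_sock(sk);
		return err ? sk_stream_error(sk, msg->msg_flags, err) : 0;
	}
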
sk                 28 net/core/timestamping.c 	if (!skb->sk)
sk               1735 net/dcb/dcbnl.c 	struct net *net = sock_net(skb->sk);
sk                 60 net/dccp/ccid.c int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
sk                140 net/dccp/ccid.c struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx)
sk                156 net/dccp/ccid.c 		    ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
sk                161 net/dccp/ccid.c 		    ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
sk                173 net/dccp/ccid.c void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
sk                177 net/dccp/ccid.c 			ccid->ccid_ops->ccid_hc_rx_exit(sk);
sk                182 net/dccp/ccid.c void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
sk                186 net/dccp/ccid.c 			ccid->ccid_ops->ccid_hc_tx_exit(sk);
sk                 56 net/dccp/ccid.h 	int		(*ccid_hc_rx_init)(struct ccid *ccid, struct sock *sk);
sk                 57 net/dccp/ccid.h 	int		(*ccid_hc_tx_init)(struct ccid *ccid, struct sock *sk);
sk                 58 net/dccp/ccid.h 	void		(*ccid_hc_rx_exit)(struct sock *sk);
sk                 59 net/dccp/ccid.h 	void		(*ccid_hc_tx_exit)(struct sock *sk);
sk                 60 net/dccp/ccid.h 	void		(*ccid_hc_rx_packet_recv)(struct sock *sk,
sk                 62 net/dccp/ccid.h 	int		(*ccid_hc_rx_parse_options)(struct sock *sk, u8 pkt,
sk                 64 net/dccp/ccid.h 	int		(*ccid_hc_rx_insert_options)(struct sock *sk,
sk                 66 net/dccp/ccid.h 	void		(*ccid_hc_tx_packet_recv)(struct sock *sk,
sk                 68 net/dccp/ccid.h 	int		(*ccid_hc_tx_parse_options)(struct sock *sk, u8 pkt,
sk                 70 net/dccp/ccid.h 	int		(*ccid_hc_tx_send_packet)(struct sock *sk,
sk                 72 net/dccp/ccid.h 	void		(*ccid_hc_tx_packet_sent)(struct sock *sk,
sk                 74 net/dccp/ccid.h 	void		(*ccid_hc_rx_get_info)(struct sock *sk,
sk                 76 net/dccp/ccid.h 	void		(*ccid_hc_tx_get_info)(struct sock *sk,
sk                 78 net/dccp/ccid.h 	int		(*ccid_hc_rx_getsockopt)(struct sock *sk,
sk                 82 net/dccp/ccid.h 	int		(*ccid_hc_tx_getsockopt)(struct sock *sk,
sk                108 net/dccp/ccid.h int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
sk                111 net/dccp/ccid.h struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
sk                131 net/dccp/ccid.h void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
sk                132 net/dccp/ccid.h void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
sk                163 net/dccp/ccid.h static inline int ccid_hc_tx_send_packet(struct ccid *ccid, struct sock *sk,
sk                167 net/dccp/ccid.h 		return ccid->ccid_ops->ccid_hc_tx_send_packet(sk, skb);
sk                171 net/dccp/ccid.h static inline void ccid_hc_tx_packet_sent(struct ccid *ccid, struct sock *sk,
sk                175 net/dccp/ccid.h 		ccid->ccid_ops->ccid_hc_tx_packet_sent(sk, len);
sk                178 net/dccp/ccid.h static inline void ccid_hc_rx_packet_recv(struct ccid *ccid, struct sock *sk,
sk                182 net/dccp/ccid.h 		ccid->ccid_ops->ccid_hc_rx_packet_recv(sk, skb);
sk                185 net/dccp/ccid.h static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
sk                189 net/dccp/ccid.h 		ccid->ccid_ops->ccid_hc_tx_packet_recv(sk, skb);
sk                199 net/dccp/ccid.h static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
sk                204 net/dccp/ccid.h 	return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
sk                211 net/dccp/ccid.h static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
sk                216 net/dccp/ccid.h 	return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
sk                219 net/dccp/ccid.h static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
sk                223 net/dccp/ccid.h 		return ccid->ccid_ops->ccid_hc_rx_insert_options(sk, skb);
sk                227 net/dccp/ccid.h static inline void ccid_hc_rx_get_info(struct ccid *ccid, struct sock *sk,
sk                231 net/dccp/ccid.h 		ccid->ccid_ops->ccid_hc_rx_get_info(sk, info);
sk                234 net/dccp/ccid.h static inline void ccid_hc_tx_get_info(struct ccid *ccid, struct sock *sk,
sk                238 net/dccp/ccid.h 		ccid->ccid_ops->ccid_hc_tx_get_info(sk, info);
sk                241 net/dccp/ccid.h static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk,
sk                247 net/dccp/ccid.h 		rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len,
sk                252 net/dccp/ccid.h static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk,
sk                258 net/dccp/ccid.h 		rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len,
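
The ccid_operations table above is the interface a DCCP congestion-control module implements; the inline wrappers simply guard each call with a NULL check. A hedged sketch of wiring up a module, filling in only hooks visible in this header (the my_ccid_* names are illustrative stubs; the in-tree tables such as ccid2_ops also set the CCID id, name and per-socket object sizes):

	static int my_ccid_hc_tx_init(struct ccid *ccid, struct sock *sk)
	{
		/* allocate/initialise per-socket TX state here */
		return 0;
	}

	static void my_ccid_hc_tx_packet_sent(struct sock *sk, unsigned int len)
	{
		/* account the sent bytes against the congestion window */
	}

	static struct ccid_operations my_ccid_ops = {
		.ccid_hc_tx_init	= my_ccid_hc_tx_init,
		.ccid_hc_tx_packet_sent	= my_ccid_hc_tx_packet_sent,
	};
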
sk                 67 net/dccp/ccids/ccid2.c static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
sk                 69 net/dccp/ccids/ccid2.c 	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
sk                 74 net/dccp/ccids/ccid2.c static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
sk                 76 net/dccp/ccids/ccid2.c 	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
sk                 88 net/dccp/ccids/ccid2.c 	dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
sk                 92 net/dccp/ccids/ccid2.c static void ccid2_check_l_ack_ratio(struct sock *sk)
sk                 94 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                106 net/dccp/ccids/ccid2.c 	if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
sk                107 net/dccp/ccids/ccid2.c 		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
sk                110 net/dccp/ccids/ccid2.c static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
sk                112 net/dccp/ccids/ccid2.c 	dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
sk                117 net/dccp/ccids/ccid2.c static void dccp_tasklet_schedule(struct sock *sk)
sk                119 net/dccp/ccids/ccid2.c 	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
sk                122 net/dccp/ccids/ccid2.c 		sock_hold(sk);
sk                130 net/dccp/ccids/ccid2.c 	struct sock *sk = hc->sk;
sk                133 net/dccp/ccids/ccid2.c 	bh_lock_sock(sk);
sk                134 net/dccp/ccids/ccid2.c 	if (sock_owned_by_user(sk)) {
sk                135 net/dccp/ccids/ccid2.c 		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
sk                141 net/dccp/ccids/ccid2.c 	if (sk->sk_state == DCCP_CLOSED)
sk                163 net/dccp/ccids/ccid2.c 	ccid2_change_l_ack_ratio(sk, 1);
sk                167 net/dccp/ccids/ccid2.c 		dccp_tasklet_schedule(sk);
sk                169 net/dccp/ccids/ccid2.c 	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
sk                171 net/dccp/ccids/ccid2.c 	bh_unlock_sock(sk);
sk                172 net/dccp/ccids/ccid2.c 	sock_put(sk);
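
ccid2_hc_tx_rto_expire() above shows the standard deferred-timer pattern for socket timers: take bh_lock_sock(), and if user context currently owns the socket just re-arm and retry shortly, otherwise do the work; the final sock_put() drops the reference taken when the timer was armed. A minimal sketch with illustrative names:

	struct my_state {
		struct sock *sk;
		struct timer_list timer;
	};

	static void my_timer_expire(struct timer_list *t)
	{
		struct my_state *st = from_timer(st, t, timer);
		struct sock *sk = st->sk;

		bh_lock_sock(sk);
		if (sock_owned_by_user(sk)) {
			/* user context holds the lock: retry a bit later */
			sk_reset_timer(sk, &st->timer, jiffies + HZ / 5);
			goto out;
		}
		/* ... actual expiry handling here ... */
	out:
		bh_unlock_sock(sk);
		sock_put(sk);	/* pairs with the sock_hold() done when arming */
	}
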
sk                195 net/dccp/ccids/ccid2.c static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
sk                197 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                199 net/dccp/ccids/ccid2.c 	u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache),
sk                210 net/dccp/ccids/ccid2.c 	ccid2_check_l_ack_ratio(sk);
sk                214 net/dccp/ccids/ccid2.c static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
sk                216 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                218 net/dccp/ccids/ccid2.c 	    iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
sk                232 net/dccp/ccids/ccid2.c 	ccid2_check_l_ack_ratio(sk);
sk                235 net/dccp/ccids/ccid2.c static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
sk                237 net/dccp/ccids/ccid2.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                238 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                245 net/dccp/ccids/ccid2.c 		ccid2_cwnd_restart(sk, now);
sk                262 net/dccp/ccids/ccid2.c 			ccid2_cwnd_application_limited(sk, now);
sk                323 net/dccp/ccids/ccid2.c 				ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
sk                333 net/dccp/ccids/ccid2.c 	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
sk                359 net/dccp/ccids/ccid2.c static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
sk                361 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                369 net/dccp/ccids/ccid2.c 		hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
sk                372 net/dccp/ccids/ccid2.c 		hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
sk                407 net/dccp/ccids/ccid2.c 		if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
sk                411 net/dccp/ccids/ccid2.c 			hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
sk                412 net/dccp/ccids/ccid2.c 			hc->tx_mdev_max = tcp_rto_min(sk);
sk                429 net/dccp/ccids/ccid2.c static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
sk                432 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                433 net/dccp/ccids/ccid2.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                455 net/dccp/ccids/ccid2.c 		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
sk                457 net/dccp/ccids/ccid2.c 		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
sk                460 net/dccp/ccids/ccid2.c 		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
sk                462 net/dccp/ccids/ccid2.c 		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
sk                472 net/dccp/ccids/ccid2.c 	ccid2_rtt_estimator(sk, ccid2_jiffies32 - seqp->ccid2s_sent);
sk                475 net/dccp/ccids/ccid2.c static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
sk                477 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                489 net/dccp/ccids/ccid2.c 	ccid2_check_l_ack_ratio(sk);
sk                492 net/dccp/ccids/ccid2.c static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
sk                495 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                506 net/dccp/ccids/ccid2.c static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
sk                508 net/dccp/ccids/ccid2.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                509 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                548 net/dccp/ccids/ccid2.c 				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
sk                618 net/dccp/ccids/ccid2.c 						ccid2_congestion_event(sk,
sk                621 net/dccp/ccids/ccid2.c 						ccid2_new_ack(sk, seqp,
sk                682 net/dccp/ccids/ccid2.c 				ccid2_congestion_event(sk, seqp);
sk                703 net/dccp/ccids/ccid2.c 		sk_stop_timer(sk, &hc->tx_rtotimer);
sk                705 net/dccp/ccids/ccid2.c 		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
sk                709 net/dccp/ccids/ccid2.c 		dccp_tasklet_schedule(sk);
sk                713 net/dccp/ccids/ccid2.c static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
sk                716 net/dccp/ccids/ccid2.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                739 net/dccp/ccids/ccid2.c 	hc->sk		 = sk;
sk                745 net/dccp/ccids/ccid2.c static void ccid2_hc_tx_exit(struct sock *sk)
sk                747 net/dccp/ccids/ccid2.c 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
sk                750 net/dccp/ccids/ccid2.c 	sk_stop_timer(sk, &hc->tx_rtotimer);
sk                758 net/dccp/ccids/ccid2.c static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
sk                760 net/dccp/ccids/ccid2.c 	struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);
sk                765 net/dccp/ccids/ccid2.c 	if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) {
sk                766 net/dccp/ccids/ccid2.c 		dccp_send_ack(sk);
sk                 75 net/dccp/ccids/ccid2.h 	struct sock		*sk;
sk                112 net/dccp/ccids/ccid2.h static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk)
sk                114 net/dccp/ccids/ccid2.h 	return ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);
sk                117 net/dccp/ccids/ccid2.h static inline struct ccid2_hc_rx_sock *ccid2_hc_rx_sk(const struct sock *sk)
sk                119 net/dccp/ccids/ccid2.h 	return ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid);
sk                 50 net/dccp/ccids/ccid3.c static void ccid3_hc_tx_set_state(struct sock *sk,
sk                 53 net/dccp/ccids/ccid3.c 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk                 57 net/dccp/ccids/ccid3.c 		       dccp_role(sk), sk, ccid3_tx_state_name(oldstate),
sk                 72 net/dccp/ccids/ccid3.c static inline u64 rfc3390_initial_rate(struct sock *sk)
sk                 74 net/dccp/ccids/ccid3.c 	const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk                111 net/dccp/ccids/ccid3.c static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
sk                113 net/dccp/ccids/ccid3.c 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk                125 net/dccp/ccids/ccid3.c 		min_rate = rfc3390_initial_rate(sk);
sk                188 net/dccp/ccids/ccid3.c 	struct sock *sk = hc->sk;
sk                191 net/dccp/ccids/ccid3.c 	bh_lock_sock(sk);
sk                192 net/dccp/ccids/ccid3.c 	if (sock_owned_by_user(sk)) {
sk                198 net/dccp/ccids/ccid3.c 	ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
sk                202 net/dccp/ccids/ccid3.c 	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
sk                207 net/dccp/ccids/ccid3.c 		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
sk                238 net/dccp/ccids/ccid3.c 		ccid3_hc_tx_update_x(sk, NULL);
sk                253 net/dccp/ccids/ccid3.c 	sk_reset_timer(sk, &hc->tx_no_feedback_timer,
sk                256 net/dccp/ccids/ccid3.c 	bh_unlock_sock(sk);
sk                257 net/dccp/ccids/ccid3.c 	sock_put(sk);
sk                267 net/dccp/ccids/ccid3.c static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
sk                269 net/dccp/ccids/ccid3.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                270 net/dccp/ccids/ccid3.c 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk                283 net/dccp/ccids/ccid3.c 		sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
sk                301 net/dccp/ccids/ccid3.c 			hc->tx_x    = rfc3390_initial_rate(sk);
sk                316 net/dccp/ccids/ccid3.c 		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
sk                344 net/dccp/ccids/ccid3.c static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len)
sk                346 net/dccp/ccids/ccid3.c 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk                350 net/dccp/ccids/ccid3.c 	if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss))
sk                354 net/dccp/ccids/ccid3.c static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
sk                356 net/dccp/ccids/ccid3.c 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk                382 net/dccp/ccids/ccid3.c 	r_sample  = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp));
sk                389 net/dccp/ccids/ccid3.c 		ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
sk                395 net/dccp/ccids/ccid3.c 			hc->tx_x    = rfc3390_initial_rate(sk);
sk                412 net/dccp/ccids/ccid3.c 	ccid3_hc_tx_update_x(sk, &now);
sk                417 net/dccp/ccids/ccid3.c 			       dccp_role(sk), sk, hc->tx_rtt, r_sample,
sk                423 net/dccp/ccids/ccid3.c 	sk_stop_timer(sk, &hc->tx_no_feedback_timer);
sk                429 net/dccp/ccids/ccid3.c 	sk->sk_write_space(sk);
sk                437 net/dccp/ccids/ccid3.c 				  USEC_PER_SEC/HZ * tcp_rto_min(sk));
sk                446 net/dccp/ccids/ccid3.c 		       dccp_role(sk), sk, usecs_to_jiffies(t_nfb), t_nfb);
sk                448 net/dccp/ccids/ccid3.c 	sk_reset_timer(sk, &hc->tx_no_feedback_timer,
sk                452 net/dccp/ccids/ccid3.c static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type,
sk                455 net/dccp/ccids/ccid3.c 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk                466 net/dccp/ccids/ccid3.c 				  dccp_role(sk), sk, optlen, option);
sk                477 net/dccp/ccids/ccid3.c 				       dccp_role(sk), sk, opt_val);
sk                483 net/dccp/ccids/ccid3.c 				       dccp_role(sk), sk, opt_val);
sk                489 net/dccp/ccids/ccid3.c static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
sk                495 net/dccp/ccids/ccid3.c 	hc->sk	     = sk;
sk                501 net/dccp/ccids/ccid3.c static void ccid3_hc_tx_exit(struct sock *sk)
sk                503 net/dccp/ccids/ccid3.c 	struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk                505 net/dccp/ccids/ccid3.c 	sk_stop_timer(sk, &hc->tx_no_feedback_timer);
sk                509 net/dccp/ccids/ccid3.c static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
sk                511 net/dccp/ccids/ccid3.c 	info->tcpi_rto = ccid3_hc_tx_sk(sk)->tx_t_rto;
sk                512 net/dccp/ccids/ccid3.c 	info->tcpi_rtt = ccid3_hc_tx_sk(sk)->tx_rtt;
sk                515 net/dccp/ccids/ccid3.c static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
sk                518 net/dccp/ccids/ccid3.c 	const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
sk                571 net/dccp/ccids/ccid3.c static void ccid3_hc_rx_set_state(struct sock *sk,
sk                574 net/dccp/ccids/ccid3.c 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
sk                578 net/dccp/ccids/ccid3.c 		       dccp_role(sk), sk, ccid3_rx_state_name(oldstate),
sk                584 net/dccp/ccids/ccid3.c static void ccid3_hc_rx_send_feedback(struct sock *sk,
sk                588 net/dccp/ccids/ccid3.c 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
sk                589 net/dccp/ccids/ccid3.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                630 net/dccp/ccids/ccid3.c 	dccp_send_ack(sk);
sk                633 net/dccp/ccids/ccid3.c static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
sk                635 net/dccp/ccids/ccid3.c 	const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
sk                638 net/dccp/ccids/ccid3.c 	if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
sk                666 net/dccp/ccids/ccid3.c static u32 ccid3_first_li(struct sock *sk)
sk                668 net/dccp/ccids/ccid3.c 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
sk                696 net/dccp/ccids/ccid3.c 		       "loss rate=%u\n", dccp_role(sk), sk, x_recv, p);
sk                701 net/dccp/ccids/ccid3.c static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
sk                703 net/dccp/ccids/ccid3.c 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
sk                705 net/dccp/ccids/ccid3.c 	const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp;
sk                712 net/dccp/ccids/ccid3.c 			ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA);
sk                739 net/dccp/ccids/ccid3.c 				skb, ndp, ccid3_first_li, sk)) {
sk                782 net/dccp/ccids/ccid3.c 		ccid3_hc_rx_send_feedback(sk, skb, do_feedback);
sk                785 net/dccp/ccids/ccid3.c static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk)
sk                794 net/dccp/ccids/ccid3.c static void ccid3_hc_rx_exit(struct sock *sk)
sk                796 net/dccp/ccids/ccid3.c 	struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
sk                802 net/dccp/ccids/ccid3.c static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
sk                804 net/dccp/ccids/ccid3.c 	info->tcpi_ca_state = ccid3_hc_rx_sk(sk)->rx_state;
sk                806 net/dccp/ccids/ccid3.c 	info->tcpi_rcv_rtt  = ccid3_hc_rx_sk(sk)->rx_rtt;
sk                809 net/dccp/ccids/ccid3.c static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
sk                812 net/dccp/ccids/ccid3.c 	const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
sk                 96 net/dccp/ccids/ccid3.h 	struct sock			*sk;
sk                102 net/dccp/ccids/ccid3.h static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
sk                104 net/dccp/ccids/ccid3.h 	struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);
sk                141 net/dccp/ccids/ccid3.h static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk)
sk                143 net/dccp/ccids/ccid3.h 	struct ccid3_hc_rx_sock *hcrx = ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid);
sk                136 net/dccp/ccids/lib/loss_interval.c 			 u32 (*calc_first_li)(struct sock *), struct sock *sk)
sk                154 net/dccp/ccids/lib/loss_interval.c 		lh->i_mean = new->li_length = (*calc_first_li)(sk);
sk                314 net/dccp/ccids/lib/packet_history.c 			u32 (*calc_first_li)(struct sock *), struct sock *sk)
sk                328 net/dccp/ccids/lib/packet_history.c 		is_new_loss = tfrc_lh_interval_add(lh, h, calc_first_li, sk);
sk                137 net/dccp/ccids/lib/packet_history.h 			u32 (*first_li)(struct sock *sk), struct sock *sk);
sk                 53 net/dccp/dccp.h void dccp_time_wait(struct sock *sk, int state, int timeo);
sk                224 net/dccp/dccp.h void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
sk                226 net/dccp/dccp.h int dccp_retransmit_skb(struct sock *sk);
sk                228 net/dccp/dccp.h void dccp_send_ack(struct sock *sk);
sk                229 net/dccp/dccp.h void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
sk                232 net/dccp/dccp.h void dccp_send_sync(struct sock *sk, const u64 seq,
sk                238 net/dccp/dccp.h void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
sk                239 net/dccp/dccp.h bool dccp_qpolicy_full(struct sock *sk);
sk                240 net/dccp/dccp.h void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
sk                241 net/dccp/dccp.h struct sk_buff *dccp_qpolicy_top(struct sock *sk);
sk                242 net/dccp/dccp.h struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
sk                243 net/dccp/dccp.h bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
sk                248 net/dccp/dccp.h void dccp_write_xmit(struct sock *sk);
sk                249 net/dccp/dccp.h void dccp_write_space(struct sock *sk);
sk                250 net/dccp/dccp.h void dccp_flush_write_queue(struct sock *sk, long *time_budget);
sk                252 net/dccp/dccp.h void dccp_init_xmit_timers(struct sock *sk);
sk                253 net/dccp/dccp.h static inline void dccp_clear_xmit_timers(struct sock *sk)
sk                255 net/dccp/dccp.h 	inet_csk_clear_xmit_timers(sk);
sk                258 net/dccp/dccp.h unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
sk                262 net/dccp/dccp.h void dccp_set_state(struct sock *sk, const int state);
sk                263 net/dccp/dccp.h void dccp_done(struct sock *sk);
sk                268 net/dccp/dccp.h int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
sk                270 net/dccp/dccp.h struct sock *dccp_create_openreq_child(const struct sock *sk,
sk                274 net/dccp/dccp.h int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
sk                276 net/dccp/dccp.h struct sock *dccp_v4_request_recv_sock(const struct sock *sk, struct sk_buff *skb,
sk                281 net/dccp/dccp.h struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
sk                286 net/dccp/dccp.h int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
sk                288 net/dccp/dccp.h int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
sk                291 net/dccp/dccp.h int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
sk                292 net/dccp/dccp.h void dccp_destroy_sock(struct sock *sk);
sk                294 net/dccp/dccp.h void dccp_close(struct sock *sk, long timeout);
sk                295 net/dccp/dccp.h struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
sk                298 net/dccp/dccp.h int dccp_connect(struct sock *sk);
sk                299 net/dccp/dccp.h int dccp_disconnect(struct sock *sk, int flags);
sk                300 net/dccp/dccp.h int dccp_getsockopt(struct sock *sk, int level, int optname,
sk                302 net/dccp/dccp.h int dccp_setsockopt(struct sock *sk, int level, int optname,
sk                305 net/dccp/dccp.h int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
sk                307 net/dccp/dccp.h int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
sk                310 net/dccp/dccp.h int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
sk                311 net/dccp/dccp.h int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
sk                312 net/dccp/dccp.h int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
sk                314 net/dccp/dccp.h void dccp_shutdown(struct sock *sk, int how);
sk                318 net/dccp/dccp.h int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
sk                319 net/dccp/dccp.h void dccp_req_err(struct sock *sk, u64 seq);
sk                321 net/dccp/dccp.h struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
sk                322 net/dccp/dccp.h int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
sk                323 net/dccp/dccp.h void dccp_send_close(struct sock *sk, const int active);
sk                325 net/dccp/dccp.h u32 dccp_sample_rtt(struct sock *sk, long delta);
sk                327 net/dccp/dccp.h static inline bool dccp_bad_service_code(const struct sock *sk,
sk                330 net/dccp/dccp.h 	const struct dccp_sock *dp = dccp_sk(sk);
sk                417 net/dccp/dccp.h static inline void dccp_update_gsr(struct sock *sk, u64 seq)
sk                419 net/dccp/dccp.h 	struct dccp_sock *dp = dccp_sk(sk);
sk                445 net/dccp/dccp.h static inline void dccp_update_gss(struct sock *sk, u64 seq)
sk                447 net/dccp/dccp.h 	struct dccp_sock *dp = dccp_sk(sk);
sk                458 net/dccp/dccp.h static inline int dccp_ackvec_pending(const struct sock *sk)
sk                460 net/dccp/dccp.h 	return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
sk                461 net/dccp/dccp.h 	       !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
sk                464 net/dccp/dccp.h static inline int dccp_ack_pending(const struct sock *sk)
sk                466 net/dccp/dccp.h 	return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
sk                469 net/dccp/dccp.h int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
sk                474 net/dccp/dccp.h int dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
sk                477 net/dccp/dccp.h int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
sk                 16 net/dccp/diag.c static void dccp_get_info(struct sock *sk, struct tcp_info *info)
sk                 18 net/dccp/diag.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                 19 net/dccp/diag.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                 23 net/dccp/diag.c 	info->tcpi_state	= sk->sk_state;
sk                 33 net/dccp/diag.c 		ccid_hc_rx_get_info(dp->dccps_hc_rx_ccid, sk, info);
sk                 36 net/dccp/diag.c 		ccid_hc_tx_get_info(dp->dccps_hc_tx_ccid, sk, info);
sk                 39 net/dccp/diag.c static void dccp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
sk                 45 net/dccp/diag.c 		dccp_get_info(sk, _info);
sk                 36 net/dccp/feat.c static int dccp_hdlr_ccid(struct sock *sk, u64 ccid, bool rx)
sk                 38 net/dccp/feat.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                 39 net/dccp/feat.c 	struct ccid *new_ccid = ccid_new(ccid, sk, rx);
sk                 45 net/dccp/feat.c 		ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
sk                 48 net/dccp/feat.c 		ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
sk                 54 net/dccp/feat.c static int dccp_hdlr_seq_win(struct sock *sk, u64 seq_win, bool rx)
sk                 56 net/dccp/feat.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                 61 net/dccp/feat.c 		dccp_update_gsr(sk, dp->dccps_gsr);
sk                 65 net/dccp/feat.c 		dccp_update_gss(sk, dp->dccps_gss);
sk                 70 net/dccp/feat.c static int dccp_hdlr_ack_ratio(struct sock *sk, u64 ratio, bool rx)
sk                 73 net/dccp/feat.c 		dccp_sk(sk)->dccps_r_ack_ratio = ratio;
sk                 75 net/dccp/feat.c 		dccp_sk(sk)->dccps_l_ack_ratio = ratio;
sk                 79 net/dccp/feat.c static int dccp_hdlr_ackvec(struct sock *sk, u64 enable, bool rx)
sk                 81 net/dccp/feat.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                 96 net/dccp/feat.c static int dccp_hdlr_ndp(struct sock *sk, u64 enable, bool rx)
sk                 99 net/dccp/feat.c 		dccp_sk(sk)->dccps_send_ndp_count = (enable > 0);
sk                112 net/dccp/feat.c static int dccp_hdlr_min_cscov(struct sock *sk, u64 cscov, bool rx)
sk                114 net/dccp/feat.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                133 net/dccp/feat.c 	int (*activation_hdlr)(struct sock *sk, u64 val, bool rx);
sk                303 net/dccp/feat.c static int __dccp_feat_activate(struct sock *sk, const int idx,
sk                339 net/dccp/feat.c 	return dccp_feat_table[idx].activation_hdlr(sk, val, rx);
sk                351 net/dccp/feat.c static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
sk                354 net/dccp/feat.c 	return __dccp_feat_activate(sk, dccp_feat_index(feat_num), local, fval);
sk                667 net/dccp/feat.c 		if (skb->sk->sk_state == DCCP_OPEN &&
sk                752 net/dccp/feat.c int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
sk                755 net/dccp/feat.c 	if (sk->sk_state != DCCP_CLOSED)
sk                759 net/dccp/feat.c 	return __feat_register_sp(&dccp_sk(sk)->dccps_featneg, feat, is_local,
sk                771 net/dccp/feat.c u64 dccp_feat_nn_get(struct sock *sk, u8 feat)
sk                774 net/dccp/feat.c 		struct dccp_sock *dp = dccp_sk(sk);
sk                801 net/dccp/feat.c int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
sk                803 net/dccp/feat.c 	struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
sk                807 net/dccp/feat.c 	if (sk->sk_state != DCCP_OPEN && sk->sk_state != DCCP_PARTOPEN)
sk                814 net/dccp/feat.c 	if (nn_val == dccp_feat_nn_get(sk, feat))
sk                825 net/dccp/feat.c 	inet_csk_schedule_ack(sk);
sk               1305 net/dccp/feat.c static u8 dccp_feat_handle_nn_established(struct sock *sk, u8 mandatory, u8 opt,
sk               1308 net/dccp/feat.c 	struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
sk               1340 net/dccp/feat.c 		    dccp_feat_activate(sk, feat, local, &fval))
sk               1344 net/dccp/feat.c 		inet_csk_schedule_ack(sk);
sk               1362 net/dccp/feat.c 		dccp_feat_activate(sk, feat, local, &fval);
sk               1394 net/dccp/feat.c int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
sk               1397 net/dccp/feat.c 	struct dccp_sock *dp = dccp_sk(sk);
sk               1401 net/dccp/feat.c 	switch (sk->sk_state) {
sk               1424 net/dccp/feat.c 		return dccp_feat_handle_nn_established(sk, mandatory, opt, feat,
sk               1440 net/dccp/feat.c int dccp_feat_init(struct sock *sk)
sk               1442 net/dccp/feat.c 	struct list_head *fn = &dccp_sk(sk)->dccps_featneg;
sk               1497 net/dccp/feat.c int dccp_feat_activate_values(struct sock *sk, struct list_head *fn_list)
sk               1499 net/dccp/feat.c 	struct dccp_sock *dp = dccp_sk(sk);
sk               1537 net/dccp/feat.c 		if (__dccp_feat_activate(sk, idx, 0, fvals[idx][0]) ||
sk               1538 net/dccp/feat.c 		    __dccp_feat_activate(sk, idx, 1, fvals[idx][1])) {
sk               1558 net/dccp/feat.c 	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
sk               1559 net/dccp/feat.c 	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
sk                107 net/dccp/feat.h int dccp_feat_init(struct sock *sk);
sk                109 net/dccp/feat.h int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
sk                129 net/dccp/feat.h u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
sk                 22 net/dccp/input.c static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
sk                 25 net/dccp/input.c 	__skb_queue_tail(&sk->sk_receive_queue, skb);
sk                 26 net/dccp/input.c 	skb_set_owner_r(skb, sk);
sk                 27 net/dccp/input.c 	sk->sk_data_ready(sk);
sk                 30 net/dccp/input.c static void dccp_fin(struct sock *sk, struct sk_buff *skb)
sk                 38 net/dccp/input.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk                 39 net/dccp/input.c 	sock_set_flag(sk, SOCK_DONE);
sk                 40 net/dccp/input.c 	dccp_enqueue_skb(sk, skb);
sk                 43 net/dccp/input.c static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb)
sk                 47 net/dccp/input.c 	switch (sk->sk_state) {
sk                 65 net/dccp/input.c 		if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT)
sk                 70 net/dccp/input.c 		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
sk                 71 net/dccp/input.c 		dccp_done(sk);
sk                 77 net/dccp/input.c 		dccp_fin(sk, skb);
sk                 78 net/dccp/input.c 		dccp_set_state(sk, DCCP_PASSIVE_CLOSE);
sk                 84 net/dccp/input.c 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
sk                 89 net/dccp/input.c static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb)
sk                 99 net/dccp/input.c 	if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) {
sk                100 net/dccp/input.c 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC);
sk                105 net/dccp/input.c 	switch (sk->sk_state) {
sk                107 net/dccp/input.c 		dccp_send_close(sk, 0);
sk                108 net/dccp/input.c 		dccp_set_state(sk, DCCP_CLOSING);
sk                114 net/dccp/input.c 		dccp_fin(sk, skb);
sk                115 net/dccp/input.c 		dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ);
sk                118 net/dccp/input.c 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
sk                145 net/dccp/input.c static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb)
sk                149 net/dccp/input.c 	sk->sk_err = err;
sk                152 net/dccp/input.c 	dccp_fin(sk, skb);
sk                154 net/dccp/input.c 	if (err && !sock_flag(sk, SOCK_DEAD))
sk                155 net/dccp/input.c 		sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
sk                156 net/dccp/input.c 	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
sk                159 net/dccp/input.c static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
sk                161 net/dccp/input.c 	struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
sk                170 net/dccp/input.c static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
sk                172 net/dccp/input.c 	const struct dccp_sock *dp = dccp_sk(sk);
sk                175 net/dccp/input.c 	if (!(sk->sk_shutdown & RCV_SHUTDOWN))
sk                176 net/dccp/input.c 		ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb);
sk                181 net/dccp/input.c 	if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN))
sk                182 net/dccp/input.c 		ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb);
sk                185 net/dccp/input.c static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
sk                188 net/dccp/input.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                208 net/dccp/input.c 			dccp_update_gsr(sk, seqno);
sk                237 net/dccp/input.c 		dccp_update_gsr(sk, seqno);
sk                276 net/dccp/input.c 		dccp_send_sync(sk, seqno, DCCP_PKT_SYNC);
sk                283 net/dccp/input.c static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
sk                286 net/dccp/input.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                296 net/dccp/input.c 		dccp_enqueue_skb(sk, skb);
sk                309 net/dccp/input.c 		dccp_rcv_reset(sk, skb);
sk                312 net/dccp/input.c 		if (dccp_rcv_closereq(sk, skb))
sk                316 net/dccp/input.c 		if (dccp_rcv_close(sk, skb))
sk                341 net/dccp/input.c 			dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
sk                346 net/dccp/input.c 		dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq,
sk                364 net/dccp/input.c int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
sk                367 net/dccp/input.c 	if (dccp_check_seqno(sk, skb))
sk                370 net/dccp/input.c 	if (dccp_parse_options(sk, NULL, skb))
sk                373 net/dccp/input.c 	dccp_handle_ackvec_processing(sk, skb);
sk                374 net/dccp/input.c 	dccp_deliver_input_to_ccids(sk, skb);
sk                376 net/dccp/input.c 	return __dccp_rcv_established(sk, skb, dh, len);
sk                384 net/dccp/input.c static int dccp_rcv_request_sent_state_process(struct sock *sk,
sk                401 net/dccp/input.c 		const struct inet_connection_sock *icsk = inet_csk(sk);
sk                402 net/dccp/input.c 		struct dccp_sock *dp = dccp_sk(sk);
sk                420 net/dccp/input.c 		if (dccp_parse_options(sk, NULL, skb))
sk                425 net/dccp/input.c 			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
sk                429 net/dccp/input.c 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
sk                430 net/dccp/input.c 		WARN_ON(sk->sk_send_head == NULL);
sk                431 net/dccp/input.c 		kfree_skb(sk->sk_send_head);
sk                432 net/dccp/input.c 		sk->sk_send_head = NULL;
sk                443 net/dccp/input.c 		dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);
sk                460 net/dccp/input.c 		dccp_set_state(sk, DCCP_PARTOPEN);
sk                468 net/dccp/input.c 		if (dccp_feat_activate_values(sk, &dp->dccps_featneg))
sk                472 net/dccp/input.c 		icsk->icsk_af_ops->rebuild_header(sk);
sk                474 net/dccp/input.c 		if (!sock_flag(sk, SOCK_DEAD)) {
sk                475 net/dccp/input.c 			sk->sk_state_change(sk);
sk                476 net/dccp/input.c 			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
sk                479 net/dccp/input.c 		if (sk->sk_write_pending || inet_csk_in_pingpong_mode(sk) ||
sk                497 net/dccp/input.c 		dccp_send_ack(sk);
sk                512 net/dccp/input.c 	dccp_set_state(sk, DCCP_CLOSED);
sk                513 net/dccp/input.c 	sk->sk_err = ECOMM;
sk                517 net/dccp/input.c static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
sk                522 net/dccp/input.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                528 net/dccp/input.c 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
sk                531 net/dccp/input.c 		if (sk->sk_state == DCCP_RESPOND)
sk                546 net/dccp/input.c 		if (sk->sk_state == DCCP_PARTOPEN)
sk                547 net/dccp/input.c 			inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
sk                553 net/dccp/input.c 			dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
sk                557 net/dccp/input.c 		dccp_set_state(sk, DCCP_OPEN);
sk                561 net/dccp/input.c 			__dccp_rcv_established(sk, skb, dh, len);
sk                571 net/dccp/input.c int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
sk                574 net/dccp/input.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                576 net/dccp/input.c 	const int old_state = sk->sk_state;
sk                602 net/dccp/input.c 	if (sk->sk_state == DCCP_LISTEN) {
sk                609 net/dccp/input.c 			acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
sk                623 net/dccp/input.c 	} else if (sk->sk_state == DCCP_CLOSED) {
sk                629 net/dccp/input.c 	if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb))
sk                644 net/dccp/input.c 	    (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) {
sk                645 net/dccp/input.c 		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
sk                650 net/dccp/input.c 	if (dccp_parse_options(sk, NULL, skb))
sk                662 net/dccp/input.c 		dccp_rcv_reset(sk, skb);
sk                665 net/dccp/input.c 		if (dccp_rcv_closereq(sk, skb))
sk                669 net/dccp/input.c 		if (dccp_rcv_close(sk, skb))
sk                674 net/dccp/input.c 	switch (sk->sk_state) {
sk                676 net/dccp/input.c 		queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
sk                685 net/dccp/input.c 		dccp_handle_ackvec_processing(sk, skb);
sk                686 net/dccp/input.c 		dccp_deliver_input_to_ccids(sk, skb);
sk                689 net/dccp/input.c 		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
sk                698 net/dccp/input.c 			sk->sk_state_change(sk);
sk                699 net/dccp/input.c 			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
sk                703 net/dccp/input.c 		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK);
sk                723 net/dccp/input.c u32 dccp_sample_rtt(struct sock *sk, long delta)
sk                726 net/dccp/input.c 	delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10;
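The net/dccp/input.c entries above trace the established-state receive path: dccp_rcv_established() runs a sequence-number check, option parsing, ack-vector bookkeeping and CCID delivery before __dccp_rcv_established() queues the payload. A minimal userspace sketch of that check-then-deliver layering (all names and types here are illustrative stand-ins, not the kernel API):

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the per-connection state and an incoming packet. */
struct pkt  { unsigned long long seq; int type; };
struct conn { unsigned long long gsr; /* greatest sequence number received */ };

/* Each stage returns false to drop the packet, mirroring the early
 * "goto discard" exits in the kernel's receive path. */
static bool check_seqno(struct conn *c, const struct pkt *p)
{
	if (p->seq <= c->gsr)          /* stale or duplicate */
		return false;
	c->gsr = p->seq;               /* remember greatest seqno seen */
	return true;
}

static bool parse_options(struct conn *c, const struct pkt *p)
{
	(void)c; (void)p;              /* option walk elided here */
	return true;
}

static void deliver_to_ccid(struct conn *c, const struct pkt *p)
{
	(void)c;
	printf("CCID sees packet %llu\n", p->seq);
}

/* The fast path: every stage must pass before the payload is accepted. */
static int rcv_established(struct conn *c, const struct pkt *p)
{
	if (!check_seqno(c, p) || !parse_options(c, p))
		return -1;             /* discard */
	deliver_to_ccid(c, p);
	return 0;
}

int main(void)
{
	struct conn c = { .gsr = 0 };
	struct pkt  p = { .seq = 1, .type = 0 };
	return rcv_established(&c, &p) ? 1 : 0;
}
```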
sk                 38 net/dccp/ipv4.c int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                 41 net/dccp/ipv4.c 	struct inet_sock *inet = inet_sk(sk);
sk                 42 net/dccp/ipv4.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                 61 net/dccp/ipv4.c 					     lockdep_sock_is_held(sk));
sk                 72 net/dccp/ipv4.c 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
sk                 74 net/dccp/ipv4.c 			      orig_sport, orig_dport, sk);
sk                 88 net/dccp/ipv4.c 	sk_rcv_saddr_set(sk, inet->inet_saddr);
sk                 90 net/dccp/ipv4.c 	sk_daddr_set(sk, daddr);
sk                 92 net/dccp/ipv4.c 	inet_csk(sk)->icsk_ext_hdr_len = 0;
sk                 94 net/dccp/ipv4.c 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
sk                101 net/dccp/ipv4.c 	dccp_set_state(sk, DCCP_REQUESTING);
sk                102 net/dccp/ipv4.c 	err = inet_hash_connect(&dccp_death_row, sk);
sk                107 net/dccp/ipv4.c 			       inet->inet_sport, inet->inet_dport, sk);
sk                114 net/dccp/ipv4.c 	sk_setup_caps(sk, &rt->dst);
sk                122 net/dccp/ipv4.c 	err = dccp_connect(sk);
sk                132 net/dccp/ipv4.c 	dccp_set_state(sk, DCCP_CLOSED);
sk                134 net/dccp/ipv4.c 	sk->sk_route_caps = 0;
sk                143 net/dccp/ipv4.c static inline void dccp_do_pmtu_discovery(struct sock *sk,
sk                148 net/dccp/ipv4.c 	const struct inet_sock *inet = inet_sk(sk);
sk                149 net/dccp/ipv4.c 	const struct dccp_sock *dp = dccp_sk(sk);
sk                155 net/dccp/ipv4.c 	if (sk->sk_state == DCCP_LISTEN)
sk                158 net/dccp/ipv4.c 	dst = inet_csk_update_pmtu(sk, mtu);
sk                165 net/dccp/ipv4.c 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
sk                166 net/dccp/ipv4.c 		sk->sk_err_soft = EMSGSIZE;
sk                171 net/dccp/ipv4.c 	    ip_sk_accept_pmtu(sk) &&
sk                172 net/dccp/ipv4.c 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
sk                173 net/dccp/ipv4.c 		dccp_sync_mss(sk, mtu);
sk                182 net/dccp/ipv4.c 		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
sk                186 net/dccp/ipv4.c static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
sk                188 net/dccp/ipv4.c 	struct dst_entry *dst = __sk_dst_check(sk, 0);
sk                191 net/dccp/ipv4.c 		dst->ops->redirect(dst, sk, skb);
sk                194 net/dccp/ipv4.c void dccp_req_err(struct sock *sk, u64 seq)
sk                196 net/dccp/ipv4.c 	struct request_sock *req = inet_reqsk(sk);
sk                197 net/dccp/ipv4.c 	struct net *net = sock_net(sk);
sk                239 net/dccp/ipv4.c 	struct sock *sk;
sk                252 net/dccp/ipv4.c 	sk = __inet_lookup_established(net, &dccp_hashinfo,
sk                256 net/dccp/ipv4.c 	if (!sk) {
sk                261 net/dccp/ipv4.c 	if (sk->sk_state == DCCP_TIME_WAIT) {
sk                262 net/dccp/ipv4.c 		inet_twsk_put(inet_twsk(sk));
sk                266 net/dccp/ipv4.c 	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
sk                267 net/dccp/ipv4.c 		dccp_req_err(sk, seq);
sk                271 net/dccp/ipv4.c 	bh_lock_sock(sk);
sk                275 net/dccp/ipv4.c 	if (sock_owned_by_user(sk))
sk                278 net/dccp/ipv4.c 	if (sk->sk_state == DCCP_CLOSED)
sk                281 net/dccp/ipv4.c 	dp = dccp_sk(sk);
sk                282 net/dccp/ipv4.c 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
sk                290 net/dccp/ipv4.c 		if (!sock_owned_by_user(sk))
sk                291 net/dccp/ipv4.c 			dccp_do_redirect(skb, sk);
sk                304 net/dccp/ipv4.c 			if (!sock_owned_by_user(sk))
sk                305 net/dccp/ipv4.c 				dccp_do_pmtu_discovery(sk, iph, info);
sk                318 net/dccp/ipv4.c 	switch (sk->sk_state) {
sk                321 net/dccp/ipv4.c 		if (!sock_owned_by_user(sk)) {
sk                323 net/dccp/ipv4.c 			sk->sk_err = err;
sk                325 net/dccp/ipv4.c 			sk->sk_error_report(sk);
sk                327 net/dccp/ipv4.c 			dccp_done(sk);
sk                329 net/dccp/ipv4.c 			sk->sk_err_soft = err;
sk                349 net/dccp/ipv4.c 	inet = inet_sk(sk);
sk                350 net/dccp/ipv4.c 	if (!sock_owned_by_user(sk) && inet->recverr) {
sk                351 net/dccp/ipv4.c 		sk->sk_err = err;
sk                352 net/dccp/ipv4.c 		sk->sk_error_report(sk);
sk                354 net/dccp/ipv4.c 		sk->sk_err_soft = err;
sk                356 net/dccp/ipv4.c 	bh_unlock_sock(sk);
sk                357 net/dccp/ipv4.c 	sock_put(sk);
sk                367 net/dccp/ipv4.c void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
sk                369 net/dccp/ipv4.c 	const struct inet_sock *inet = inet_sk(sk);
sk                393 net/dccp/ipv4.c struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
sk                404 net/dccp/ipv4.c 	if (sk_acceptq_is_full(sk))
sk                407 net/dccp/ipv4.c 	newsk = dccp_create_openreq_child(sk, req, skb);
sk                421 net/dccp/ipv4.c 	if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
sk                428 net/dccp/ipv4.c 	if (__inet_inherit_port(sk, newsk) < 0)
sk                438 net/dccp/ipv4.c 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
sk                442 net/dccp/ipv4.c 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
sk                452 net/dccp/ipv4.c static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
sk                461 net/dccp/ipv4.c 		.flowi4_tos = RT_CONN_FLAGS(sk),
sk                462 net/dccp/ipv4.c 		.flowi4_proto = sk->sk_protocol,
sk                468 net/dccp/ipv4.c 	rt = ip_route_output_flow(net, &fl4, sk);
sk                477 net/dccp/ipv4.c static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req)
sk                484 net/dccp/ipv4.c 	dst = inet_csk_route_req(sk, &fl4, req);
sk                488 net/dccp/ipv4.c 	skb = dccp_make_response(sk, dst, req);
sk                496 net/dccp/ipv4.c 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
sk                508 net/dccp/ipv4.c static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
sk                573 net/dccp/ipv4.c int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
sk                585 net/dccp/ipv4.c 	if (dccp_bad_service_code(sk, service)) {
sk                595 net/dccp/ipv4.c 	if (inet_csk_reqsk_queue_is_full(sk))
sk                598 net/dccp/ipv4.c 	if (sk_acceptq_is_full(sk))
sk                601 net/dccp/ipv4.c 	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
sk                605 net/dccp/ipv4.c 	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
sk                609 net/dccp/ipv4.c 	if (dccp_parse_options(sk, dreq, skb))
sk                612 net/dccp/ipv4.c 	if (security_inet_conn_request(sk, skb, req))
sk                618 net/dccp/ipv4.c 	ireq->ir_mark = inet_request_mark(sk, skb);
sk                620 net/dccp/ipv4.c 	ireq->ir_iif = sk->sk_bound_dev_if;
sk                635 net/dccp/ipv4.c 	if (dccp_v4_send_response(sk, req))
sk                638 net/dccp/ipv4.c 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
sk                650 net/dccp/ipv4.c int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
sk                654 net/dccp/ipv4.c 	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
sk                655 net/dccp/ipv4.c 		if (dccp_rcv_established(sk, skb, dh, skb->len))
sk                684 net/dccp/ipv4.c 	if (dccp_rcv_state_process(sk, skb, dh, skb->len))
sk                689 net/dccp/ipv4.c 	dccp_v4_ctl_send_reset(sk, skb);
sk                775 net/dccp/ipv4.c 	struct sock *sk;
sk                811 net/dccp/ipv4.c 	sk = __inet_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
sk                813 net/dccp/ipv4.c 	if (!sk) {
sk                825 net/dccp/ipv4.c 	if (sk->sk_state == DCCP_TIME_WAIT) {
sk                827 net/dccp/ipv4.c 		inet_twsk_put(inet_twsk(sk));
sk                831 net/dccp/ipv4.c 	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
sk                832 net/dccp/ipv4.c 		struct request_sock *req = inet_reqsk(sk);
sk                835 net/dccp/ipv4.c 		sk = req->rsk_listener;
sk                836 net/dccp/ipv4.c 		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
sk                837 net/dccp/ipv4.c 			inet_csk_reqsk_queue_drop_and_put(sk, req);
sk                840 net/dccp/ipv4.c 		sock_hold(sk);
sk                842 net/dccp/ipv4.c 		nsk = dccp_check_req(sk, skb, req);
sk                847 net/dccp/ipv4.c 		if (nsk == sk) {
sk                849 net/dccp/ipv4.c 		} else if (dccp_child_process(sk, nsk, skb)) {
sk                850 net/dccp/ipv4.c 			dccp_v4_ctl_send_reset(sk, skb);
sk                853 net/dccp/ipv4.c 			sock_put(sk);
sk                862 net/dccp/ipv4.c 	min_cov = dccp_sk(sk)->dccps_pcrlen;
sk                872 net/dccp/ipv4.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
sk                876 net/dccp/ipv4.c 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
sk                890 net/dccp/ipv4.c 		dccp_v4_ctl_send_reset(sk, skb);
sk                899 net/dccp/ipv4.c 		sock_put(sk);
sk                920 net/dccp/ipv4.c static int dccp_v4_init_sock(struct sock *sk)
sk                923 net/dccp/ipv4.c 	int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);
sk                928 net/dccp/ipv4.c 		inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
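dccp_v4_connect() in the net/dccp/ipv4.c entries above moves the socket to DCCP_REQUESTING before hashing the connection and sending the Request, and drops back to DCCP_CLOSED if any later step fails. A compact sketch of that optimistic-transition-with-rollback shape (state names and helpers are hypothetical):

```c
#include <stdio.h>

enum state { CLOSED, REQUESTING, OPEN };

struct sock { enum state st; };

/* Stand-ins for routing, port hashing and sending the Request packet. */
static int route_lookup(struct sock *s)   { (void)s; return 0; }
static int hash_connect(struct sock *s)   { (void)s; return 0; }
static int send_request(struct sock *s)   { (void)s; return 0; }

static int my_connect(struct sock *s)
{
	int err = route_lookup(s);
	if (err)
		return err;

	s->st = REQUESTING;            /* optimistic state transition */

	err = hash_connect(s);
	if (err)
		goto failure;
	err = send_request(s);
	if (err)
		goto failure;
	return 0;

failure:
	/* Roll back so a later connect() starts from a clean slate,
	 * mirroring the dccp_set_state(sk, DCCP_CLOSED) unwind above. */
	s->st = CLOSED;
	return err;
}

int main(void)
{
	struct sock s = { CLOSED };
	printf("connect: %d, state %d\n", my_connect(&s), s.st);
	return 0;
}
```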
sk                 49 net/dccp/ipv6.c static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
sk                 51 net/dccp/ipv6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                 55 net/dccp/ipv6.c 	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
sk                 74 net/dccp/ipv6.c 	struct sock *sk;
sk                 87 net/dccp/ipv6.c 	sk = __inet6_lookup_established(net, &dccp_hashinfo,
sk                 92 net/dccp/ipv6.c 	if (!sk) {
sk                 98 net/dccp/ipv6.c 	if (sk->sk_state == DCCP_TIME_WAIT) {
sk                 99 net/dccp/ipv6.c 		inet_twsk_put(inet_twsk(sk));
sk                103 net/dccp/ipv6.c 	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
sk                104 net/dccp/ipv6.c 		dccp_req_err(sk, seq);
sk                108 net/dccp/ipv6.c 	bh_lock_sock(sk);
sk                109 net/dccp/ipv6.c 	if (sock_owned_by_user(sk))
sk                112 net/dccp/ipv6.c 	if (sk->sk_state == DCCP_CLOSED)
sk                115 net/dccp/ipv6.c 	dp = dccp_sk(sk);
sk                116 net/dccp/ipv6.c 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
sk                122 net/dccp/ipv6.c 	np = inet6_sk(sk);
sk                125 net/dccp/ipv6.c 		if (!sock_owned_by_user(sk)) {
sk                126 net/dccp/ipv6.c 			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
sk                129 net/dccp/ipv6.c 				dst->ops->redirect(dst, sk, skb);
sk                137 net/dccp/ipv6.c 		if (!ip6_sk_accept_pmtu(sk))
sk                140 net/dccp/ipv6.c 		if (sock_owned_by_user(sk))
sk                142 net/dccp/ipv6.c 		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
sk                145 net/dccp/ipv6.c 		dst = inet6_csk_update_pmtu(sk, ntohl(info));
sk                149 net/dccp/ipv6.c 		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
sk                150 net/dccp/ipv6.c 			dccp_sync_mss(sk, dst_mtu(dst));
sk                157 net/dccp/ipv6.c 	switch (sk->sk_state) {
sk                161 net/dccp/ipv6.c 		if (!sock_owned_by_user(sk)) {
sk                163 net/dccp/ipv6.c 			sk->sk_err = err;
sk                168 net/dccp/ipv6.c 			sk->sk_error_report(sk);
sk                169 net/dccp/ipv6.c 			dccp_done(sk);
sk                171 net/dccp/ipv6.c 			sk->sk_err_soft = err;
sk                175 net/dccp/ipv6.c 	if (!sock_owned_by_user(sk) && np->recverr) {
sk                176 net/dccp/ipv6.c 		sk->sk_err = err;
sk                177 net/dccp/ipv6.c 		sk->sk_error_report(sk);
sk                179 net/dccp/ipv6.c 		sk->sk_err_soft = err;
sk                182 net/dccp/ipv6.c 	bh_unlock_sock(sk);
sk                183 net/dccp/ipv6.c 	sock_put(sk);
sk                188 net/dccp/ipv6.c static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
sk                191 net/dccp/ipv6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                213 net/dccp/ipv6.c 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
sk                220 net/dccp/ipv6.c 	skb = dccp_make_response(sk, dst, req);
sk                233 net/dccp/ipv6.c 		err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass,
sk                234 net/dccp/ipv6.c 			       sk->sk_priority);
sk                251 net/dccp/ipv6.c static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
sk                307 net/dccp/ipv6.c static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
sk                312 net/dccp/ipv6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                317 net/dccp/ipv6.c 		return dccp_v4_conn_request(sk, skb);
sk                322 net/dccp/ipv6.c 	if (dccp_bad_service_code(sk, service)) {
sk                330 net/dccp/ipv6.c 	if (inet_csk_reqsk_queue_is_full(sk))
sk                333 net/dccp/ipv6.c 	if (sk_acceptq_is_full(sk))
sk                336 net/dccp/ipv6.c 	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
sk                340 net/dccp/ipv6.c 	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
sk                344 net/dccp/ipv6.c 	if (dccp_parse_options(sk, dreq, skb))
sk                347 net/dccp/ipv6.c 	if (security_inet_conn_request(sk, skb, req))
sk                354 net/dccp/ipv6.c 	ireq->ir_mark = inet_request_mark(sk, skb);
sk                356 net/dccp/ipv6.c 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
sk                362 net/dccp/ipv6.c 	ireq->ir_iif = sk->sk_bound_dev_if;
sk                365 net/dccp/ipv6.c 	if (!sk->sk_bound_dev_if &&
sk                382 net/dccp/ipv6.c 	if (dccp_v6_send_response(sk, req))
sk                385 net/dccp/ipv6.c 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
sk                396 net/dccp/ipv6.c static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
sk                405 net/dccp/ipv6.c 	const struct ipv6_pinfo *np = inet6_sk(sk);
sk                415 net/dccp/ipv6.c 		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
sk                455 net/dccp/ipv6.c 	if (sk_acceptq_is_full(sk))
sk                461 net/dccp/ipv6.c 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
sk                466 net/dccp/ipv6.c 	newsk = dccp_create_openreq_child(sk, req, skb);
sk                531 net/dccp/ipv6.c 	if (__inet_inherit_port(sk, newsk) < 0) {
sk                549 net/dccp/ipv6.c 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
sk                553 net/dccp/ipv6.c 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
sk                565 net/dccp/ipv6.c static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
sk                567 net/dccp/ipv6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                579 net/dccp/ipv6.c 		return dccp_v4_do_rcv(sk, skb);
sk                581 net/dccp/ipv6.c 	if (sk_filter(sk, skb))
sk                605 net/dccp/ipv6.c 	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
sk                606 net/dccp/ipv6.c 		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
sk                638 net/dccp/ipv6.c 	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
sk                645 net/dccp/ipv6.c 	dccp_v6_ctl_send_reset(sk, skb);
sk                656 net/dccp/ipv6.c 	if (!((1 << sk->sk_state) & (DCCPF_CLOSED | DCCPF_LISTEN))) {
sk                665 net/dccp/ipv6.c 		if (ipv6_opt_accepted(sk, opt_skb,
sk                667 net/dccp/ipv6.c 			skb_set_owner_r(opt_skb, sk);
sk                686 net/dccp/ipv6.c 	struct sock *sk;
sk                712 net/dccp/ipv6.c 	sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
sk                715 net/dccp/ipv6.c 	if (!sk) {
sk                727 net/dccp/ipv6.c 	if (sk->sk_state == DCCP_TIME_WAIT) {
sk                729 net/dccp/ipv6.c 		inet_twsk_put(inet_twsk(sk));
sk                733 net/dccp/ipv6.c 	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
sk                734 net/dccp/ipv6.c 		struct request_sock *req = inet_reqsk(sk);
sk                737 net/dccp/ipv6.c 		sk = req->rsk_listener;
sk                738 net/dccp/ipv6.c 		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
sk                739 net/dccp/ipv6.c 			inet_csk_reqsk_queue_drop_and_put(sk, req);
sk                742 net/dccp/ipv6.c 		sock_hold(sk);
sk                744 net/dccp/ipv6.c 		nsk = dccp_check_req(sk, skb, req);
sk                749 net/dccp/ipv6.c 		if (nsk == sk) {
sk                751 net/dccp/ipv6.c 		} else if (dccp_child_process(sk, nsk, skb)) {
sk                752 net/dccp/ipv6.c 			dccp_v6_ctl_send_reset(sk, skb);
sk                755 net/dccp/ipv6.c 			sock_put(sk);
sk                764 net/dccp/ipv6.c 	min_cov = dccp_sk(sk)->dccps_pcrlen;
sk                772 net/dccp/ipv6.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
sk                775 net/dccp/ipv6.c 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
sk                790 net/dccp/ipv6.c 		dccp_v6_ctl_send_reset(sk, skb);
sk                799 net/dccp/ipv6.c 		sock_put(sk);
sk                803 net/dccp/ipv6.c static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sk                807 net/dccp/ipv6.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                808 net/dccp/ipv6.c 	struct inet_sock *inet = inet_sk(sk);
sk                809 net/dccp/ipv6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                810 net/dccp/ipv6.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                833 net/dccp/ipv6.c 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
sk                856 net/dccp/ipv6.c 			if (sk->sk_bound_dev_if &&
sk                857 net/dccp/ipv6.c 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
sk                860 net/dccp/ipv6.c 			sk->sk_bound_dev_if = usin->sin6_scope_id;
sk                864 net/dccp/ipv6.c 		if (!sk->sk_bound_dev_if)
sk                868 net/dccp/ipv6.c 	sk->sk_v6_daddr = usin->sin6_addr;
sk                878 net/dccp/ipv6.c 		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
sk                880 net/dccp/ipv6.c 		if (__ipv6_only_sock(sk))
sk                888 net/dccp/ipv6.c 		sk->sk_backlog_rcv = dccp_v4_do_rcv;
sk                890 net/dccp/ipv6.c 		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
sk                894 net/dccp/ipv6.c 			sk->sk_backlog_rcv = dccp_v6_do_rcv;
sk                897 net/dccp/ipv6.c 		np->saddr = sk->sk_v6_rcv_saddr;
sk                901 net/dccp/ipv6.c 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
sk                902 net/dccp/ipv6.c 		saddr = &sk->sk_v6_rcv_saddr;
sk                905 net/dccp/ipv6.c 	fl6.daddr = sk->sk_v6_daddr;
sk                907 net/dccp/ipv6.c 	fl6.flowi6_oif = sk->sk_bound_dev_if;
sk                910 net/dccp/ipv6.c 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
sk                912 net/dccp/ipv6.c 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
sk                915 net/dccp/ipv6.c 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
sk                923 net/dccp/ipv6.c 		sk->sk_v6_rcv_saddr = *saddr;
sk                930 net/dccp/ipv6.c 	ip6_dst_store(sk, dst, NULL, NULL);
sk                938 net/dccp/ipv6.c 	dccp_set_state(sk, DCCP_REQUESTING);
sk                939 net/dccp/ipv6.c 	err = inet6_hash_connect(&dccp_death_row, sk);
sk                944 net/dccp/ipv6.c 						      sk->sk_v6_daddr.s6_addr32,
sk                947 net/dccp/ipv6.c 	err = dccp_connect(sk);
sk                954 net/dccp/ipv6.c 	dccp_set_state(sk, DCCP_CLOSED);
sk                955 net/dccp/ipv6.c 	__sk_dst_reset(sk);
sk                958 net/dccp/ipv6.c 	sk->sk_route_caps = 0;
sk               1002 net/dccp/ipv6.c static int dccp_v6_init_sock(struct sock *sk)
sk               1005 net/dccp/ipv6.c 	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
sk               1010 net/dccp/ipv6.c 		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
sk               1016 net/dccp/ipv6.c static void dccp_v6_destroy_sock(struct sock *sk)
sk               1018 net/dccp/ipv6.c 	dccp_destroy_sock(sk);
sk               1019 net/dccp/ipv6.c 	inet6_destroy_sock(sk);
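In the net/dccp/ipv6.c entries above, dccp_v6_connect() hands v4-mapped peers (::ffff:a.b.c.d) over to dccp_v4_connect() and swaps sk_backlog_rcv to dccp_v4_do_rcv, restoring it on failure. Recognising such an address is a prefix test on the 128-bit value; a hedged standalone check (the kernel has its own helper for this, the code below is purely illustrative):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* True for ::ffff:a.b.c.d, i.e. bytes 0..9 zero and bytes 10..11 0xff. */
static bool is_v4_mapped(const uint8_t a[16])
{
	static const uint8_t prefix[12] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff
	};
	return memcmp(a, prefix, sizeof(prefix)) == 0;
}

int main(void)
{
	uint8_t mapped[16] = { [10] = 0xff, [11] = 0xff,
			       [12] = 192, [13] = 0, [14] = 2, [15] = 1 };
	uint8_t native[16] = { [0] = 0x20, [1] = 0x01 }; /* 2001::/16 */

	printf("mapped: %d, native: %d\n",
	       is_v4_mapped(mapped), is_v4_mapped(native));
	return 0;
}
```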
sk                 31 net/dccp/minisocks.c void dccp_time_wait(struct sock *sk, int state, int timeo)
sk                 35 net/dccp/minisocks.c 	tw = inet_twsk_alloc(sk, &dccp_death_row, state);
sk                 38 net/dccp/minisocks.c 		const struct inet_connection_sock *icsk = inet_csk(sk);
sk                 42 net/dccp/minisocks.c 			tw->tw_v6_daddr = sk->sk_v6_daddr;
sk                 43 net/dccp/minisocks.c 			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
sk                 44 net/dccp/minisocks.c 			tw->tw_ipv6only = sk->sk_ipv6only;
sk                 64 net/dccp/minisocks.c 		inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
sk                 74 net/dccp/minisocks.c 	dccp_done(sk);
sk                 77 net/dccp/minisocks.c struct sock *dccp_create_openreq_child(const struct sock *sk,
sk                 87 net/dccp/minisocks.c 	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
sk                140 net/dccp/minisocks.c struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
sk                165 net/dccp/minisocks.c 			inet_rtx_syn_ack(sk, req);
sk                189 net/dccp/minisocks.c 	if (dccp_parse_options(sk, dreq, skb))
sk                192 net/dccp/minisocks.c 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
sk                195 net/dccp/minisocks.c 		child = inet_csk_complete_hashdance(sk, child, req, own_req);
sk                202 net/dccp/minisocks.c 		req->rsk_ops->send_reset(sk, skb);
sk                204 net/dccp/minisocks.c 	inet_csk_reqsk_queue_drop(sk, req);
sk                245 net/dccp/minisocks.c void dccp_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
sk                 47 net/dccp/options.c int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
sk                 50 net/dccp/options.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                116 net/dccp/options.c 			dccp_pr_debug("%s opt: NDP count=%llu\n", dccp_role(sk),
sk                124 net/dccp/options.c 			rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,
sk                152 net/dccp/options.c 				      dccp_role(sk), ntohl(opt_val),
sk                156 net/dccp/options.c 			inet_csk_schedule_ack(sk);
sk                166 net/dccp/options.c 				      "ackno=%llu", dccp_role(sk),
sk                211 net/dccp/options.c 				      dccp_role(sk), elapsed_time);
sk                214 net/dccp/options.c 			if (ccid_hc_rx_parse_options(dp->dccps_hc_rx_ccid, sk,
sk                229 net/dccp/options.c 			if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
sk                235 net/dccp/options.c 				  "implemented, ignoring", sk, opt, len);
sk                255 net/dccp/options.c 	DCCP_WARN("DCCP(%p): Option %d (len=%d) error=%u\n", sk, opt, len, rc);
sk                308 net/dccp/options.c static int dccp_insert_option_ndp(struct sock *sk, struct sk_buff *skb)
sk                310 net/dccp/options.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                395 net/dccp/options.c static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
sk                397 net/dccp/options.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                545 net/dccp/options.c int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
sk                547 net/dccp/options.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                551 net/dccp/options.c 	if (dp->dccps_send_ndp_count && dccp_insert_option_ndp(sk, skb))
sk                568 net/dccp/options.c 		} else if (dccp_ackvec_pending(sk) &&
sk                569 net/dccp/options.c 			   dccp_insert_option_ackvec(sk, skb)) {
sk                575 net/dccp/options.c 		if (ccid_hc_rx_insert_options(dp->dccps_hc_rx_ccid, sk, skb))
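The net/dccp/options.c entries above walk the packet's option area as type-length-value records. Per RFC 4340, option types below 32 occupy a single byte, while larger types carry a length byte covering the whole option; a simplified, self-contained walker along those lines (the real dccp_parse_options() additionally handles Mandatory options, CCID-specific options and feature negotiation):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Walk a DCCP-style option area: types below 32 are single-byte options,
 * larger types carry a length byte that counts the whole option
 * (type + len + value).  Returns -1 on a malformed option area. */
static int walk_options(const uint8_t *opt, size_t optlen)
{
	size_t i = 0;

	while (i < optlen) {
		uint8_t type = opt[i];

		if (type < 32) {               /* single-byte option */
			printf("option %u (1 byte)\n", type);
			i++;
			continue;
		}
		if (i + 1 >= optlen)           /* truncated length byte */
			return -1;
		uint8_t len = opt[i + 1];
		if (len < 2 || i + len > optlen)
			return -1;             /* malformed option */
		printf("option %u, %u value bytes\n", type, len - 2);
		i += len;
	}
	return 0;
}

int main(void)
{
	/* Padding (type 0), then a hypothetical option 42 with two value bytes. */
	const uint8_t opts[] = { 0, 42, 4, 0xde, 0xad };
	return walk_options(opts, sizeof(opts)) ? 1 : 0;
}
```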
sk                 22 net/dccp/output.c static inline void dccp_event_ack_sent(struct sock *sk)
sk                 24 net/dccp/output.c 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
sk                 28 net/dccp/output.c static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
sk                 30 net/dccp/output.c 	skb_set_owner_w(skb, sk);
sk                 31 net/dccp/output.c 	WARN_ON(sk->sk_send_head);
sk                 32 net/dccp/output.c 	sk->sk_send_head = skb;
sk                 33 net/dccp/output.c 	return skb_clone(sk->sk_send_head, gfp_any());
sk                 42 net/dccp/output.c static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
sk                 45 net/dccp/output.c 		struct inet_sock *inet = inet_sk(sk);
sk                 46 net/dccp/output.c 		const struct inet_connection_sock *icsk = inet_csk(sk);
sk                 47 net/dccp/output.c 		struct dccp_sock *dp = dccp_sk(sk);
sk                 88 net/dccp/output.c 			WARN_ON(skb->sk);
sk                 89 net/dccp/output.c 			skb_set_owner_w(skb, sk);
sk                 93 net/dccp/output.c 		if (dccp_insert_options(sk, skb)) {
sk                110 net/dccp/output.c 		dccp_update_gss(sk, dcb->dccpd_seq);
sk                131 net/dccp/output.c 		icsk->icsk_af_ops->send_check(sk, skb);
sk                134 net/dccp/output.c 			dccp_event_ack_sent(sk);
sk                138 net/dccp/output.c 		err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
sk                159 net/dccp/output.c unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
sk                161 net/dccp/output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                162 net/dccp/output.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                195 net/dccp/output.c void dccp_write_space(struct sock *sk)
sk                200 net/dccp/output.c 	wq = rcu_dereference(sk->sk_wq);
sk                204 net/dccp/output.c 	if (sock_writeable(sk))
sk                205 net/dccp/output.c 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
sk                217 net/dccp/output.c static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
sk                222 net/dccp/output.c 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                223 net/dccp/output.c 	sk->sk_write_pending++;
sk                224 net/dccp/output.c 	release_sock(sk);
sk                228 net/dccp/output.c 	lock_sock(sk);
sk                229 net/dccp/output.c 	sk->sk_write_pending--;
sk                230 net/dccp/output.c 	finish_wait(sk_sleep(sk), &wait);
sk                232 net/dccp/output.c 	if (signal_pending(current) || sk->sk_err)
sk                241 net/dccp/output.c static void dccp_xmit_packet(struct sock *sk)
sk                244 net/dccp/output.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                245 net/dccp/output.c 	struct sk_buff *skb = dccp_qpolicy_pop(sk);
sk                251 net/dccp/output.c 	if (sk->sk_state == DCCP_PARTOPEN) {
sk                262 net/dccp/output.c 			dccp_send_ack(sk);
sk                266 net/dccp/output.c 		inet_csk_schedule_ack(sk);
sk                267 net/dccp/output.c 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
sk                268 net/dccp/output.c 					      inet_csk(sk)->icsk_rto,
sk                271 net/dccp/output.c 	} else if (dccp_ack_pending(sk)) {
sk                277 net/dccp/output.c 	err = dccp_transmit_skb(sk, skb);
sk                285 net/dccp/output.c 	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
sk                294 net/dccp/output.c 		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
sk                304 net/dccp/output.c void dccp_flush_write_queue(struct sock *sk, long *time_budget)
sk                306 net/dccp/output.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                310 net/dccp/output.c 	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
sk                311 net/dccp/output.c 		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
sk                326 net/dccp/output.c 			rc = dccp_wait_for_ccid(sk, delay);
sk                333 net/dccp/output.c 			dccp_xmit_packet(sk);
sk                336 net/dccp/output.c 			skb_dequeue(&sk->sk_write_queue);
sk                343 net/dccp/output.c void dccp_write_xmit(struct sock *sk)
sk                345 net/dccp/output.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                348 net/dccp/output.c 	while ((skb = dccp_qpolicy_top(sk))) {
sk                349 net/dccp/output.c 		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
sk                355 net/dccp/output.c 			sk_reset_timer(sk, &dp->dccps_xmit_timer,
sk                359 net/dccp/output.c 			dccp_xmit_packet(sk);
sk                362 net/dccp/output.c 			dccp_qpolicy_drop(sk, skb);
sk                377 net/dccp/output.c int dccp_retransmit_skb(struct sock *sk)
sk                379 net/dccp/output.c 	WARN_ON(sk->sk_send_head == NULL);
sk                381 net/dccp/output.c 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
sk                385 net/dccp/output.c 	inet_csk(sk)->icsk_retransmits++;
sk                387 net/dccp/output.c 	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
sk                390 net/dccp/output.c struct sk_buff *dccp_make_response(const struct sock *sk, struct dst_entry *dst,
sk                404 net/dccp/output.c 	skb = sock_wmalloc((struct sock *)sk, MAX_DCCP_HEADER, 1,
sk                453 net/dccp/output.c struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
sk                463 net/dccp/output.c 	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
sk                467 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
sk                505 net/dccp/output.c int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
sk                512 net/dccp/output.c 	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);
sk                517 net/dccp/output.c 	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
sk                522 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
sk                526 net/dccp/output.c 	return dccp_transmit_skb(sk, skb);
sk                532 net/dccp/output.c int dccp_connect(struct sock *sk)
sk                535 net/dccp/output.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                536 net/dccp/output.c 	struct dst_entry *dst = __sk_dst_get(sk);
sk                537 net/dccp/output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                539 net/dccp/output.c 	sk->sk_err = 0;
sk                540 net/dccp/output.c 	sock_reset_flag(sk, SOCK_DONE);
sk                542 net/dccp/output.c 	dccp_sync_mss(sk, dst_mtu(dst));
sk                545 net/dccp/output.c 	if (dccp_feat_finalise_settings(dccp_sk(sk)))
sk                551 net/dccp/output.c 	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
sk                556 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
sk                560 net/dccp/output.c 	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
sk                565 net/dccp/output.c 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk                572 net/dccp/output.c void dccp_send_ack(struct sock *sk)
sk                575 net/dccp/output.c 	if (sk->sk_state != DCCP_CLOSED) {
sk                576 net/dccp/output.c 		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
sk                580 net/dccp/output.c 			inet_csk_schedule_ack(sk);
sk                581 net/dccp/output.c 			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
sk                582 net/dccp/output.c 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
sk                589 net/dccp/output.c 		skb_reserve(skb, sk->sk_prot->max_header);
sk                591 net/dccp/output.c 		dccp_transmit_skb(sk, skb);
sk                599 net/dccp/output.c void dccp_send_delayed_ack(struct sock *sk)
sk                601 net/dccp/output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                617 net/dccp/output.c 			dccp_send_ack(sk);
sk                626 net/dccp/output.c 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
sk                630 net/dccp/output.c void dccp_send_sync(struct sock *sk, const u64 ackno,
sk                638 net/dccp/output.c 	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
sk                647 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
sk                655 net/dccp/output.c 	dccp_sk(sk)->dccps_sync_scheduled = 0;
sk                657 net/dccp/output.c 	dccp_transmit_skb(sk, skb);
sk                667 net/dccp/output.c void dccp_send_close(struct sock *sk, const int active)
sk                669 net/dccp/output.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                673 net/dccp/output.c 	skb = alloc_skb(sk->sk_prot->max_header, prio);
sk                678 net/dccp/output.c 	skb_reserve(skb, sk->sk_prot->max_header);
sk                685 net/dccp/output.c 		skb = dccp_skb_entail(sk, skb);
sk                696 net/dccp/output.c 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk                699 net/dccp/output.c 	dccp_transmit_skb(sk, skb);
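The net/dccp/output.c entries above show dccp_write_xmit() asking the TX CCID, for each queued packet, whether it may be sent now, must wait (arm the xmit timer), or should be dropped by the queueing policy. A condensed model of that pacing loop; the verdict enum is an illustrative simplification of the kernel's CCID dequeueing decisions:

```c
#include <stddef.h>
#include <stdio.h>

/* Verdicts a congestion-control module might hand back for the packet at
 * the head of the write queue (loosely modelled on the decisions used by
 * dccp_write_xmit(); the kernel encodes these differently). */
enum tx_verdict { TX_SEND_NOW, TX_DELAY, TX_DROP };

struct pkt { int id; enum tx_verdict verdict; };

static enum tx_verdict ccid_may_send(const struct pkt *p)
{
	return p->verdict;             /* stub: canned per-packet answer */
}

int main(void)
{
	struct pkt queue[] = {
		{ 1, TX_SEND_NOW }, { 2, TX_DROP }, { 3, TX_DELAY },
	};

	for (size_t i = 0; i < sizeof(queue) / sizeof(queue[0]); i++) {
		switch (ccid_may_send(&queue[i])) {
		case TX_SEND_NOW:
			printf("packet %d: transmit\n", queue[i].id);
			break;
		case TX_DELAY:
			printf("packet %d: arm xmit timer, stop for now\n",
			       queue[i].id);
			return 0;      /* wait for the timer to fire */
		case TX_DROP:
			printf("packet %d: drop per queueing policy\n",
			       queue[i].id);
			break;
		}
	}
	return 0;
}
```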
sk                 78 net/dccp/proto.c void dccp_set_state(struct sock *sk, const int state)
sk                 80 net/dccp/proto.c 	const int oldstate = sk->sk_state;
sk                 82 net/dccp/proto.c 	dccp_pr_debug("%s(%p)  %s  -->  %s\n", dccp_role(sk), sk,
sk                 92 net/dccp/proto.c 			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
sk                100 net/dccp/proto.c 		sk->sk_prot->unhash(sk);
sk                101 net/dccp/proto.c 		if (inet_csk(sk)->icsk_bind_hash != NULL &&
sk                102 net/dccp/proto.c 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
sk                103 net/dccp/proto.c 			inet_put_port(sk);
sk                113 net/dccp/proto.c 	inet_sk_set_state(sk, state);
sk                118 net/dccp/proto.c static void dccp_finish_passive_close(struct sock *sk)
sk                120 net/dccp/proto.c 	switch (sk->sk_state) {
sk                123 net/dccp/proto.c 		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
sk                124 net/dccp/proto.c 		dccp_set_state(sk, DCCP_CLOSED);
sk                131 net/dccp/proto.c 		dccp_send_close(sk, 1);
sk                132 net/dccp/proto.c 		dccp_set_state(sk, DCCP_CLOSING);
sk                136 net/dccp/proto.c void dccp_done(struct sock *sk)
sk                138 net/dccp/proto.c 	dccp_set_state(sk, DCCP_CLOSED);
sk                139 net/dccp/proto.c 	dccp_clear_xmit_timers(sk);
sk                141 net/dccp/proto.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk                143 net/dccp/proto.c 	if (!sock_flag(sk, SOCK_DEAD))
sk                144 net/dccp/proto.c 		sk->sk_state_change(sk);
sk                146 net/dccp/proto.c 		inet_csk_destroy_sock(sk);
sk                174 net/dccp/proto.c static void dccp_sk_destruct(struct sock *sk)
sk                176 net/dccp/proto.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                178 net/dccp/proto.c 	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
sk                180 net/dccp/proto.c 	inet_sock_destruct(sk);
sk                183 net/dccp/proto.c int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
sk                185 net/dccp/proto.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                186 net/dccp/proto.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                190 net/dccp/proto.c 	sk->sk_state		= DCCP_CLOSED;
sk                191 net/dccp/proto.c 	sk->sk_write_space	= dccp_write_space;
sk                192 net/dccp/proto.c 	sk->sk_destruct		= dccp_sk_destruct;
sk                200 net/dccp/proto.c 	dccp_init_xmit_timers(sk);
sk                205 net/dccp/proto.c 		return dccp_feat_init(sk);
sk                211 net/dccp/proto.c void dccp_destroy_sock(struct sock *sk)
sk                213 net/dccp/proto.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                215 net/dccp/proto.c 	__skb_queue_purge(&sk->sk_write_queue);
sk                216 net/dccp/proto.c 	if (sk->sk_send_head != NULL) {
sk                217 net/dccp/proto.c 		kfree_skb(sk->sk_send_head);
sk                218 net/dccp/proto.c 		sk->sk_send_head = NULL;
sk                222 net/dccp/proto.c 	if (inet_csk(sk)->icsk_bind_hash != NULL)
sk                223 net/dccp/proto.c 		inet_put_port(sk);
sk                232 net/dccp/proto.c 	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
sk                241 net/dccp/proto.c static inline int dccp_listen_start(struct sock *sk, int backlog)
sk                243 net/dccp/proto.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                249 net/dccp/proto.c 	return inet_csk_listen_start(sk, backlog);
sk                258 net/dccp/proto.c int dccp_disconnect(struct sock *sk, int flags)
sk                260 net/dccp/proto.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                261 net/dccp/proto.c 	struct inet_sock *inet = inet_sk(sk);
sk                262 net/dccp/proto.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                263 net/dccp/proto.c 	const int old_state = sk->sk_state;
sk                266 net/dccp/proto.c 		dccp_set_state(sk, DCCP_CLOSED);
sk                273 net/dccp/proto.c 		inet_csk_listen_stop(sk);
sk                275 net/dccp/proto.c 		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
sk                276 net/dccp/proto.c 		sk->sk_err = ECONNRESET;
sk                278 net/dccp/proto.c 		sk->sk_err = ECONNRESET;
sk                280 net/dccp/proto.c 	dccp_clear_xmit_timers(sk);
sk                281 net/dccp/proto.c 	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
sk                284 net/dccp/proto.c 	__skb_queue_purge(&sk->sk_receive_queue);
sk                285 net/dccp/proto.c 	__skb_queue_purge(&sk->sk_write_queue);
sk                286 net/dccp/proto.c 	if (sk->sk_send_head != NULL) {
sk                287 net/dccp/proto.c 		__kfree_skb(sk->sk_send_head);
sk                288 net/dccp/proto.c 		sk->sk_send_head = NULL;
sk                293 net/dccp/proto.c 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
sk                294 net/dccp/proto.c 		inet_reset_saddr(sk);
sk                296 net/dccp/proto.c 	sk->sk_shutdown = 0;
sk                297 net/dccp/proto.c 	sock_reset_flag(sk, SOCK_DONE);
sk                300 net/dccp/proto.c 	inet_csk_delack_init(sk);
sk                301 net/dccp/proto.c 	__sk_dst_reset(sk);
sk                305 net/dccp/proto.c 	sk->sk_error_report(sk);
sk                322 net/dccp/proto.c 	struct sock *sk = sock->sk;
sk                325 net/dccp/proto.c 	if (sk->sk_state == DCCP_LISTEN)
sk                326 net/dccp/proto.c 		return inet_csk_listen_poll(sk);
sk                334 net/dccp/proto.c 	if (sk->sk_err)
sk                337 net/dccp/proto.c 	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
sk                339 net/dccp/proto.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                343 net/dccp/proto.c 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
sk                344 net/dccp/proto.c 		if (atomic_read(&sk->sk_rmem_alloc) > 0)
sk                347 net/dccp/proto.c 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
sk                348 net/dccp/proto.c 			if (sk_stream_is_writeable(sk)) {
sk                351 net/dccp/proto.c 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                352 net/dccp/proto.c 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                358 net/dccp/proto.c 				if (sk_stream_is_writeable(sk))
sk                368 net/dccp/proto.c int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk                372 net/dccp/proto.c 	lock_sock(sk);
sk                374 net/dccp/proto.c 	if (sk->sk_state == DCCP_LISTEN)
sk                382 net/dccp/proto.c 		skb = skb_peek(&sk->sk_receive_queue);
sk                398 net/dccp/proto.c 	release_sock(sk);
sk                404 net/dccp/proto.c static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
sk                407 net/dccp/proto.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                429 net/dccp/proto.c 	lock_sock(sk);
sk                435 net/dccp/proto.c 	release_sock(sk);
sk                439 net/dccp/proto.c static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
sk                463 net/dccp/proto.c 	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);
sk                467 net/dccp/proto.c 			dccp_sk(sk)->dccps_pcrlen = cscov;
sk                469 net/dccp/proto.c 			dccp_sk(sk)->dccps_pcslen = cscov;
sk                475 net/dccp/proto.c static int dccp_setsockopt_ccid(struct sock *sk, int type,
sk                488 net/dccp/proto.c 	lock_sock(sk);
sk                490 net/dccp/proto.c 		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);
sk                493 net/dccp/proto.c 		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
sk                494 net/dccp/proto.c 	release_sock(sk);
sk                500 net/dccp/proto.c static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
sk                503 net/dccp/proto.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                517 net/dccp/proto.c 		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
sk                527 net/dccp/proto.c 		return dccp_setsockopt_service(sk, val, optval, optlen);
sk                529 net/dccp/proto.c 	lock_sock(sk);
sk                538 net/dccp/proto.c 		err = dccp_setsockopt_cscov(sk, val, false);
sk                541 net/dccp/proto.c 		err = dccp_setsockopt_cscov(sk, val, true);
sk                544 net/dccp/proto.c 		if (sk->sk_state != DCCP_CLOSED)
sk                561 net/dccp/proto.c 	release_sock(sk);
sk                566 net/dccp/proto.c int dccp_setsockopt(struct sock *sk, int level, int optname,
sk                570 net/dccp/proto.c 		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
sk                573 net/dccp/proto.c 	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
sk                579 net/dccp/proto.c int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
sk                583 net/dccp/proto.c 		return inet_csk_compat_setsockopt(sk, level, optname,
sk                585 net/dccp/proto.c 	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
sk                591 net/dccp/proto.c static int dccp_getsockopt_service(struct sock *sk, int len,
sk                595 net/dccp/proto.c 	const struct dccp_sock *dp = dccp_sk(sk);
sk                599 net/dccp/proto.c 	lock_sock(sk);
sk                615 net/dccp/proto.c 	release_sock(sk);
sk                619 net/dccp/proto.c static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
sk                631 net/dccp/proto.c 	dp = dccp_sk(sk);
sk                638 net/dccp/proto.c 		return dccp_getsockopt_service(sk, len,
sk                644 net/dccp/proto.c 		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
sk                671 net/dccp/proto.c 		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
sk                674 net/dccp/proto.c 		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
sk                687 net/dccp/proto.c int dccp_getsockopt(struct sock *sk, int level, int optname,
sk                691 net/dccp/proto.c 		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
sk                694 net/dccp/proto.c 	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
sk                700 net/dccp/proto.c int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
sk                704 net/dccp/proto.c 		return inet_csk_compat_getsockopt(sk, level, optname,
sk                706 net/dccp/proto.c 	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
sk                736 net/dccp/proto.c 		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
sk                752 net/dccp/proto.c int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk                754 net/dccp/proto.c 	const struct dccp_sock *dp = dccp_sk(sk);
sk                761 net/dccp/proto.c 	trace_dccp_probe(sk, len);
sk                766 net/dccp/proto.c 	lock_sock(sk);
sk                768 net/dccp/proto.c 	if (dccp_qpolicy_full(sk)) {
sk                773 net/dccp/proto.c 	timeo = sock_sndtimeo(sk, noblock);
sk                780 net/dccp/proto.c 	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
sk                781 net/dccp/proto.c 		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
sk                784 net/dccp/proto.c 	size = sk->sk_prot->max_header + len;
sk                785 net/dccp/proto.c 	release_sock(sk);
sk                786 net/dccp/proto.c 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
sk                787 net/dccp/proto.c 	lock_sock(sk);
sk                791 net/dccp/proto.c 	if (sk->sk_state == DCCP_CLOSED) {
sk                796 net/dccp/proto.c 	skb_reserve(skb, sk->sk_prot->max_header);
sk                805 net/dccp/proto.c 	dccp_qpolicy_push(sk, skb);
sk                812 net/dccp/proto.c 		dccp_write_xmit(sk);
sk                814 net/dccp/proto.c 	release_sock(sk);
sk                823 net/dccp/proto.c int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
sk                829 net/dccp/proto.c 	lock_sock(sk);
sk                831 net/dccp/proto.c 	if (sk->sk_state == DCCP_LISTEN) {
sk                836 net/dccp/proto.c 	timeo = sock_rcvtimeo(sk, nonblock);
sk                839 net/dccp/proto.c 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
sk                854 net/dccp/proto.c 				dccp_finish_passive_close(sk);
sk                864 net/dccp/proto.c 			sk_eat_skb(sk, skb);
sk                867 net/dccp/proto.c 		if (sock_flag(sk, SOCK_DONE)) {
sk                872 net/dccp/proto.c 		if (sk->sk_err) {
sk                873 net/dccp/proto.c 			len = sock_error(sk);
sk                877 net/dccp/proto.c 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
sk                882 net/dccp/proto.c 		if (sk->sk_state == DCCP_CLOSED) {
sk                883 net/dccp/proto.c 			if (!sock_flag(sk, SOCK_DONE)) {
sk                904 net/dccp/proto.c 		sk_wait_data(sk, &timeo, NULL);
sk                921 net/dccp/proto.c 			sk_eat_skb(sk, skb);
sk                925 net/dccp/proto.c 	release_sock(sk);
sk                933 net/dccp/proto.c 	struct sock *sk = sock->sk;
sk                937 net/dccp/proto.c 	lock_sock(sk);
sk                943 net/dccp/proto.c 	old_state = sk->sk_state;
sk                947 net/dccp/proto.c 	sk->sk_max_ack_backlog = backlog;
sk                956 net/dccp/proto.c 		err = dccp_listen_start(sk, backlog);
sk                963 net/dccp/proto.c 	release_sock(sk);
sk                969 net/dccp/proto.c static void dccp_terminate_connection(struct sock *sk)
sk                973 net/dccp/proto.c 	switch (sk->sk_state) {
sk                976 net/dccp/proto.c 		dccp_finish_passive_close(sk);
sk                979 net/dccp/proto.c 		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
sk                980 net/dccp/proto.c 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
sk                983 net/dccp/proto.c 		dccp_send_close(sk, 1);
sk                985 net/dccp/proto.c 		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
sk                986 net/dccp/proto.c 		    !dccp_sk(sk)->dccps_server_timewait)
sk                992 net/dccp/proto.c 		dccp_set_state(sk, next_state);
sk                996 net/dccp/proto.c void dccp_close(struct sock *sk, long timeout)
sk                998 net/dccp/proto.c 	struct dccp_sock *dp = dccp_sk(sk);
sk               1003 net/dccp/proto.c 	lock_sock(sk);
sk               1005 net/dccp/proto.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk               1007 net/dccp/proto.c 	if (sk->sk_state == DCCP_LISTEN) {
sk               1008 net/dccp/proto.c 		dccp_set_state(sk, DCCP_CLOSED);
sk               1011 net/dccp/proto.c 		inet_csk_listen_stop(sk);
sk               1016 net/dccp/proto.c 	sk_stop_timer(sk, &dp->dccps_xmit_timer);
sk               1023 net/dccp/proto.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk               1029 net/dccp/proto.c 	if (sk->sk_state == DCCP_CLOSED)
sk               1035 net/dccp/proto.c 		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
sk               1036 net/dccp/proto.c 		dccp_set_state(sk, DCCP_CLOSED);
sk               1037 net/dccp/proto.c 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
sk               1039 net/dccp/proto.c 		sk->sk_prot->disconnect(sk, 0);
sk               1040 net/dccp/proto.c 	} else if (sk->sk_state != DCCP_CLOSED) {
sk               1045 net/dccp/proto.c 		dccp_flush_write_queue(sk, &timeout);
sk               1046 net/dccp/proto.c 		dccp_terminate_connection(sk);
sk               1055 net/dccp/proto.c 	__skb_queue_purge(&sk->sk_write_queue);
sk               1057 net/dccp/proto.c 	sk_stream_wait_close(sk, timeout);
sk               1060 net/dccp/proto.c 	state = sk->sk_state;
sk               1061 net/dccp/proto.c 	sock_hold(sk);
sk               1062 net/dccp/proto.c 	sock_orphan(sk);
sk               1067 net/dccp/proto.c 	release_sock(sk);
sk               1073 net/dccp/proto.c 	bh_lock_sock(sk);
sk               1074 net/dccp/proto.c 	WARN_ON(sock_owned_by_user(sk));
sk               1076 net/dccp/proto.c 	percpu_counter_inc(sk->sk_prot->orphan_count);
sk               1079 net/dccp/proto.c 	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
sk               1082 net/dccp/proto.c 	if (sk->sk_state == DCCP_CLOSED)
sk               1083 net/dccp/proto.c 		inet_csk_destroy_sock(sk);
sk               1088 net/dccp/proto.c 	bh_unlock_sock(sk);
sk               1090 net/dccp/proto.c 	sock_put(sk);
sk               1095 net/dccp/proto.c void dccp_shutdown(struct sock *sk, int how)
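dccp_sendmsg() in the net/dccp/proto.c entries above sizes each skb as sk->sk_prot->max_header plus the payload and reserves that headroom with skb_reserve() before copying data in, so the DCCP, IP and link headers can later be prepended in place. A tiny buffer model of that reserve-then-push layout (purely illustrative, not the skb API):

```c
#include <stdio.h>
#include <string.h>

#define MAX_HEADER 32      /* illustrative headroom, like sk_prot->max_header */

struct buf {
	unsigned char data[128];
	size_t head;           /* start of payload */
	size_t tail;           /* end of payload   */
};

/* Leave room in front so headers can later be prepended without copying. */
static void buf_reserve(struct buf *b, size_t headroom)
{
	b->head = b->tail = headroom;
}

static void buf_put(struct buf *b, const void *p, size_t len)
{
	memcpy(b->data + b->tail, p, len);
	b->tail += len;
}

static void buf_push_header(struct buf *b, const void *p, size_t len)
{
	b->head -= len;
	memcpy(b->data + b->head, p, len);
}

int main(void)
{
	struct buf b;

	buf_reserve(&b, MAX_HEADER);
	buf_put(&b, "payload", 7);
	buf_push_header(&b, "HDR", 3);          /* e.g. the transport header */
	printf("frame spans bytes %zu..%zu\n", b.head, b.tail);
	return 0;
}
```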
sk                 15 net/dccp/qpolicy.c static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb)
sk                 17 net/dccp/qpolicy.c 	skb_queue_tail(&sk->sk_write_queue, skb);
sk                 20 net/dccp/qpolicy.c static bool qpolicy_simple_full(struct sock *sk)
sk                 22 net/dccp/qpolicy.c 	return dccp_sk(sk)->dccps_tx_qlen &&
sk                 23 net/dccp/qpolicy.c 	       sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen;
sk                 26 net/dccp/qpolicy.c static struct sk_buff *qpolicy_simple_top(struct sock *sk)
sk                 28 net/dccp/qpolicy.c 	return skb_peek(&sk->sk_write_queue);
sk                 36 net/dccp/qpolicy.c static struct sk_buff *qpolicy_prio_best_skb(struct sock *sk)
sk                 40 net/dccp/qpolicy.c 	skb_queue_walk(&sk->sk_write_queue, skb)
sk                 46 net/dccp/qpolicy.c static struct sk_buff *qpolicy_prio_worst_skb(struct sock *sk)
sk                 50 net/dccp/qpolicy.c 	skb_queue_walk(&sk->sk_write_queue, skb)
sk                 56 net/dccp/qpolicy.c static bool qpolicy_prio_full(struct sock *sk)
sk                 58 net/dccp/qpolicy.c 	if (qpolicy_simple_full(sk))
sk                 59 net/dccp/qpolicy.c 		dccp_qpolicy_drop(sk, qpolicy_prio_worst_skb(sk));
sk                 70 net/dccp/qpolicy.c 	void		(*push)	(struct sock *sk, struct sk_buff *skb);
sk                 71 net/dccp/qpolicy.c 	bool		(*full) (struct sock *sk);
sk                 72 net/dccp/qpolicy.c 	struct sk_buff*	(*top)  (struct sock *sk);
sk                 93 net/dccp/qpolicy.c void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb)
sk                 95 net/dccp/qpolicy.c 	qpol_table[dccp_sk(sk)->dccps_qpolicy].push(sk, skb);
sk                 98 net/dccp/qpolicy.c bool dccp_qpolicy_full(struct sock *sk)
sk                100 net/dccp/qpolicy.c 	return qpol_table[dccp_sk(sk)->dccps_qpolicy].full(sk);
sk                103 net/dccp/qpolicy.c void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb)
sk                106 net/dccp/qpolicy.c 		skb_unlink(skb, &sk->sk_write_queue);
sk                111 net/dccp/qpolicy.c struct sk_buff *dccp_qpolicy_top(struct sock *sk)
sk                113 net/dccp/qpolicy.c 	return qpol_table[dccp_sk(sk)->dccps_qpolicy].top(sk);
sk                116 net/dccp/qpolicy.c struct sk_buff *dccp_qpolicy_pop(struct sock *sk)
sk                118 net/dccp/qpolicy.c 	struct sk_buff *skb = dccp_qpolicy_top(sk);
sk                123 net/dccp/qpolicy.c 		skb_unlink(skb, &sk->sk_write_queue);
sk                128 net/dccp/qpolicy.c bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param)
sk                133 net/dccp/qpolicy.c 	return (qpol_table[dccp_sk(sk)->dccps_qpolicy].params & param) == param;
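
The qpolicy.c lines show every queue operation being routed through qpol_table[...] indexed by the socket's dccps_qpolicy, so dccp_qpolicy_push/full/top are thin wrappers around per-policy function pointers. Below is a self-contained, simplified C sketch of that dispatch-table shape; the names and the single FIFO policy are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct pkt { int prio; };

/* Per-policy operations, mirroring the push/full/top slots in the listing. */
struct qpolicy_ops {
	void (*push)(struct pkt *p);
	bool (*full)(void);
	struct pkt *(*top)(void);
};

/* "simple" policy: a bounded FIFO. */
#define QLEN 4
static struct pkt fifo[QLEN];
static int head, count;

static void simple_push(struct pkt *p) { if (count < QLEN) fifo[(head + count++) % QLEN] = *p; }
static bool simple_full(void)          { return count >= QLEN; }
static struct pkt *simple_top(void)    { return count ? &fifo[head] : NULL; }

/* Policy table indexed by a per-socket policy id, like qpol_table[]. */
static const struct qpolicy_ops qpol_table[] = {
	[0] = { .push = simple_push, .full = simple_full, .top = simple_top },
};

static int sock_policy;	/* stands in for dccp_sk(sk)->dccps_qpolicy */

static void qpolicy_push(struct pkt *p) { qpol_table[sock_policy].push(p); }
static bool qpolicy_full(void)          { return qpol_table[sock_policy].full(); }

int main(void)
{
	struct pkt p = { .prio = 1 };

	while (!qpolicy_full())
		qpolicy_push(&p);
	printf("queue full with %d packets\n", count);
	return 0;
}
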
sk                 20 net/dccp/timer.c static void dccp_write_err(struct sock *sk)
sk                 22 net/dccp/timer.c 	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
sk                 23 net/dccp/timer.c 	sk->sk_error_report(sk);
sk                 25 net/dccp/timer.c 	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
sk                 26 net/dccp/timer.c 	dccp_done(sk);
sk                 31 net/dccp/timer.c static int dccp_write_timeout(struct sock *sk)
sk                 33 net/dccp/timer.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                 36 net/dccp/timer.c 	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
sk                 38 net/dccp/timer.c 			dst_negative_advice(sk);
sk                 63 net/dccp/timer.c 			dst_negative_advice(sk);
sk                 74 net/dccp/timer.c 		dccp_write_err(sk);
sk                 83 net/dccp/timer.c static void dccp_retransmit_timer(struct sock *sk)
sk                 85 net/dccp/timer.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                 91 net/dccp/timer.c 	if (dccp_write_timeout(sk))
sk                101 net/dccp/timer.c 	if (dccp_retransmit_skb(sk) != 0) {
sk                108 net/dccp/timer.c 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk                118 net/dccp/timer.c 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
sk                121 net/dccp/timer.c 		__sk_dst_reset(sk);
sk                128 net/dccp/timer.c 	struct sock *sk = &icsk->icsk_inet.sk;
sk                131 net/dccp/timer.c 	bh_lock_sock(sk);
sk                132 net/dccp/timer.c 	if (sock_owned_by_user(sk)) {
sk                134 net/dccp/timer.c 		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
sk                139 net/dccp/timer.c 	if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
sk                143 net/dccp/timer.c 		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
sk                153 net/dccp/timer.c 		dccp_retransmit_timer(sk);
sk                157 net/dccp/timer.c 	bh_unlock_sock(sk);
sk                158 net/dccp/timer.c 	sock_put(sk);
sk                163 net/dccp/timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                166 net/dccp/timer.c 	sock_put(sk);
sk                174 net/dccp/timer.c 	struct sock *sk = &icsk->icsk_inet.sk;
sk                176 net/dccp/timer.c 	bh_lock_sock(sk);
sk                177 net/dccp/timer.c 	if (sock_owned_by_user(sk)) {
sk                180 net/dccp/timer.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
sk                181 net/dccp/timer.c 		sk_reset_timer(sk, &icsk->icsk_delack_timer,
sk                186 net/dccp/timer.c 	if (sk->sk_state == DCCP_CLOSED ||
sk                190 net/dccp/timer.c 		sk_reset_timer(sk, &icsk->icsk_delack_timer,
sk                197 net/dccp/timer.c 	if (inet_csk_ack_scheduled(sk)) {
sk                198 net/dccp/timer.c 		if (!inet_csk_in_pingpong_mode(sk)) {
sk                206 net/dccp/timer.c 			inet_csk_exit_pingpong_mode(sk);
sk                209 net/dccp/timer.c 		dccp_send_ack(sk);
sk                210 net/dccp/timer.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
sk                213 net/dccp/timer.c 	bh_unlock_sock(sk);
sk                214 net/dccp/timer.c 	sock_put(sk);
sk                223 net/dccp/timer.c 	struct sock *sk = (struct sock *)data;
sk                225 net/dccp/timer.c 	bh_lock_sock(sk);
sk                226 net/dccp/timer.c 	if (sock_owned_by_user(sk))
sk                227 net/dccp/timer.c 		sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
sk                229 net/dccp/timer.c 		dccp_write_xmit(sk);
sk                230 net/dccp/timer.c 	bh_unlock_sock(sk);
sk                231 net/dccp/timer.c 	sock_put(sk);
sk                237 net/dccp/timer.c 	struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
sk                239 net/dccp/timer.c 	dccp_write_xmitlet((unsigned long)sk);
sk                242 net/dccp/timer.c void dccp_init_xmit_timers(struct sock *sk)
sk                244 net/dccp/timer.c 	struct dccp_sock *dp = dccp_sk(sk);
sk                246 net/dccp/timer.c 	tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
sk                248 net/dccp/timer.c 	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
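
The timer.c entries show the usual pattern for BH timers against a lockable socket: bh_lock_sock(), and if sock_owned_by_user() the handler does no work and re-arms itself shortly via sk_reset_timer(); otherwise it retransmits and re-arms with the backed-off RTO. Here is a toy single-threaded sketch of that "defer if owned" decision; all names and the tick loop are hypothetical.

#include <stdbool.h>
#include <stdio.h>

struct toy_timer_sock {
	bool owned_by_user;	/* process context currently holds the lock */
	unsigned long rto;	/* current retransmit timeout, in ticks */
	unsigned long next_fire;/* absolute tick of the next expiry */
	int retransmits;
};

/* Re-arm, like sk_reset_timer(sk, ..., jiffies + delay). */
static void reset_timer(struct toy_timer_sock *sk, unsigned long now, unsigned long delay)
{
	sk->next_fire = now + delay;
}

/* Expiry handler modelled on the shape of dccp_write_timer() above. */
static void write_timer(struct toy_timer_sock *sk, unsigned long now)
{
	if (sk->owned_by_user) {
		/* Can't take the socket now: try again shortly. */
		reset_timer(sk, now, 1);
		return;
	}
	/* Do the real work, then back off and re-arm. */
	sk->retransmits++;
	sk->rto *= 2;
	reset_timer(sk, now, sk->rto);
}

int main(void)
{
	struct toy_timer_sock sk = { .owned_by_user = true, .rto = 4, .next_fire = 0 };

	for (unsigned long tick = 0; tick < 20; tick++) {
		if (tick == 3)
			sk.owned_by_user = false;	/* user releases the socket */
		if (tick == sk.next_fire)
			write_timer(&sk, tick);
	}
	printf("retransmits=%d rto=%lu\n", sk.retransmits, sk.rto);
	return 0;
}
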
sk                 16 net/dccp/trace.h 	TP_PROTO(struct sock *sk, size_t size),
sk                 18 net/dccp/trace.h 	TP_ARGS(sk, size),
sk                 37 net/dccp/trace.h 		const struct inet_sock *inet = inet_sk(sk);
sk                 40 net/dccp/trace.h 		if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3)
sk                 41 net/dccp/trace.h 			hc = ccid3_hc_tx_sk(sk);
sk                 46 net/dccp/trace.h 		TP_STORE_ADDR_PORTS(__entry, inet, sk);
sk                136 net/decnet/af_decnet.c 	struct sock sk;
sk                140 net/decnet/af_decnet.c static void dn_keepalive(struct sock *sk);
sk                156 net/decnet/af_decnet.c static struct hlist_head *dn_find_list(struct sock *sk)
sk                158 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                171 net/decnet/af_decnet.c 	struct sock *sk;
sk                176 net/decnet/af_decnet.c 	sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
sk                177 net/decnet/af_decnet.c 		struct dn_scp *scp = DN_SK(sk);
sk                184 net/decnet/af_decnet.c static unsigned short port_alloc(struct sock *sk)
sk                186 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                205 net/decnet/af_decnet.c static int dn_hash_sock(struct sock *sk)
sk                207 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                211 net/decnet/af_decnet.c 	BUG_ON(sk_hashed(sk));
sk                215 net/decnet/af_decnet.c 	if (!scp->addrloc && !port_alloc(sk))
sk                219 net/decnet/af_decnet.c 	if ((list = dn_find_list(sk)) == NULL)
sk                222 net/decnet/af_decnet.c 	sk_add_node(sk, list);
sk                229 net/decnet/af_decnet.c static void dn_unhash_sock(struct sock *sk)
sk                232 net/decnet/af_decnet.c 	sk_del_node_init(sk);
sk                236 net/decnet/af_decnet.c static void dn_unhash_sock_bh(struct sock *sk)
sk                239 net/decnet/af_decnet.c 	sk_del_node_init(sk);
sk                264 net/decnet/af_decnet.c static void dn_rehash_sock(struct sock *sk)
sk                267 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                273 net/decnet/af_decnet.c 	sk_del_node_init(sk);
sk                274 net/decnet/af_decnet.c 	DN_SK(sk)->addrloc = 0;
sk                275 net/decnet/af_decnet.c 	list = listen_hash(&DN_SK(sk)->addr);
sk                276 net/decnet/af_decnet.c 	sk_add_node(sk, list);
sk                370 net/decnet/af_decnet.c 	struct sock *sk;
sk                373 net/decnet/af_decnet.c 	sk_for_each(sk, list) {
sk                374 net/decnet/af_decnet.c 		struct dn_scp *scp = DN_SK(sk);
sk                375 net/decnet/af_decnet.c 		if (sk->sk_state != TCP_LISTEN)
sk                388 net/decnet/af_decnet.c 		sock_hold(sk);
sk                390 net/decnet/af_decnet.c 		return sk;
sk                393 net/decnet/af_decnet.c 	sk = sk_head(&dn_wild_sk);
sk                394 net/decnet/af_decnet.c 	if (sk) {
sk                395 net/decnet/af_decnet.c 		if (sk->sk_state == TCP_LISTEN)
sk                396 net/decnet/af_decnet.c 			sock_hold(sk);
sk                398 net/decnet/af_decnet.c 			sk = NULL;
sk                402 net/decnet/af_decnet.c 	return sk;
sk                408 net/decnet/af_decnet.c 	struct sock *sk;
sk                412 net/decnet/af_decnet.c 	sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
sk                413 net/decnet/af_decnet.c 		scp = DN_SK(sk);
sk                420 net/decnet/af_decnet.c 		sock_hold(sk);
sk                423 net/decnet/af_decnet.c 	sk = NULL;
sk                426 net/decnet/af_decnet.c 	return sk;
sk                431 net/decnet/af_decnet.c static void dn_destruct(struct sock *sk)
sk                433 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                439 net/decnet/af_decnet.c 	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
sk                444 net/decnet/af_decnet.c static void dn_enter_memory_pressure(struct sock *sk)
sk                467 net/decnet/af_decnet.c 	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto, kern);
sk                469 net/decnet/af_decnet.c 	if  (!sk)
sk                474 net/decnet/af_decnet.c 	sock_init_data(sock, sk);
sk                476 net/decnet/af_decnet.c 	sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
sk                477 net/decnet/af_decnet.c 	sk->sk_destruct    = dn_destruct;
sk                478 net/decnet/af_decnet.c 	sk->sk_no_check_tx = 1;
sk                479 net/decnet/af_decnet.c 	sk->sk_family      = PF_DECnet;
sk                480 net/decnet/af_decnet.c 	sk->sk_protocol    = 0;
sk                481 net/decnet/af_decnet.c 	sk->sk_allocation  = gfp;
sk                482 net/decnet/af_decnet.c 	sk->sk_sndbuf	   = sysctl_decnet_wmem[1];
sk                483 net/decnet/af_decnet.c 	sk->sk_rcvbuf	   = sysctl_decnet_rmem[1];
sk                486 net/decnet/af_decnet.c 	scp = DN_SK(sk);
sk                528 net/decnet/af_decnet.c 	dn_start_slow_timer(sk);
sk                530 net/decnet/af_decnet.c 	return sk;
sk                537 net/decnet/af_decnet.c static void dn_keepalive(struct sock *sk)
sk                539 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                547 net/decnet/af_decnet.c 		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
sk                558 net/decnet/af_decnet.c int dn_destroy_timer(struct sock *sk)
sk                560 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                562 net/decnet/af_decnet.c 	scp->persist = dn_nsp_persist(sk);
sk                566 net/decnet/af_decnet.c 		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
sk                572 net/decnet/af_decnet.c 		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
sk                580 net/decnet/af_decnet.c 			dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
sk                588 net/decnet/af_decnet.c 	if (sk->sk_socket)
sk                592 net/decnet/af_decnet.c 		dn_unhash_sock(sk);
sk                593 net/decnet/af_decnet.c 		sock_put(sk);
sk                600 net/decnet/af_decnet.c static void dn_destroy_sock(struct sock *sk)
sk                602 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                606 net/decnet/af_decnet.c 	if (sk->sk_socket) {
sk                607 net/decnet/af_decnet.c 		if (sk->sk_socket->state != SS_UNCONNECTED)
sk                608 net/decnet/af_decnet.c 			sk->sk_socket->state = SS_DISCONNECTING;
sk                611 net/decnet/af_decnet.c 	sk->sk_state = TCP_CLOSE;
sk                615 net/decnet/af_decnet.c 		dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
sk                616 net/decnet/af_decnet.c 				 sk->sk_allocation);
sk                618 net/decnet/af_decnet.c 		scp->persist = dn_nsp_persist(sk);
sk                629 net/decnet/af_decnet.c 		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
sk                640 net/decnet/af_decnet.c 		scp->persist = dn_nsp_persist(sk);
sk                646 net/decnet/af_decnet.c 		dn_stop_slow_timer(sk);
sk                648 net/decnet/af_decnet.c 		dn_unhash_sock_bh(sk);
sk                649 net/decnet/af_decnet.c 		sock_put(sk);
sk                671 net/decnet/af_decnet.c 	struct sock *sk;
sk                691 net/decnet/af_decnet.c 	if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL, kern)) == NULL)
sk                694 net/decnet/af_decnet.c 	sk->sk_protocol = protocol;
sk                703 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk                705 net/decnet/af_decnet.c 	if (sk) {
sk                706 net/decnet/af_decnet.c 		sock_orphan(sk);
sk                707 net/decnet/af_decnet.c 		sock_hold(sk);
sk                708 net/decnet/af_decnet.c 		lock_sock(sk);
sk                709 net/decnet/af_decnet.c 		dn_destroy_sock(sk);
sk                710 net/decnet/af_decnet.c 		release_sock(sk);
sk                711 net/decnet/af_decnet.c 		sock_put(sk);
sk                719 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk                720 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                763 net/decnet/af_decnet.c 	lock_sock(sk);
sk                764 net/decnet/af_decnet.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk                766 net/decnet/af_decnet.c 		sock_reset_flag(sk, SOCK_ZAPPED);
sk                768 net/decnet/af_decnet.c 		rv = dn_hash_sock(sk);
sk                770 net/decnet/af_decnet.c 			sock_set_flag(sk, SOCK_ZAPPED);
sk                772 net/decnet/af_decnet.c 	release_sock(sk);
sk                780 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk                781 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                784 net/decnet/af_decnet.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                807 net/decnet/af_decnet.c 		rv = dn_hash_sock(sk);
sk                809 net/decnet/af_decnet.c 			sock_set_flag(sk, SOCK_ZAPPED);
sk                815 net/decnet/af_decnet.c static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
sk                817 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                825 net/decnet/af_decnet.c 	scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
sk                826 net/decnet/af_decnet.c 	dn_send_conn_conf(sk, allocation);
sk                828 net/decnet/af_decnet.c 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                830 net/decnet/af_decnet.c 		release_sock(sk);
sk                833 net/decnet/af_decnet.c 		lock_sock(sk);
sk                837 net/decnet/af_decnet.c 		err = sock_error(sk);
sk                846 net/decnet/af_decnet.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                848 net/decnet/af_decnet.c 	finish_wait(sk_sleep(sk), &wait);
sk                850 net/decnet/af_decnet.c 		sk->sk_socket->state = SS_CONNECTED;
sk                852 net/decnet/af_decnet.c 		sk->sk_socket->state = SS_UNCONNECTED;
sk                857 net/decnet/af_decnet.c static int dn_wait_run(struct sock *sk, long *timeo)
sk                859 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                869 net/decnet/af_decnet.c 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                871 net/decnet/af_decnet.c 		release_sock(sk);
sk                874 net/decnet/af_decnet.c 		lock_sock(sk);
sk                878 net/decnet/af_decnet.c 		err = sock_error(sk);
sk                887 net/decnet/af_decnet.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                889 net/decnet/af_decnet.c 	finish_wait(sk_sleep(sk), &wait);
sk                892 net/decnet/af_decnet.c 		sk->sk_socket->state = SS_CONNECTED;
sk                894 net/decnet/af_decnet.c 		sk->sk_socket->state = SS_UNCONNECTED;
sk                899 net/decnet/af_decnet.c static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
sk                901 net/decnet/af_decnet.c 	struct socket *sock = sk->sk_socket;
sk                902 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                921 net/decnet/af_decnet.c 		return dn_wait_run(sk, timeo);
sk                935 net/decnet/af_decnet.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk                936 net/decnet/af_decnet.c 		err = dn_auto_bind(sk->sk_socket);
sk                945 net/decnet/af_decnet.c 	fld.flowidn_oif = sk->sk_bound_dev_if;
sk                950 net/decnet/af_decnet.c 	if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
sk                952 net/decnet/af_decnet.c 	dst = __sk_dst_get(sk);
sk                953 net/decnet/af_decnet.c 	sk->sk_route_caps = dst->dev->features;
sk                958 net/decnet/af_decnet.c 	dn_nsp_send_conninit(sk, NSP_CI);
sk                961 net/decnet/af_decnet.c 		err = dn_wait_run(sk, timeo);
sk                970 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk                972 net/decnet/af_decnet.c 	long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
sk                974 net/decnet/af_decnet.c 	lock_sock(sk);
sk                975 net/decnet/af_decnet.c 	err = __dn_connect(sk, addr, addrlen, &timeo, 0);
sk                976 net/decnet/af_decnet.c 	release_sock(sk);
sk                981 net/decnet/af_decnet.c static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
sk                983 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk                989 net/decnet/af_decnet.c 		return dn_confirm_accept(sk, timeo, sk->sk_allocation);
sk                992 net/decnet/af_decnet.c 		return dn_wait_run(sk, timeo);
sk                994 net/decnet/af_decnet.c 		return __dn_connect(sk, addr, addrlen, timeo, flags);
sk               1032 net/decnet/af_decnet.c static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
sk               1038 net/decnet/af_decnet.c 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               1040 net/decnet/af_decnet.c 		release_sock(sk);
sk               1041 net/decnet/af_decnet.c 		skb = skb_dequeue(&sk->sk_receive_queue);
sk               1044 net/decnet/af_decnet.c 			skb = skb_dequeue(&sk->sk_receive_queue);
sk               1046 net/decnet/af_decnet.c 		lock_sock(sk);
sk               1050 net/decnet/af_decnet.c 		if (sk->sk_state != TCP_LISTEN)
sk               1058 net/decnet/af_decnet.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               1060 net/decnet/af_decnet.c 	finish_wait(sk_sleep(sk), &wait);
sk               1068 net/decnet/af_decnet.c 	struct sock *sk = sock->sk, *newsk;
sk               1074 net/decnet/af_decnet.c 	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk               1077 net/decnet/af_decnet.c 	lock_sock(sk);
sk               1079 net/decnet/af_decnet.c 	if (sk->sk_state != TCP_LISTEN || DN_SK(sk)->state != DN_O) {
sk               1080 net/decnet/af_decnet.c 		release_sock(sk);
sk               1084 net/decnet/af_decnet.c 	skb = skb_dequeue(&sk->sk_receive_queue);
sk               1086 net/decnet/af_decnet.c 		skb = dn_wait_for_connect(sk, &timeo);
sk               1088 net/decnet/af_decnet.c 			release_sock(sk);
sk               1094 net/decnet/af_decnet.c 	sk->sk_ack_backlog--;
sk               1095 net/decnet/af_decnet.c 	newsk = dn_alloc_sock(sock_net(sk), newsock, sk->sk_allocation, kern);
sk               1097 net/decnet/af_decnet.c 		release_sock(sk);
sk               1101 net/decnet/af_decnet.c 	release_sock(sk);
sk               1112 net/decnet/af_decnet.c 	DN_SK(newsk)->accept_mode  = DN_SK(sk)->accept_mode;
sk               1121 net/decnet/af_decnet.c 	memcpy(&(DN_SK(newsk)->addr), &(DN_SK(sk)->addr), sizeof(struct sockaddr_dn));
sk               1151 net/decnet/af_decnet.c 	memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
sk               1153 net/decnet/af_decnet.c 	memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
sk               1168 net/decnet/af_decnet.c 						sk->sk_allocation);
sk               1178 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk               1179 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk               1181 net/decnet/af_decnet.c 	lock_sock(sk);
sk               1187 net/decnet/af_decnet.c 			release_sock(sk);
sk               1196 net/decnet/af_decnet.c 	release_sock(sk);
sk               1204 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk               1205 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk               1216 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk               1217 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk               1230 net/decnet/af_decnet.c 		lock_sock(sk);
sk               1234 net/decnet/af_decnet.c 		release_sock(sk);
sk               1238 net/decnet/af_decnet.c 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
sk               1245 net/decnet/af_decnet.c 		lock_sock(sk);
sk               1250 net/decnet/af_decnet.c 			skb_queue_walk(&sk->sk_receive_queue, skb)
sk               1253 net/decnet/af_decnet.c 		release_sock(sk);
sk               1267 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk               1270 net/decnet/af_decnet.c 	lock_sock(sk);
sk               1272 net/decnet/af_decnet.c 	if (sock_flag(sk, SOCK_ZAPPED))
sk               1275 net/decnet/af_decnet.c 	if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
sk               1278 net/decnet/af_decnet.c 	sk->sk_max_ack_backlog = backlog;
sk               1279 net/decnet/af_decnet.c 	sk->sk_ack_backlog     = 0;
sk               1280 net/decnet/af_decnet.c 	sk->sk_state           = TCP_LISTEN;
sk               1282 net/decnet/af_decnet.c 	dn_rehash_sock(sk);
sk               1285 net/decnet/af_decnet.c 	release_sock(sk);
sk               1293 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk               1294 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk               1297 net/decnet/af_decnet.c 	lock_sock(sk);
sk               1313 net/decnet/af_decnet.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk               1314 net/decnet/af_decnet.c 	dn_destroy_sock(sk);
sk               1318 net/decnet/af_decnet.c 	release_sock(sk);
sk               1325 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk               1328 net/decnet/af_decnet.c 	lock_sock(sk);
sk               1330 net/decnet/af_decnet.c 	release_sock(sk);
sk               1335 net/decnet/af_decnet.c 		err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
sk               1343 net/decnet/af_decnet.c 	struct	sock *sk = sock->sk;
sk               1344 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk               1431 net/decnet/af_decnet.c 		timeo = sock_rcvtimeo(sk, 0);
sk               1432 net/decnet/af_decnet.c 		err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
sk               1440 net/decnet/af_decnet.c 		sk->sk_shutdown = SHUTDOWN_MASK;
sk               1441 net/decnet/af_decnet.c 		dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
sk               1504 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk               1507 net/decnet/af_decnet.c 	lock_sock(sk);
sk               1509 net/decnet/af_decnet.c 	release_sock(sk);
sk               1519 net/decnet/af_decnet.c 		err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
sk               1530 net/decnet/af_decnet.c 	struct	sock *sk = sock->sk;
sk               1531 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk               1640 net/decnet/af_decnet.c static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
sk               1654 net/decnet/af_decnet.c 			if (sk->sk_type == SOCK_SEQPACKET)
sk               1673 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk               1674 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk               1675 net/decnet/af_decnet.c 	struct sk_buff_head *queue = &sk->sk_receive_queue;
sk               1682 net/decnet/af_decnet.c 	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               1684 net/decnet/af_decnet.c 	lock_sock(sk);
sk               1686 net/decnet/af_decnet.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk               1691 net/decnet/af_decnet.c 	if (sk->sk_shutdown & RCV_SHUTDOWN) {
sk               1696 net/decnet/af_decnet.c 	rv = dn_check_state(sk, NULL, 0, &timeo, flags);
sk               1718 net/decnet/af_decnet.c 		if (sk->sk_err)
sk               1739 net/decnet/af_decnet.c 		if (dn_data_ready(sk, queue, flags, target))
sk               1747 net/decnet/af_decnet.c 		add_wait_queue(sk_sleep(sk), &wait);
sk               1748 net/decnet/af_decnet.c 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               1749 net/decnet/af_decnet.c 		sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait);
sk               1750 net/decnet/af_decnet.c 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               1751 net/decnet/af_decnet.c 		remove_wait_queue(sk_sleep(sk), &wait);
sk               1779 net/decnet/af_decnet.c 			if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
sk               1781 net/decnet/af_decnet.c 				dn_nsp_send_link(sk, DN_SEND, 0);
sk               1786 net/decnet/af_decnet.c 			if (sk->sk_type == SOCK_SEQPACKET)
sk               1802 net/decnet/af_decnet.c 	if (eor && (sk->sk_type == SOCK_SEQPACKET))
sk               1807 net/decnet/af_decnet.c 		rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);
sk               1815 net/decnet/af_decnet.c 	release_sock(sk);
sk               1868 net/decnet/af_decnet.c static inline unsigned int dn_current_mss(struct sock *sk, int flags)
sk               1870 net/decnet/af_decnet.c 	struct dst_entry *dst = __sk_dst_get(sk);
sk               1871 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk               1893 net/decnet/af_decnet.c static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
sk               1897 net/decnet/af_decnet.c 	struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
sk               1908 net/decnet/af_decnet.c 	struct sock *sk = sock->sk;
sk               1909 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
sk               1929 net/decnet/af_decnet.c 	lock_sock(sk);
sk               1930 net/decnet/af_decnet.c 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk               1945 net/decnet/af_decnet.c 	err = dn_check_state(sk, addr, addr_len, &timeo, flags);
sk               1949 net/decnet/af_decnet.c 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
sk               1956 net/decnet/af_decnet.c 	if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
sk               1957 net/decnet/af_decnet.c 		dst_negative_advice(sk);
sk               1962 net/decnet/af_decnet.c 	mss = dn_current_mss(sk, flags);
sk               1975 net/decnet/af_decnet.c 		err = sock_error(sk);
sk               2004 net/decnet/af_decnet.c 			add_wait_queue(sk_sleep(sk), &wait);
sk               2005 net/decnet/af_decnet.c 			sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               2006 net/decnet/af_decnet.c 			sk_wait_event(sk, &timeo,
sk               2008 net/decnet/af_decnet.c 			sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               2009 net/decnet/af_decnet.c 			remove_wait_queue(sk_sleep(sk), &wait);
sk               2019 net/decnet/af_decnet.c 		skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
sk               2059 net/decnet/af_decnet.c 		dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
sk               2062 net/decnet/af_decnet.c 		scp->persist = dn_nsp_persist(sk);
sk               2069 net/decnet/af_decnet.c 	release_sock(sk);
sk               2074 net/decnet/af_decnet.c 	err = sk_stream_error(sk, flags, err);
sk               2075 net/decnet/af_decnet.c 	release_sock(sk);
sk               2150 net/decnet/af_decnet.c 	struct sock *sk = dn_socket_get_first(seq);
sk               2152 net/decnet/af_decnet.c 	if (sk) {
sk               2153 net/decnet/af_decnet.c 		while(*pos && (sk = dn_socket_get_next(seq, sk)))
sk               2156 net/decnet/af_decnet.c 	return *pos ? NULL : sk;
sk               2259 net/decnet/af_decnet.c static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk)
sk               2261 net/decnet/af_decnet.c 	struct dn_scp *scp = DN_SK(sk);
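
The af_decnet.c entries show sockets being hashed by local port into dn_sk_hash[port & DN_SK_HASH_MASK]: port_alloc() finds an unused port before dn_hash_sock() links the socket onto the right bucket, and dn_rehash_sock() later moves listeners to the listen hash. The following is a compact user-space sketch of the bucket-and-allocate part only; the hash size and all names are invented for the example.

#include <stdio.h>

#define HASH_SIZE 16
#define HASH_MASK (HASH_SIZE - 1)

struct toy_sk {
	unsigned short port;
	struct toy_sk *next;	/* hash-chain link */
};

static struct toy_sk *hash_tbl[HASH_SIZE];

static struct toy_sk *lookup(unsigned short port)
{
	for (struct toy_sk *s = hash_tbl[port & HASH_MASK]; s; s = s->next)
		if (s->port == port)
			return s;
	return NULL;
}

/* Like port_alloc(): pick the next port that nobody is using yet. */
static unsigned short port_alloc(void)
{
	static unsigned short next = 1024;

	while (lookup(next))
		next++;
	return next++;
}

/* Like dn_hash_sock(): allocate a port if needed, then link into its bucket. */
static void hash_sock(struct toy_sk *sk)
{
	if (!sk->port)
		sk->port = port_alloc();
	struct toy_sk **bucket = &hash_tbl[sk->port & HASH_MASK];
	sk->next = *bucket;
	*bucket = sk;
}

int main(void)
{
	struct toy_sk a = {0}, b = {0};

	hash_sock(&a);
	hash_sock(&b);
	printf("a.port=%u b.port=%u same bucket=%d\n",
	       a.port, b.port, (a.port & HASH_MASK) == (b.port & HASH_MASK));
	return 0;
}
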
sk                572 net/decnet/dn_dev.c 	struct net *net = sock_net(skb->sk);
sk                618 net/decnet/dn_dev.c 	struct net *net = sock_net(skb->sk);
sk                743 net/decnet/dn_dev.c 	struct net *net = sock_net(skb->sk);
sk                508 net/decnet/dn_fib.c 	struct net *net = sock_net(skb->sk);
sk                535 net/decnet/dn_fib.c 	struct net *net = sock_net(skb->sk);
sk                198 net/decnet/dn_neigh.c static int dn_neigh_output_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                210 net/decnet/dn_neigh.c static int dn_long_output(struct neighbour *neigh, struct sock *sk,
sk                251 net/decnet/dn_neigh.c 		       &init_net, sk, skb, NULL, neigh->dev,
sk                258 net/decnet/dn_neigh.c static int dn_short_output(struct neighbour *neigh, struct sock *sk,
sk                292 net/decnet/dn_neigh.c 		       &init_net, sk, skb, NULL, neigh->dev,
sk                301 net/decnet/dn_neigh.c static int dn_phase3_output(struct neighbour *neigh, struct sock *sk,
sk                334 net/decnet/dn_neigh.c 		       &init_net, sk, skb, NULL, neigh->dev,
sk                338 net/decnet/dn_neigh.c int dn_to_neigh_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                357 net/decnet/dn_neigh.c 		return dn_phase3_output(neigh, sk, skb);
sk                359 net/decnet/dn_neigh.c 		return dn_long_output(neigh, sk, skb);
sk                361 net/decnet/dn_neigh.c 		return dn_short_output(neigh, sk, skb);
sk                382 net/decnet/dn_neigh.c int dn_neigh_router_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                444 net/decnet/dn_neigh.c int dn_neigh_endnode_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 92 net/decnet/dn_nsp_in.c static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
sk                 94 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                102 net/decnet/dn_nsp_in.c 			wakeup |= dn_nsp_check_xmit_queue(sk, skb,
sk                112 net/decnet/dn_nsp_in.c 			wakeup |= dn_nsp_check_xmit_queue(sk, skb,
sk                121 net/decnet/dn_nsp_in.c 	if (wakeup && !sock_flag(sk, SOCK_DEAD))
sk                122 net/decnet/dn_nsp_in.c 		sk->sk_state_change(sk);
sk                128 net/decnet/dn_nsp_in.c static int dn_process_ack(struct sock *sk, struct sk_buff *skb, int oth)
sk                144 net/decnet/dn_nsp_in.c 			dn_ack(sk, skb, ack);
sk                157 net/decnet/dn_nsp_in.c 			dn_ack(sk, skb, ack);
sk                324 net/decnet/dn_nsp_in.c static void dn_nsp_conn_init(struct sock *sk, struct sk_buff *skb)
sk                326 net/decnet/dn_nsp_in.c 	if (sk_acceptq_is_full(sk)) {
sk                331 net/decnet/dn_nsp_in.c 	sk->sk_ack_backlog++;
sk                332 net/decnet/dn_nsp_in.c 	skb_queue_tail(&sk->sk_receive_queue, skb);
sk                333 net/decnet/dn_nsp_in.c 	sk->sk_state_change(sk);
sk                336 net/decnet/dn_nsp_in.c static void dn_nsp_conn_conf(struct sock *sk, struct sk_buff *skb)
sk                339 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                353 net/decnet/dn_nsp_in.c 		sk->sk_state = TCP_ESTABLISHED;
sk                370 net/decnet/dn_nsp_in.c 		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
sk                371 net/decnet/dn_nsp_in.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                372 net/decnet/dn_nsp_in.c 			sk->sk_state_change(sk);
sk                379 net/decnet/dn_nsp_in.c static void dn_nsp_conn_ack(struct sock *sk, struct sk_buff *skb)
sk                381 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                391 net/decnet/dn_nsp_in.c static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
sk                393 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                416 net/decnet/dn_nsp_in.c 	sk->sk_state = TCP_CLOSE;
sk                422 net/decnet/dn_nsp_in.c 		sk->sk_err = ECONNREFUSED;
sk                425 net/decnet/dn_nsp_in.c 		sk->sk_shutdown |= SHUTDOWN_MASK;
sk                433 net/decnet/dn_nsp_in.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                434 net/decnet/dn_nsp_in.c 		if (sk->sk_socket->state != SS_UNCONNECTED)
sk                435 net/decnet/dn_nsp_in.c 			sk->sk_socket->state = SS_DISCONNECTING;
sk                436 net/decnet/dn_nsp_in.c 		sk->sk_state_change(sk);
sk                446 net/decnet/dn_nsp_in.c 		dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
sk                449 net/decnet/dn_nsp_in.c 	scp->persist = dn_nsp_persist(sk);
sk                459 net/decnet/dn_nsp_in.c static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
sk                461 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                469 net/decnet/dn_nsp_in.c 	sk->sk_state = TCP_CLOSE;
sk                485 net/decnet/dn_nsp_in.c 		sk->sk_shutdown |= SHUTDOWN_MASK;
sk                491 net/decnet/dn_nsp_in.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                492 net/decnet/dn_nsp_in.c 		if (sk->sk_socket->state != SS_UNCONNECTED)
sk                493 net/decnet/dn_nsp_in.c 			sk->sk_socket->state = SS_DISCONNECTING;
sk                494 net/decnet/dn_nsp_in.c 		sk->sk_state_change(sk);
sk                498 net/decnet/dn_nsp_in.c 	scp->persist = dn_nsp_persist(sk);
sk                504 net/decnet/dn_nsp_in.c static void dn_nsp_linkservice(struct sock *sk, struct sk_buff *skb)
sk                506 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                552 net/decnet/dn_nsp_in.c 				dn_nsp_output(sk);
sk                563 net/decnet/dn_nsp_in.c 		if (wake_up && !sock_flag(sk, SOCK_DEAD))
sk                564 net/decnet/dn_nsp_in.c 			sk->sk_state_change(sk);
sk                567 net/decnet/dn_nsp_in.c 	dn_nsp_send_oth_ack(sk);
sk                578 net/decnet/dn_nsp_in.c static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
sk                585 net/decnet/dn_nsp_in.c 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
sk                586 net/decnet/dn_nsp_in.c 	    (unsigned int)sk->sk_rcvbuf) {
sk                591 net/decnet/dn_nsp_in.c 	err = sk_filter(sk, skb);
sk                595 net/decnet/dn_nsp_in.c 	skb_set_owner_r(skb, sk);
sk                598 net/decnet/dn_nsp_in.c 	if (!sock_flag(sk, SOCK_DEAD))
sk                599 net/decnet/dn_nsp_in.c 		sk->sk_data_ready(sk);
sk                604 net/decnet/dn_nsp_in.c static void dn_nsp_otherdata(struct sock *sk, struct sk_buff *skb)
sk                606 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                619 net/decnet/dn_nsp_in.c 		if (dn_queue_skb(sk, skb, SIGURG, &scp->other_receive_queue) == 0) {
sk                626 net/decnet/dn_nsp_in.c 	dn_nsp_send_oth_ack(sk);
sk                632 net/decnet/dn_nsp_in.c static void dn_nsp_data(struct sock *sk, struct sk_buff *skb)
sk                637 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                646 net/decnet/dn_nsp_in.c 		if (dn_queue_skb(sk, skb, SIGIO, &sk->sk_receive_queue) == 0) {
sk                651 net/decnet/dn_nsp_in.c 		if ((scp->flowloc_sw == DN_SEND) && dn_congested(sk)) {
sk                653 net/decnet/dn_nsp_in.c 			dn_nsp_send_link(sk, DN_DONTSEND, 0);
sk                657 net/decnet/dn_nsp_in.c 	dn_nsp_send_data_ack(sk);
sk                668 net/decnet/dn_nsp_in.c static void dn_returned_conn_init(struct sock *sk, struct sk_buff *skb)
sk                670 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                674 net/decnet/dn_nsp_in.c 		sk->sk_state = TCP_CLOSE;
sk                675 net/decnet/dn_nsp_in.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                676 net/decnet/dn_nsp_in.c 			sk->sk_state_change(sk);
sk                714 net/decnet/dn_nsp_in.c 	struct sock *sk = NULL;
sk                743 net/decnet/dn_nsp_in.c 			sk = dn_find_listener(skb, &reason);
sk                779 net/decnet/dn_nsp_in.c 	sk = dn_find_by_skb(skb);
sk                781 net/decnet/dn_nsp_in.c 	if (sk != NULL) {
sk                782 net/decnet/dn_nsp_in.c 		struct dn_scp *scp = DN_SK(sk);
sk                795 net/decnet/dn_nsp_in.c 		return sk_receive_skb(sk, skb, 0);
sk                817 net/decnet/dn_nsp_in.c int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk                819 net/decnet/dn_nsp_in.c 	struct dn_scp *scp = DN_SK(sk);
sk                824 net/decnet/dn_nsp_in.c 			dn_returned_conn_init(sk, skb);
sk                837 net/decnet/dn_nsp_in.c 			dn_nsp_conn_init(sk, skb);
sk                840 net/decnet/dn_nsp_in.c 			dn_nsp_conn_conf(sk, skb);
sk                843 net/decnet/dn_nsp_in.c 			dn_nsp_disc_init(sk, skb);
sk                846 net/decnet/dn_nsp_in.c 			dn_nsp_disc_conf(sk, skb);
sk                855 net/decnet/dn_nsp_in.c 		dn_nsp_conn_ack(sk, skb);
sk                860 net/decnet/dn_nsp_in.c 		if ((scp->state == DN_CC) && !sock_flag(sk, SOCK_DEAD)) {
sk                862 net/decnet/dn_nsp_in.c 			sk->sk_state = TCP_ESTABLISHED;
sk                863 net/decnet/dn_nsp_in.c 			sk->sk_state_change(sk);
sk                876 net/decnet/dn_nsp_in.c 		dn_process_ack(sk, skb, other);
sk                890 net/decnet/dn_nsp_in.c 				dn_nsp_linkservice(sk, skb);
sk                893 net/decnet/dn_nsp_in.c 				dn_nsp_otherdata(sk, skb);
sk                896 net/decnet/dn_nsp_in.c 				dn_nsp_data(sk, skb);
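
dn_nsp_backlog_rcv() in the dn_nsp_in.c entries is a per-message-type dispatcher: the NSP flags select conn_init, conn_conf, disc_init, disc_conf, linkservice, otherdata or data handling, with dn_queue_skb() enforcing the receive-buffer limit before waking the reader. A small sketch of that dispatch-plus-queue shape follows; the message codes and handler names are made up.

#include <stdio.h>

enum msg_type { MSG_CONN_INIT, MSG_DISC_INIT, MSG_DATA, MSG_OTHER_DATA };

struct msg { enum msg_type type; int len; };

#define RCVBUF 64
static int rmem_alloc;	/* bytes currently queued, like sk_rmem_alloc */

/* Like dn_queue_skb(): refuse the message if it would overflow the buffer. */
static int queue_msg(const struct msg *m)
{
	if (rmem_alloc + m->len >= RCVBUF)
		return -1;	/* dropped: receiver too slow */
	rmem_alloc += m->len;
	return 0;
}

static void handle_conn_init(const struct msg *m) { (void)m; printf("conn init\n"); }
static void handle_disc_init(const struct msg *m) { (void)m; printf("disconnect\n"); }

/* Dispatcher modelled on dn_nsp_backlog_rcv(): switch on the message type. */
static void backlog_rcv(const struct msg *m)
{
	switch (m->type) {
	case MSG_CONN_INIT:
		handle_conn_init(m);
		break;
	case MSG_DISC_INIT:
		handle_disc_init(m);
		break;
	case MSG_DATA:
	case MSG_OTHER_DATA:
		if (queue_msg(m) == 0)
			printf("queued %d bytes (total %d)\n", m->len, rmem_alloc);
		else
			printf("dropped %d bytes\n", m->len);
		break;
	}
}

int main(void)
{
	struct msg msgs[] = {
		{ MSG_CONN_INIT, 0 }, { MSG_DATA, 40 }, { MSG_DATA, 40 }, { MSG_DISC_INIT, 0 },
	};
	for (unsigned i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++)
		backlog_rcv(&msgs[i]);
	return 0;
}
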
sk                 68 net/decnet/dn_nsp_out.c 	struct sock *sk = skb->sk;
sk                 69 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                 76 net/decnet/dn_nsp_out.c 	dst = sk_dst_check(sk, 0);
sk                 80 net/decnet/dn_nsp_out.c 		dst_output(&init_net, skb->sk, skb);
sk                 85 net/decnet/dn_nsp_out.c 	fld.flowidn_oif = sk->sk_bound_dev_if;
sk                 90 net/decnet/dn_nsp_out.c 	if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, 0) == 0) {
sk                 91 net/decnet/dn_nsp_out.c 		dst = sk_dst_get(sk);
sk                 92 net/decnet/dn_nsp_out.c 		sk->sk_route_caps = dst->dev->features;
sk                 96 net/decnet/dn_nsp_out.c 	sk->sk_err = EHOSTUNREACH;
sk                 97 net/decnet/dn_nsp_out.c 	if (!sock_flag(sk, SOCK_DEAD))
sk                 98 net/decnet/dn_nsp_out.c 		sk->sk_state_change(sk);
sk                110 net/decnet/dn_nsp_out.c struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri)
sk                121 net/decnet/dn_nsp_out.c 	if (sk)
sk                122 net/decnet/dn_nsp_out.c 		skb_set_owner_w(skb, sk);
sk                134 net/decnet/dn_nsp_out.c unsigned long dn_nsp_persist(struct sock *sk)
sk                136 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                157 net/decnet/dn_nsp_out.c static void dn_nsp_rtt(struct sock *sk, long rtt)
sk                159 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                214 net/decnet/dn_nsp_out.c 		skb2->sk = skb->sk;
sk                230 net/decnet/dn_nsp_out.c void dn_nsp_output(struct sock *sk)
sk                232 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                266 net/decnet/dn_nsp_out.c int dn_nsp_xmit_timeout(struct sock *sk)
sk                268 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                270 net/decnet/dn_nsp_out.c 	dn_nsp_output(sk);
sk                274 net/decnet/dn_nsp_out.c 		scp->persist = dn_nsp_persist(sk);
sk                293 net/decnet/dn_nsp_out.c static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other)
sk                295 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                322 net/decnet/dn_nsp_out.c static __le16 *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth)
sk                324 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                326 net/decnet/dn_nsp_out.c 	__le16 *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth);
sk                340 net/decnet/dn_nsp_out.c void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb,
sk                343 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                348 net/decnet/dn_nsp_out.c 	dn_nsp_mk_data_header(sk, skb, oth);
sk                369 net/decnet/dn_nsp_out.c int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
sk                372 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                420 net/decnet/dn_nsp_out.c 				dn_nsp_rtt(sk, (long)(pkttime - reftime));
sk                437 net/decnet/dn_nsp_out.c 		dn_nsp_output(sk);
sk                442 net/decnet/dn_nsp_out.c void dn_nsp_send_data_ack(struct sock *sk)
sk                446 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
sk                450 net/decnet/dn_nsp_out.c 	dn_mk_ack_header(sk, skb, 0x04, 9, 0);
sk                454 net/decnet/dn_nsp_out.c void dn_nsp_send_oth_ack(struct sock *sk)
sk                458 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
sk                462 net/decnet/dn_nsp_out.c 	dn_mk_ack_header(sk, skb, 0x14, 9, 1);
sk                467 net/decnet/dn_nsp_out.c void dn_send_conn_ack (struct sock *sk)
sk                469 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                473 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
sk                483 net/decnet/dn_nsp_out.c static int dn_nsp_retrans_conn_conf(struct sock *sk)
sk                485 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                488 net/decnet/dn_nsp_out.c 		dn_send_conn_conf(sk, GFP_ATOMIC);
sk                493 net/decnet/dn_nsp_out.c void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
sk                495 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                500 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
sk                519 net/decnet/dn_nsp_out.c 	scp->persist = dn_nsp_persist(sk);
sk                524 net/decnet/dn_nsp_out.c static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
sk                539 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL)
sk                563 net/decnet/dn_nsp_out.c 	dst_output(&init_net, skb->sk, skb);
sk                567 net/decnet/dn_nsp_out.c void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
sk                570 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                579 net/decnet/dn_nsp_out.c 	dn_nsp_do_disc(sk, msgflg, reason, gfp, __sk_dst_get(sk), ddl,
sk                596 net/decnet/dn_nsp_out.c void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval)
sk                598 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                603 net/decnet/dn_nsp_out.c 	if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
sk                612 net/decnet/dn_nsp_out.c 	dn_nsp_queue_xmit(sk, skb, gfp, 1);
sk                614 net/decnet/dn_nsp_out.c 	scp->persist = dn_nsp_persist(sk);
sk                618 net/decnet/dn_nsp_out.c static int dn_nsp_retrans_conninit(struct sock *sk)
sk                620 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                623 net/decnet/dn_nsp_out.c 		dn_nsp_send_conninit(sk, NSP_RCI);
sk                628 net/decnet/dn_nsp_out.c void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
sk                630 net/decnet/dn_nsp_out.c 	struct dn_scp *scp = DN_SK(sk);
sk                636 net/decnet/dn_nsp_out.c 	gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
sk                637 net/decnet/dn_nsp_out.c 	struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);
sk                689 net/decnet/dn_nsp_out.c 	scp->persist = dn_nsp_persist(sk);
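
The dn_nsp_out.c entries keep sent-but-unacked skbs on per-socket queues; dn_nsp_check_xmit_queue() walks the queue, frees everything covered by the incoming ack number, takes an RTT sample from a freed packet's send timestamp, and then calls dn_nsp_output() to push whatever can now go out. Below is a deliberately simplified sketch of that cumulative-ack trimming, using a plain linked list in place of sk_buff_head; the names are illustrative.

#include <stdio.h>
#include <stdlib.h>

struct sent_pkt {
	unsigned short seq;
	long sent_at;		/* timestamp when transmitted */
	struct sent_pkt *next;
};

static struct sent_pkt *xmit_queue;

static void queue_sent(unsigned short seq, long now)
{
	struct sent_pkt *p = malloc(sizeof(*p));
	struct sent_pkt **tail = &xmit_queue;

	p->seq = seq;
	p->sent_at = now;
	p->next = NULL;
	while (*tail)
		tail = &(*tail)->next;
	*tail = p;
}

/* Like dn_nsp_check_xmit_queue(): drop every packet the ack covers and
 * take one RTT sample from the last packet being acknowledged. */
static long process_ack(unsigned short acknum, long now)
{
	long rtt = -1;

	while (xmit_queue && xmit_queue->seq <= acknum) {
		struct sent_pkt *p = xmit_queue;

		xmit_queue = p->next;
		rtt = now - p->sent_at;	/* last freed packet wins */
		free(p);
	}
	return rtt;
}

int main(void)
{
	queue_sent(1, 100);
	queue_sent(2, 110);
	queue_sent(3, 120);

	long rtt = process_ack(2, 150);	/* acks seq 1 and 2 */
	printf("rtt sample = %ld, head seq now = %u\n",
	       rtt, xmit_queue ? xmit_queue->seq : 0);
	return 0;
}
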
sk                112 net/decnet/dn_route.c static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk                115 net/decnet/dn_route.c static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
sk                254 net/decnet/dn_route.c static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk                284 net/decnet/dn_route.c static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
sk                501 net/decnet/dn_route.c static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                599 net/decnet/dn_route.c static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                609 net/decnet/dn_route.c static int dn_route_ptp_hello(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                733 net/decnet/dn_route.c static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                760 net/decnet/dn_route.c 		       &init_net, sk, skb, NULL, dev,
sk                819 net/decnet/dn_route.c static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb)
sk               1284 net/decnet/dn_route.c int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int flags)
sk               1291 net/decnet/dn_route.c 				    flowidn_to_flowi(fl), sk, 0);
sk               1636 net/decnet/dn_route.c 	struct net *net = sock_net(in_skb->sk);
sk               1722 net/decnet/dn_route.c 	struct net *net = sock_net(skb->sk);
sk                491 net/decnet/dn_table.c 	struct net *net = sock_net(skb->sk);
sk                 39 net/decnet/dn_timer.c void dn_start_slow_timer(struct sock *sk)
sk                 41 net/decnet/dn_timer.c 	timer_setup(&sk->sk_timer, dn_slow_timer, 0);
sk                 42 net/decnet/dn_timer.c 	sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
sk                 45 net/decnet/dn_timer.c void dn_stop_slow_timer(struct sock *sk)
sk                 47 net/decnet/dn_timer.c 	sk_stop_timer(sk, &sk->sk_timer);
sk                 52 net/decnet/dn_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                 53 net/decnet/dn_timer.c 	struct dn_scp *scp = DN_SK(sk);
sk                 55 net/decnet/dn_timer.c 	bh_lock_sock(sk);
sk                 57 net/decnet/dn_timer.c 	if (sock_owned_by_user(sk)) {
sk                 58 net/decnet/dn_timer.c 		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
sk                 77 net/decnet/dn_timer.c 			if (scp->persist_fxn(sk))
sk                 97 net/decnet/dn_timer.c 			scp->keepalive_fxn(sk);
sk                100 net/decnet/dn_timer.c 	sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
sk                102 net/decnet/dn_timer.c 	bh_unlock_sock(sk);
sk                103 net/decnet/dn_timer.c 	sock_put(sk);
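
The dn_timer.c entries run a single per-socket slow timer: when the socket is locked by user context it re-arms quickly (HZ/10), otherwise it drives two per-socket callback slots, persist_fxn() for retransmission and teardown timeouts and keepalive_fxn() for idle-link maintenance, then re-arms at the slow interval. A brief sketch of the callback-slot idea follows; the struct and function names are invented.

#include <stdio.h>

/* Per-connection state with optional timer callbacks, loosely like
 * persist_fxn and keepalive_fxn in the DECnet socket. */
struct conn {
	long persist;		/* ticks until the persist callback fires */
	long keepalive;		/* ticks of idleness before a keepalive */
	long idle;
	void (*persist_fxn)(struct conn *c);
	void (*keepalive_fxn)(struct conn *c);
};

static void send_keepalive(struct conn *c)  { printf("keepalive\n"); c->idle = 0; }
static void persist_expired(struct conn *c) { (void)c; printf("persist expired\n"); }

/* One slow-timer tick, modelled on dn_slow_timer(): count down the persist
 * timer and the idle time, invoking the callbacks only if they are set. */
static void slow_tick(struct conn *c)
{
	c->idle++;
	if (c->persist && --c->persist == 0 && c->persist_fxn)
		c->persist_fxn(c);
	if (c->keepalive && c->idle >= c->keepalive && c->keepalive_fxn)
		c->keepalive_fxn(c);
}

int main(void)
{
	struct conn c = {
		.persist = 3, .keepalive = 2,
		.persist_fxn = persist_expired, .keepalive_fxn = send_keepalive,
	};
	for (int i = 0; i < 5; i++)
		slow_tick(&c);
	return 0;
}
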
sk                440 net/ieee802154/nl-mac.c 	struct net *net = sock_net(skb->sk);
sk                772 net/ieee802154/nl-mac.c 	struct net *net = sock_net(skb->sk);
sk                252 net/ieee802154/nl802154.c 		*wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk),
sk                619 net/ieee802154/nl802154.c 		if (!net_eq(wpan_phy_net(&rdev->wpan_phy), sock_net(skb->sk)))
sk                863 net/ieee802154/nl802154.c 		if (!net_eq(wpan_phy_net(&rdev->wpan_phy), sock_net(skb->sk)))
sk                 83 net/ieee802154/socket.c 	struct sock *sk = sock->sk;
sk                 85 net/ieee802154/socket.c 	if (sk) {
sk                 86 net/ieee802154/socket.c 		sock->sk = NULL;
sk                 87 net/ieee802154/socket.c 		sk->sk_prot->close(sk, 0);
sk                 95 net/ieee802154/socket.c 	struct sock *sk = sock->sk;
sk                 97 net/ieee802154/socket.c 	return sk->sk_prot->sendmsg(sk, msg, len);
sk                103 net/ieee802154/socket.c 	struct sock *sk = sock->sk;
sk                105 net/ieee802154/socket.c 	if (sk->sk_prot->bind)
sk                106 net/ieee802154/socket.c 		return sk->sk_prot->bind(sk, uaddr, addr_len);
sk                114 net/ieee802154/socket.c 	struct sock *sk = sock->sk;
sk                120 net/ieee802154/socket.c 		return sk->sk_prot->disconnect(sk, flags);
sk                122 net/ieee802154/socket.c 	return sk->sk_prot->connect(sk, uaddr, addr_len);
sk                125 net/ieee802154/socket.c static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
sk                137 net/ieee802154/socket.c 	dev_load(sock_net(sk), ifr.ifr_name);
sk                138 net/ieee802154/socket.c 	dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
sk                156 net/ieee802154/socket.c 	struct sock *sk = sock->sk;
sk                161 net/ieee802154/socket.c 		return ieee802154_dev_ioctl(sk, (struct ifreq __user *)arg,
sk                164 net/ieee802154/socket.c 		if (!sk->sk_prot->ioctl)
sk                166 net/ieee802154/socket.c 		return sk->sk_prot->ioctl(sk, cmd, arg);
sk                174 net/ieee802154/socket.c static int raw_hash(struct sock *sk)
sk                177 net/ieee802154/socket.c 	sk_add_node(sk, &raw_head);
sk                178 net/ieee802154/socket.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                184 net/ieee802154/socket.c static void raw_unhash(struct sock *sk)
sk                187 net/ieee802154/socket.c 	if (sk_del_node_init(sk))
sk                188 net/ieee802154/socket.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk                192 net/ieee802154/socket.c static void raw_close(struct sock *sk, long timeout)
sk                194 net/ieee802154/socket.c 	sk_common_release(sk);
sk                197 net/ieee802154/socket.c static int raw_bind(struct sock *sk, struct sockaddr *_uaddr, int len)
sk                211 net/ieee802154/socket.c 	lock_sock(sk);
sk                214 net/ieee802154/socket.c 	dev = ieee802154_get_dev(sock_net(sk), &addr);
sk                220 net/ieee802154/socket.c 	sk->sk_bound_dev_if = dev->ifindex;
sk                221 net/ieee802154/socket.c 	sk_dst_reset(sk);
sk                225 net/ieee802154/socket.c 	release_sock(sk);
sk                230 net/ieee802154/socket.c static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
sk                236 net/ieee802154/socket.c static int raw_disconnect(struct sock *sk, int flags)
sk                241 net/ieee802154/socket.c static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
sk                254 net/ieee802154/socket.c 	lock_sock(sk);
sk                255 net/ieee802154/socket.c 	if (!sk->sk_bound_dev_if)
sk                256 net/ieee802154/socket.c 		dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
sk                258 net/ieee802154/socket.c 		dev = dev_get_by_index(sock_net(sk), sk->sk_bound_dev_if);
sk                259 net/ieee802154/socket.c 	release_sock(sk);
sk                278 net/ieee802154/socket.c 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
sk                311 net/ieee802154/socket.c static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk                318 net/ieee802154/socket.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                332 net/ieee802154/socket.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk                337 net/ieee802154/socket.c 	skb_free_datagram(sk, skb);
sk                344 net/ieee802154/socket.c static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                350 net/ieee802154/socket.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
sk                360 net/ieee802154/socket.c 	struct sock *sk;
sk                363 net/ieee802154/socket.c 	sk_for_each(sk, &raw_head) {
sk                364 net/ieee802154/socket.c 		bh_lock_sock(sk);
sk                365 net/ieee802154/socket.c 		if (!sk->sk_bound_dev_if ||
sk                366 net/ieee802154/socket.c 		    sk->sk_bound_dev_if == dev->ifindex) {
sk                371 net/ieee802154/socket.c 				raw_rcv_skb(sk, clone);
sk                373 net/ieee802154/socket.c 		bh_unlock_sock(sk);
sk                378 net/ieee802154/socket.c static int raw_getsockopt(struct sock *sk, int level, int optname,
sk                384 net/ieee802154/socket.c static int raw_setsockopt(struct sock *sk, int level, int optname,
sk                437 net/ieee802154/socket.c 	struct sock sk;
sk                452 net/ieee802154/socket.c static inline struct dgram_sock *dgram_sk(const struct sock *sk)
sk                454 net/ieee802154/socket.c 	return container_of(sk, struct dgram_sock, sk);
sk                457 net/ieee802154/socket.c static int dgram_hash(struct sock *sk)
sk                460 net/ieee802154/socket.c 	sk_add_node(sk, &dgram_head);
sk                461 net/ieee802154/socket.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                467 net/ieee802154/socket.c static void dgram_unhash(struct sock *sk)
sk                470 net/ieee802154/socket.c 	if (sk_del_node_init(sk))
sk                471 net/ieee802154/socket.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk                475 net/ieee802154/socket.c static int dgram_init(struct sock *sk)
sk                477 net/ieee802154/socket.c 	struct dgram_sock *ro = dgram_sk(sk);
sk                484 net/ieee802154/socket.c static void dgram_close(struct sock *sk, long timeout)
sk                486 net/ieee802154/socket.c 	sk_common_release(sk);
sk                489 net/ieee802154/socket.c static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
sk                493 net/ieee802154/socket.c 	struct dgram_sock *ro = dgram_sk(sk);
sk                497 net/ieee802154/socket.c 	lock_sock(sk);
sk                508 net/ieee802154/socket.c 	dev = ieee802154_get_dev(sock_net(sk), &haddr);
sk                526 net/ieee802154/socket.c 	release_sock(sk);
sk                531 net/ieee802154/socket.c static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk                536 net/ieee802154/socket.c 		int amount = sk_wmem_alloc_get(sk);
sk                547 net/ieee802154/socket.c 		spin_lock_bh(&sk->sk_receive_queue.lock);
sk                548 net/ieee802154/socket.c 		skb = skb_peek(&sk->sk_receive_queue);
sk                556 net/ieee802154/socket.c 		spin_unlock_bh(&sk->sk_receive_queue.lock);
sk                565 net/ieee802154/socket.c static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
sk                569 net/ieee802154/socket.c 	struct dgram_sock *ro = dgram_sk(sk);
sk                578 net/ieee802154/socket.c 	lock_sock(sk);
sk                589 net/ieee802154/socket.c 	release_sock(sk);
sk                593 net/ieee802154/socket.c static int dgram_disconnect(struct sock *sk, int flags)
sk                595 net/ieee802154/socket.c 	struct dgram_sock *ro = dgram_sk(sk);
sk                597 net/ieee802154/socket.c 	lock_sock(sk);
sk                599 net/ieee802154/socket.c 	release_sock(sk);
sk                604 net/ieee802154/socket.c static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
sk                610 net/ieee802154/socket.c 	struct dgram_sock *ro = dgram_sk(sk);
sk                626 net/ieee802154/socket.c 		dev = dev_getfirstbyhwtype(sock_net(sk), ARPHRD_IEEE802154);
sk                628 net/ieee802154/socket.c 		dev = ieee802154_get_dev(sock_net(sk), &ro->src_addr);
sk                646 net/ieee802154/socket.c 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
sk                702 net/ieee802154/socket.c static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk                708 net/ieee802154/socket.c 	struct dgram_sock *ro = dgram_sk(sk);
sk                711 net/ieee802154/socket.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                726 net/ieee802154/socket.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk                750 net/ieee802154/socket.c 	skb_free_datagram(sk, skb);
sk                757 net/ieee802154/socket.c static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                763 net/ieee802154/socket.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
sk                792 net/ieee802154/socket.c 	struct sock *sk, *prev = NULL;
sk                805 net/ieee802154/socket.c 	sk_for_each(sk, &dgram_head) {
sk                807 net/ieee802154/socket.c 					  dgram_sk(sk))) {
sk                816 net/ieee802154/socket.c 			prev = sk;
sk                831 net/ieee802154/socket.c static int dgram_getsockopt(struct sock *sk, int level, int optname,
sk                834 net/ieee802154/socket.c 	struct dgram_sock *ro = dgram_sk(sk);
sk                878 net/ieee802154/socket.c static int dgram_setsockopt(struct sock *sk, int level, int optname,
sk                881 net/ieee802154/socket.c 	struct dgram_sock *ro = dgram_sk(sk);
sk                882 net/ieee802154/socket.c 	struct net *net = sock_net(sk);
sk                892 net/ieee802154/socket.c 	lock_sock(sk);
sk                947 net/ieee802154/socket.c 	release_sock(sk);
sk               1001 net/ieee802154/socket.c 	struct sock *sk;
sk               1027 net/ieee802154/socket.c 	sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto, kern);
sk               1028 net/ieee802154/socket.c 	if (!sk)
sk               1034 net/ieee802154/socket.c 	sock_init_data(sock, sk);
sk               1036 net/ieee802154/socket.c 	sk->sk_family = PF_IEEE802154;
sk               1039 net/ieee802154/socket.c 	sock_set_flag(sk, SOCK_ZAPPED);
sk               1041 net/ieee802154/socket.c 	if (sk->sk_prot->hash) {
sk               1042 net/ieee802154/socket.c 		rc = sk->sk_prot->hash(sk);
sk               1044 net/ieee802154/socket.c 			sk_common_release(sk);
sk               1049 net/ieee802154/socket.c 	if (sk->sk_prot->init) {
sk               1050 net/ieee802154/socket.c 		rc = sk->sk_prot->init(sk);
sk               1052 net/ieee802154/socket.c 			sk_common_release(sk);
sk                130 net/ipv4/af_inet.c void inet_sock_destruct(struct sock *sk)
sk                132 net/ipv4/af_inet.c 	struct inet_sock *inet = inet_sk(sk);
sk                134 net/ipv4/af_inet.c 	__skb_queue_purge(&sk->sk_receive_queue);
sk                135 net/ipv4/af_inet.c 	if (sk->sk_rx_skb_cache) {
sk                136 net/ipv4/af_inet.c 		__kfree_skb(sk->sk_rx_skb_cache);
sk                137 net/ipv4/af_inet.c 		sk->sk_rx_skb_cache = NULL;
sk                139 net/ipv4/af_inet.c 	__skb_queue_purge(&sk->sk_error_queue);
sk                141 net/ipv4/af_inet.c 	sk_mem_reclaim(sk);
sk                143 net/ipv4/af_inet.c 	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
sk                145 net/ipv4/af_inet.c 		       sk->sk_state, sk);
sk                148 net/ipv4/af_inet.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                149 net/ipv4/af_inet.c 		pr_err("Attempt to release alive inet socket %p\n", sk);
sk                153 net/ipv4/af_inet.c 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
sk                154 net/ipv4/af_inet.c 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
sk                155 net/ipv4/af_inet.c 	WARN_ON(sk->sk_wmem_queued);
sk                156 net/ipv4/af_inet.c 	WARN_ON(sk->sk_forward_alloc);
sk                159 net/ipv4/af_inet.c 	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
sk                160 net/ipv4/af_inet.c 	dst_release(sk->sk_rx_dst);
sk                161 net/ipv4/af_inet.c 	sk_refcnt_debug_dec(sk);
sk                175 net/ipv4/af_inet.c static int inet_autobind(struct sock *sk)
sk                179 net/ipv4/af_inet.c 	lock_sock(sk);
sk                180 net/ipv4/af_inet.c 	inet = inet_sk(sk);
sk                182 net/ipv4/af_inet.c 		if (sk->sk_prot->get_port(sk, 0)) {
sk                183 net/ipv4/af_inet.c 			release_sock(sk);
sk                188 net/ipv4/af_inet.c 	release_sock(sk);
sk                197 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                201 net/ipv4/af_inet.c 	lock_sock(sk);
sk                207 net/ipv4/af_inet.c 	old_state = sk->sk_state;
sk                211 net/ipv4/af_inet.c 	sk->sk_max_ack_backlog = backlog;
sk                222 net/ipv4/af_inet.c 		tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
sk                225 net/ipv4/af_inet.c 		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
sk                226 net/ipv4/af_inet.c 			fastopen_queue_tune(sk, backlog);
sk                227 net/ipv4/af_inet.c 			tcp_fastopen_init_key_once(sock_net(sk));
sk                230 net/ipv4/af_inet.c 		err = inet_csk_listen_start(sk, backlog);
sk                233 net/ipv4/af_inet.c 		tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
sk                238 net/ipv4/af_inet.c 	release_sock(sk);
sk                250 net/ipv4/af_inet.c 	struct sock *sk;
sk                321 net/ipv4/af_inet.c 	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
sk                322 net/ipv4/af_inet.c 	if (!sk)
sk                327 net/ipv4/af_inet.c 		sk->sk_reuse = SK_CAN_REUSE;
sk                329 net/ipv4/af_inet.c 	inet = inet_sk(sk);
sk                347 net/ipv4/af_inet.c 	sock_init_data(sock, sk);
sk                349 net/ipv4/af_inet.c 	sk->sk_destruct	   = inet_sock_destruct;
sk                350 net/ipv4/af_inet.c 	sk->sk_protocol	   = protocol;
sk                351 net/ipv4/af_inet.c 	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
sk                361 net/ipv4/af_inet.c 	sk_refcnt_debug_inc(sk);
sk                371 net/ipv4/af_inet.c 		err = sk->sk_prot->hash(sk);
sk                373 net/ipv4/af_inet.c 			sk_common_release(sk);
sk                378 net/ipv4/af_inet.c 	if (sk->sk_prot->init) {
sk                379 net/ipv4/af_inet.c 		err = sk->sk_prot->init(sk);
sk                381 net/ipv4/af_inet.c 			sk_common_release(sk);
sk                387 net/ipv4/af_inet.c 		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
sk                389 net/ipv4/af_inet.c 			sk_common_release(sk);
sk                408 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                410 net/ipv4/af_inet.c 	if (sk) {
sk                414 net/ipv4/af_inet.c 		ip_mc_drop_socket(sk);
sk                424 net/ipv4/af_inet.c 		if (sock_flag(sk, SOCK_LINGER) &&
sk                426 net/ipv4/af_inet.c 			timeout = sk->sk_lingertime;
sk                427 net/ipv4/af_inet.c 		sk->sk_prot->close(sk, timeout);
sk                428 net/ipv4/af_inet.c 		sock->sk = NULL;
sk                436 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                440 net/ipv4/af_inet.c 	if (sk->sk_prot->bind) {
sk                441 net/ipv4/af_inet.c 		return sk->sk_prot->bind(sk, uaddr, addr_len);
sk                449 net/ipv4/af_inet.c 	err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
sk                453 net/ipv4/af_inet.c 	return __inet_bind(sk, uaddr, addr_len, false, true);
sk                457 net/ipv4/af_inet.c int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
sk                461 net/ipv4/af_inet.c 	struct inet_sock *inet = inet_sk(sk);
sk                462 net/ipv4/af_inet.c 	struct net *net = sock_net(sk);
sk                478 net/ipv4/af_inet.c 	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
sk                510 net/ipv4/af_inet.c 		lock_sock(sk);
sk                514 net/ipv4/af_inet.c 	if (sk->sk_state != TCP_CLOSE || inet->inet_num)
sk                524 net/ipv4/af_inet.c 		if (sk->sk_prot->get_port(sk, snum)) {
sk                529 net/ipv4/af_inet.c 		err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
sk                537 net/ipv4/af_inet.c 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
sk                539 net/ipv4/af_inet.c 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
sk                543 net/ipv4/af_inet.c 	sk_dst_reset(sk);
sk                547 net/ipv4/af_inet.c 		release_sock(sk);
sk                555 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                561 net/ipv4/af_inet.c 		return sk->sk_prot->disconnect(sk, flags);
sk                563 net/ipv4/af_inet.c 	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
sk                564 net/ipv4/af_inet.c 		err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
sk                569 net/ipv4/af_inet.c 	if (!inet_sk(sk)->inet_num && inet_autobind(sk))
sk                571 net/ipv4/af_inet.c 	return sk->sk_prot->connect(sk, uaddr, addr_len);
sk                575 net/ipv4/af_inet.c static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
sk                579 net/ipv4/af_inet.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                580 net/ipv4/af_inet.c 	sk->sk_write_pending += writebias;
sk                587 net/ipv4/af_inet.c 	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
sk                588 net/ipv4/af_inet.c 		release_sock(sk);
sk                590 net/ipv4/af_inet.c 		lock_sock(sk);
sk                594 net/ipv4/af_inet.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                595 net/ipv4/af_inet.c 	sk->sk_write_pending -= writebias;
sk                606 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                624 net/ipv4/af_inet.c 			err = sk->sk_prot->disconnect(sk, flags);
sk                638 net/ipv4/af_inet.c 		if (inet_sk(sk)->defer_connect)
sk                646 net/ipv4/af_inet.c 		if (sk->sk_state != TCP_CLOSE)
sk                649 net/ipv4/af_inet.c 		if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
sk                650 net/ipv4/af_inet.c 			err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
sk                655 net/ipv4/af_inet.c 		err = sk->sk_prot->connect(sk, uaddr, addr_len);
sk                661 net/ipv4/af_inet.c 		if (!err && inet_sk(sk)->defer_connect)
sk                672 net/ipv4/af_inet.c 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
sk                674 net/ipv4/af_inet.c 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
sk                675 net/ipv4/af_inet.c 		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
sk                676 net/ipv4/af_inet.c 				tcp_sk(sk)->fastopen_req &&
sk                677 net/ipv4/af_inet.c 				tcp_sk(sk)->fastopen_req->data ? 1 : 0;
sk                680 net/ipv4/af_inet.c 		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
sk                691 net/ipv4/af_inet.c 	if (sk->sk_state == TCP_CLOSE)
sk                705 net/ipv4/af_inet.c 	err = sock_error(sk) ? : -ECONNABORTED;
sk                707 net/ipv4/af_inet.c 	if (sk->sk_prot->disconnect(sk, flags))
sk                718 net/ipv4/af_inet.c 	lock_sock(sock->sk);
sk                720 net/ipv4/af_inet.c 	release_sock(sock->sk);
sk                732 net/ipv4/af_inet.c 	struct sock *sk1 = sock->sk;
sk                763 net/ipv4/af_inet.c 	struct sock *sk		= sock->sk;
sk                764 net/ipv4/af_inet.c 	struct inet_sock *inet	= inet_sk(sk);
sk                770 net/ipv4/af_inet.c 		    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
sk                787 net/ipv4/af_inet.c int inet_send_prepare(struct sock *sk)
sk                789 net/ipv4/af_inet.c 	sock_rps_record_flow(sk);
sk                792 net/ipv4/af_inet.c 	if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
sk                793 net/ipv4/af_inet.c 	    inet_autobind(sk))
sk                802 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                804 net/ipv4/af_inet.c 	if (unlikely(inet_send_prepare(sk)))
sk                807 net/ipv4/af_inet.c 	return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udp_sendmsg,
sk                808 net/ipv4/af_inet.c 			       sk, msg, size);
sk                815 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                817 net/ipv4/af_inet.c 	if (unlikely(inet_send_prepare(sk)))
sk                820 net/ipv4/af_inet.c 	if (sk->sk_prot->sendpage)
sk                821 net/ipv4/af_inet.c 		return sk->sk_prot->sendpage(sk, page, offset, size, flags);
sk                831 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                836 net/ipv4/af_inet.c 		sock_rps_record_flow(sk);
sk                838 net/ipv4/af_inet.c 	err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg,
sk                839 net/ipv4/af_inet.c 			      sk, msg, size, flags & MSG_DONTWAIT,
sk                849 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                861 net/ipv4/af_inet.c 	lock_sock(sk);
sk                863 net/ipv4/af_inet.c 		if ((1 << sk->sk_state) &
sk                870 net/ipv4/af_inet.c 	switch (sk->sk_state) {
sk                877 net/ipv4/af_inet.c 		sk->sk_shutdown |= how;
sk                878 net/ipv4/af_inet.c 		if (sk->sk_prot->shutdown)
sk                879 net/ipv4/af_inet.c 			sk->sk_prot->shutdown(sk, how);
sk                891 net/ipv4/af_inet.c 		err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
sk                897 net/ipv4/af_inet.c 	sk->sk_state_change(sk);
sk                898 net/ipv4/af_inet.c 	release_sock(sk);
sk                915 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                917 net/ipv4/af_inet.c 	struct net *net = sock_net(sk);
sk                960 net/ipv4/af_inet.c 		if (sk->sk_prot->ioctl)
sk                961 net/ipv4/af_inet.c 			err = sk->sk_prot->ioctl(sk, cmd, arg);
sk                973 net/ipv4/af_inet.c 	struct sock *sk = sock->sk;
sk                976 net/ipv4/af_inet.c 	if (sk->sk_prot->compat_ioctl)
sk                977 net/ipv4/af_inet.c 		err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
sk               1188 net/ipv4/af_inet.c static int inet_sk_reselect_saddr(struct sock *sk)
sk               1190 net/ipv4/af_inet.c 	struct inet_sock *inet = inet_sk(sk);
sk               1199 net/ipv4/af_inet.c 					     lockdep_sock_is_held(sk));
sk               1205 net/ipv4/af_inet.c 	rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
sk               1206 net/ipv4/af_inet.c 			      sk->sk_bound_dev_if, sk->sk_protocol,
sk               1207 net/ipv4/af_inet.c 			      inet->inet_sport, inet->inet_dport, sk);
sk               1211 net/ipv4/af_inet.c 	sk_setup_caps(sk, &rt->dst);
sk               1218 net/ipv4/af_inet.c 	if (sock_net(sk)->ipv4.sysctl_ip_dynaddr > 1) {
sk               1233 net/ipv4/af_inet.c 	return __sk_prot_rehash(sk);
sk               1236 net/ipv4/af_inet.c int inet_sk_rebuild_header(struct sock *sk)
sk               1238 net/ipv4/af_inet.c 	struct inet_sock *inet = inet_sk(sk);
sk               1239 net/ipv4/af_inet.c 	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
sk               1257 net/ipv4/af_inet.c 	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
sk               1259 net/ipv4/af_inet.c 				   sk->sk_protocol, RT_CONN_FLAGS(sk),
sk               1260 net/ipv4/af_inet.c 				   sk->sk_bound_dev_if);
sk               1263 net/ipv4/af_inet.c 		sk_setup_caps(sk, &rt->dst);
sk               1268 net/ipv4/af_inet.c 		sk->sk_route_caps = 0;
sk               1273 net/ipv4/af_inet.c 		if (!sock_net(sk)->ipv4.sysctl_ip_dynaddr ||
sk               1274 net/ipv4/af_inet.c 		    sk->sk_state != TCP_SYN_SENT ||
sk               1275 net/ipv4/af_inet.c 		    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
sk               1276 net/ipv4/af_inet.c 		    (err = inet_sk_reselect_saddr(sk)) != 0)
sk               1277 net/ipv4/af_inet.c 			sk->sk_err_soft = -err;
sk               1284 net/ipv4/af_inet.c void inet_sk_set_state(struct sock *sk, int state)
sk               1286 net/ipv4/af_inet.c 	trace_inet_sock_set_state(sk, sk->sk_state, state);
sk               1287 net/ipv4/af_inet.c 	sk->sk_state = state;
sk               1291 net/ipv4/af_inet.c void inet_sk_state_store(struct sock *sk, int newstate)
sk               1293 net/ipv4/af_inet.c 	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
sk               1294 net/ipv4/af_inet.c 	smp_store_release(&sk->sk_state, newstate);
sk               1568 net/ipv4/af_inet.c int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
sk               1570 net/ipv4/af_inet.c 	if (sk->sk_family == AF_INET)
sk               1571 net/ipv4/af_inet.c 		return ip_recv_error(sk, msg, len, addr_len);
sk               1573 net/ipv4/af_inet.c 	if (sk->sk_family == AF_INET6)
sk               1574 net/ipv4/af_inet.c 		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
sk               1624 net/ipv4/af_inet.c int inet_ctl_sock_create(struct sock **sk, unsigned short family,
sk               1632 net/ipv4/af_inet.c 		*sk = sock->sk;
sk               1633 net/ipv4/af_inet.c 		(*sk)->sk_allocation = GFP_ATOMIC;
sk               1638 net/ipv4/af_inet.c 		(*sk)->sk_prot->unhash(*sk);
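
The inet_bind(), inet_listen() and inet_csk_listen_start() entries above are the kernel-side handlers behind ordinary bind()/listen()/accept() on an AF_INET stream socket. A minimal userspace sketch of that path, for orientation only (error handling omitted; port 8080 is an arbitrary example):

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);	/* enters inet_create() */
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),		/* arbitrary example port */
		.sin_addr   = { htonl(INADDR_ANY) },
	};

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));	/* inet_bind() -> __inet_bind() */
	listen(fd, 128);			/* inet_listen() -> inet_csk_listen_start() */

	int conn = accept(fd, NULL, NULL);	/* serviced by inet_csk_accept() */
	close(conn);
	close(fd);
	return 0;
}
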
sk                628 net/ipv4/arp.c static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                675 net/ipv4/arp.c static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 25 net/ipv4/bpfilter/sockopt.c static int bpfilter_mbox_request(struct sock *sk, int optname,
sk                 46 net/ipv4/bpfilter/sockopt.c 	err = bpfilter_ops.sockopt(sk, optname, optval, optlen, is_set);
sk                 52 net/ipv4/bpfilter/sockopt.c int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
sk                 55 net/ipv4/bpfilter/sockopt.c 	return bpfilter_mbox_request(sk, optname, optval, optlen, true);
sk                 58 net/ipv4/bpfilter/sockopt.c int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
sk                 66 net/ipv4/bpfilter/sockopt.c 	return bpfilter_mbox_request(sk, optname, optval, len, false);
sk               1836 net/ipv4/cipso_ipv4.c int cipso_v4_sock_setattr(struct sock *sk,
sk               1852 net/ipv4/cipso_ipv4.c 	if (!sk)
sk               1886 net/ipv4/cipso_ipv4.c 	sk_inet = inet_sk(sk);
sk               1889 net/ipv4/cipso_ipv4.c 					lockdep_sock_is_held(sk));
sk               1891 net/ipv4/cipso_ipv4.c 		sk_conn = inet_csk(sk);
sk               1895 net/ipv4/cipso_ipv4.c 		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
sk               2052 net/ipv4/cipso_ipv4.c void cipso_v4_sock_delattr(struct sock *sk)
sk               2057 net/ipv4/cipso_ipv4.c 	sk_inet = inet_sk(sk);
sk               2061 net/ipv4/cipso_ipv4.c 		struct inet_connection_sock *sk_conn = inet_csk(sk);
sk               2063 net/ipv4/cipso_ipv4.c 		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
sk               2142 net/ipv4/cipso_ipv4.c int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
sk               2148 net/ipv4/cipso_ipv4.c 	opt = rcu_dereference(inet_sk(sk)->inet_opt);
sk                 20 net/ipv4/datagram.c int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                 22 net/ipv4/datagram.c 	struct inet_sock *inet = inet_sk(sk);
sk                 37 net/ipv4/datagram.c 	sk_dst_reset(sk);
sk                 39 net/ipv4/datagram.c 	oif = sk->sk_bound_dev_if;
sk                 42 net/ipv4/datagram.c 		if (!oif || netif_index_is_l3_master(sock_net(sk), oif))
sk                 49 net/ipv4/datagram.c 			      RT_CONN_FLAGS(sk), oif,
sk                 50 net/ipv4/datagram.c 			      sk->sk_protocol,
sk                 51 net/ipv4/datagram.c 			      inet->inet_sport, usin->sin_port, sk);
sk                 55 net/ipv4/datagram.c 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
sk                 59 net/ipv4/datagram.c 	if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
sk                 68 net/ipv4/datagram.c 		if (sk->sk_prot->rehash)
sk                 69 net/ipv4/datagram.c 			sk->sk_prot->rehash(sk);
sk                 73 net/ipv4/datagram.c 	reuseport_has_conns(sk, true);
sk                 74 net/ipv4/datagram.c 	sk->sk_state = TCP_ESTABLISHED;
sk                 75 net/ipv4/datagram.c 	sk_set_txhash(sk);
sk                 78 net/ipv4/datagram.c 	sk_dst_set(sk, &rt->dst);
sk                 85 net/ipv4/datagram.c int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                 89 net/ipv4/datagram.c 	lock_sock(sk);
sk                 90 net/ipv4/datagram.c 	res = __ip4_datagram_connect(sk, uaddr, addr_len);
sk                 91 net/ipv4/datagram.c 	release_sock(sk);
sk                100 net/ipv4/datagram.c void ip4_datagram_release_cb(struct sock *sk)
sk                102 net/ipv4/datagram.c 	const struct inet_sock *inet = inet_sk(sk);
sk                111 net/ipv4/datagram.c 	dst = __sk_dst_get(sk);
sk                119 net/ipv4/datagram.c 	rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
sk                121 net/ipv4/datagram.c 				   inet->inet_sport, sk->sk_protocol,
sk                122 net/ipv4/datagram.c 				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
sk                125 net/ipv4/datagram.c 	sk_dst_set(sk, dst);
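
ip4_datagram_connect() above is what connect() resolves to for an IPv4 SOCK_DGRAM socket: it records the peer, marks the socket TCP_ESTABLISHED and caches the route. A minimal userspace sketch (192.0.2.1:9999 is just a documentation address and example port):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(9999) };

	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	/* connect() on a UDP socket lands in ip4_datagram_connect(); afterwards
	 * plain send()/recv() use the pinned destination and cached dst entry. */
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
	send(fd, "ping", 4, 0);
	close(fd);
	return 0;
}
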
sk                626 net/ipv4/devinet.c 	struct sock *sk = net->ipv4.mc_autojoin_sk;
sk                631 net/ipv4/devinet.c 	lock_sock(sk);
sk                633 net/ipv4/devinet.c 		ret = ip_mc_join_group(sk, &mreq);
sk                635 net/ipv4/devinet.c 		ret = ip_mc_leave_group(sk, &mreq);
sk                636 net/ipv4/devinet.c 	release_sock(sk);
sk                647 net/ipv4/devinet.c 	struct net *net = sock_net(skb->sk);
sk                931 net/ipv4/devinet.c 	struct net *net = sock_net(skb->sk);
sk               1718 net/ipv4/devinet.c 				      struct net **tgt_net, struct sock *sk,
sk               1757 net/ipv4/devinet.c 			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
sk               1811 net/ipv4/devinet.c 	struct net *net = sock_net(skb->sk);
sk               1827 net/ipv4/devinet.c 						 skb->sk, cb);
sk               2163 net/ipv4/devinet.c 	struct net *net = sock_net(in_skb->sk);
sk               2223 net/ipv4/devinet.c 	struct net *net = sock_net(skb->sk);
sk                300 net/ipv4/esp4.c 			struct sock *sk = skb->sk;
sk                340 net/ipv4/esp4.c 			if (sk && sk_fullsock(sk))
sk                341 net/ipv4/esp4.c 				refcount_add(tailen, &sk->sk_wmem_alloc);
sk                848 net/ipv4/fib_frontend.c 	struct net *net = sock_net(skb->sk);
sk                878 net/ipv4/fib_frontend.c 	struct net *net = sock_net(skb->sk);
sk                979 net/ipv4/fib_frontend.c 	struct net *net = sock_net(skb->sk);
sk               1370 net/ipv4/fib_frontend.c 	net = sock_net(skb->sk);
sk               1393 net/ipv4/fib_frontend.c 	struct sock *sk;
sk               1398 net/ipv4/fib_frontend.c 	sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
sk               1399 net/ipv4/fib_frontend.c 	if (!sk)
sk               1401 net/ipv4/fib_frontend.c 	net->ipv4.fibnl = sk;
sk                223 net/ipv4/fib_rules.c 	struct net *net = sock_net(skb->sk);
sk                 49 net/ipv4/fou.c static inline struct fou *fou_from_sock(struct sock *sk)
sk                 51 net/ipv4/fou.c 	return sk->sk_user_data;
sk                 71 net/ipv4/fou.c static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
sk                 73 net/ipv4/fou.c 	struct fou *fou = fou_from_sock(sk);
sk                118 net/ipv4/fou.c static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
sk                120 net/ipv4/fou.c 	struct fou *fou = fou_from_sock(sk);
sk                229 net/ipv4/fou.c static struct sk_buff *fou_gro_receive(struct sock *sk,
sk                233 net/ipv4/fou.c 	u8 proto = fou_from_sock(sk)->protocol;
sk                263 net/ipv4/fou.c static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
sk                267 net/ipv4/fou.c 	u8 proto = fou_from_sock(sk)->protocol;
sk                310 net/ipv4/fou.c static struct sk_buff *gue_gro_receive(struct sock *sk,
sk                323 net/ipv4/fou.c 	struct fou *fou = fou_from_sock(sk);
sk                458 net/ipv4/fou.c static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
sk                505 net/ipv4/fou.c 	struct sock *sk = fou->sock->sk;
sk                510 net/ipv4/fou.c 	    sk->sk_dport != udp_cfg->peer_udp_port ||
sk                511 net/ipv4/fou.c 	    sk->sk_bound_dev_if != udp_cfg->bind_ifindex)
sk                515 net/ipv4/fou.c 		if (sk->sk_rcv_saddr != udp_cfg->local_ip.s_addr ||
sk                516 net/ipv4/fou.c 		    sk->sk_daddr != udp_cfg->peer_ip.s_addr)
sk                522 net/ipv4/fou.c 		if (ipv6_addr_cmp(&sk->sk_v6_rcv_saddr, &udp_cfg->local_ip6) ||
sk                523 net/ipv4/fou.c 		    ipv6_addr_cmp(&sk->sk_v6_daddr, &udp_cfg->peer_ip6))
sk                568 net/ipv4/fou.c 	struct sock *sk;
sk                584 net/ipv4/fou.c 	sk = sock->sk;
sk                617 net/ipv4/fou.c 	sk->sk_allocation = GFP_ATOMIC;
sk                790 net/ipv4/fou.c 	struct sock *sk = fou->sock->sk;
sk                792 net/ipv4/fou.c 	if (nla_put_u8(msg, FOU_ATTR_AF, fou->sock->sk->sk_family) ||
sk                794 net/ipv4/fou.c 	    nla_put_be16(msg, FOU_ATTR_PEER_PORT, sk->sk_dport) ||
sk                797 net/ipv4/fou.c 	    nla_put_s32(msg, FOU_ATTR_IFINDEX, sk->sk_bound_dev_if))
sk                804 net/ipv4/fou.c 	if (fou->sock->sk->sk_family == AF_INET) {
sk                805 net/ipv4/fou.c 		if (nla_put_in_addr(msg, FOU_ATTR_LOCAL_V4, sk->sk_rcv_saddr))
sk                808 net/ipv4/fou.c 		if (nla_put_in_addr(msg, FOU_ATTR_PEER_V4, sk->sk_daddr))
sk                813 net/ipv4/fou.c 				     &sk->sk_v6_rcv_saddr))
sk                816 net/ipv4/fou.c 		if (nla_put_in6_addr(msg, FOU_ATTR_PEER_V6, &sk->sk_v6_daddr))
sk                893 net/ipv4/fou.c 	struct net *net = sock_net(skb->sk);
sk                210 net/ipv4/icmp.c 	struct sock *sk;
sk                212 net/ipv4/icmp.c 	sk = icmp_sk(net);
sk                214 net/ipv4/icmp.c 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
sk                220 net/ipv4/icmp.c 	return sk;
sk                223 net/ipv4/icmp.c static inline void icmp_xmit_unlock(struct sock *sk)
sk                225 net/ipv4/icmp.c 	spin_unlock(&sk->sk_lock.slock);
sk                367 net/ipv4/icmp.c 	struct sock *sk;
sk                370 net/ipv4/icmp.c 	sk = icmp_sk(dev_net((*rt)->dst.dev));
sk                371 net/ipv4/icmp.c 	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
sk                375 net/ipv4/icmp.c 		__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
sk                376 net/ipv4/icmp.c 		ip_flush_pending_frames(sk);
sk                377 net/ipv4/icmp.c 	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
sk                382 net/ipv4/icmp.c 		skb_queue_walk(&sk->sk_write_queue, skb1) {
sk                390 net/ipv4/icmp.c 		ip_push_pending_frames(sk, fl4);
sk                404 net/ipv4/icmp.c 	struct sock *sk;
sk                421 net/ipv4/icmp.c 	sk = icmp_xmit_lock(net);
sk                422 net/ipv4/icmp.c 	if (!sk)
sk                424 net/ipv4/icmp.c 	inet = inet_sk(sk);
sk                430 net/ipv4/icmp.c 	sk->sk_mark = mark;
sk                455 net/ipv4/icmp.c 	icmp_xmit_unlock(sk);
sk                582 net/ipv4/icmp.c 	struct sock *sk;
sk                668 net/ipv4/icmp.c 	sk = icmp_xmit_lock(net);
sk                669 net/ipv4/icmp.c 	if (!sk)
sk                711 net/ipv4/icmp.c 	inet_sk(sk)->tos = tos;
sk                712 net/ipv4/icmp.c 	sk->sk_mark = mark;
sk                743 net/ipv4/icmp.c 	icmp_xmit_unlock(sk);
sk               1204 net/ipv4/icmp.c 		struct sock *sk;
sk               1206 net/ipv4/icmp.c 		err = inet_ctl_sock_create(&sk, PF_INET,
sk               1211 net/ipv4/icmp.c 		*per_cpu_ptr(net->ipv4.icmp_sk, i) = sk;
sk               1216 net/ipv4/icmp.c 		sk->sk_sndbuf =	2 * SKB_TRUESIZE(64 * 1024);
sk               1221 net/ipv4/icmp.c 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
sk               1222 net/ipv4/icmp.c 		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
sk                426 net/ipv4/igmp.c 	return ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
sk                793 net/ipv4/igmp.c 	return ip_local_out(net, skb->sk, skb);
sk               2156 net/ipv4/igmp.c static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
sk               2162 net/ipv4/igmp.c 	struct inet_sock *inet = inet_sk(sk);
sk               2163 net/ipv4/igmp.c 	struct net *net = sock_net(sk);
sk               2191 net/ipv4/igmp.c 	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
sk               2208 net/ipv4/igmp.c int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
sk               2210 net/ipv4/igmp.c 	return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
sk               2216 net/ipv4/igmp.c int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
sk               2219 net/ipv4/igmp.c 	return __ip_mc_join_group(sk, imr, mode);
sk               2222 net/ipv4/igmp.c static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
sk               2237 net/ipv4/igmp.c 	atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
sk               2242 net/ipv4/igmp.c int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
sk               2244 net/ipv4/igmp.c 	struct inet_sock *inet = inet_sk(sk);
sk               2248 net/ipv4/igmp.c 	struct net *net = sock_net(sk);
sk               2273 net/ipv4/igmp.c 		(void) ip_mc_leave_src(sk, iml, in_dev);
sk               2281 net/ipv4/igmp.c 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
sk               2290 net/ipv4/igmp.c int ip_mc_source(int add, int omode, struct sock *sk, struct
sk               2298 net/ipv4/igmp.c 	struct inet_sock *inet = inet_sk(sk);
sk               2300 net/ipv4/igmp.c 	struct net *net = sock_net(sk);
sk               2386 net/ipv4/igmp.c 		newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
sk               2397 net/ipv4/igmp.c 			atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
sk               2422 net/ipv4/igmp.c 		err = ip_mc_leave_group(sk, &imr);
sk               2426 net/ipv4/igmp.c int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
sk               2433 net/ipv4/igmp.c 	struct inet_sock *inet = inet_sk(sk);
sk               2435 net/ipv4/igmp.c 	struct net *net = sock_net(sk);
sk               2472 net/ipv4/igmp.c 		newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
sk               2484 net/ipv4/igmp.c 			sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
sk               2497 net/ipv4/igmp.c 		atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
sk               2507 net/ipv4/igmp.c 		err = ip_mc_leave_group(sk, &imr);
sk               2511 net/ipv4/igmp.c int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
sk               2519 net/ipv4/igmp.c 	struct inet_sock *inet = inet_sk(sk);
sk               2521 net/ipv4/igmp.c 	struct net *net = sock_net(sk);
sk               2569 net/ipv4/igmp.c int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
sk               2576 net/ipv4/igmp.c 	struct inet_sock *inet = inet_sk(sk);
sk               2624 net/ipv4/igmp.c int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr,
sk               2627 net/ipv4/igmp.c 	struct inet_sock *inet = inet_sk(sk);
sk               2672 net/ipv4/igmp.c void ip_mc_drop_socket(struct sock *sk)
sk               2674 net/ipv4/igmp.c 	struct inet_sock *inet = inet_sk(sk);
sk               2676 net/ipv4/igmp.c 	struct net *net = sock_net(sk);
sk               2687 net/ipv4/igmp.c 		(void) ip_mc_leave_src(sk, iml, in_dev);
sk               2691 net/ipv4/igmp.c 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
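
ip_mc_join_group() and ip_mc_leave_group() above back the IP_ADD_MEMBERSHIP / IP_DROP_MEMBERSHIP socket options. A minimal userspace sketch joining an arbitrary example group (239.1.1.1):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ip_mreqn mreq;

	memset(&mreq, 0, sizeof(mreq));
	inet_pton(AF_INET, "239.1.1.1", &mreq.imr_multiaddr);	/* example group */
	mreq.imr_ifindex = 0;					/* let the kernel pick the device */

	/* Handled in the kernel by ip_mc_join_group(). */
	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));

	/* ... receive multicast traffic ... then leave via ip_mc_leave_group(). */
	setsockopt(fd, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
	close(fd);
	return 0;
}
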
sk                 91 net/ipv4/inet_connection_sock.c bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
sk                 95 net/ipv4/inet_connection_sock.c 	if (sk->sk_family == AF_INET6)
sk                 96 net/ipv4/inet_connection_sock.c 		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
sk                 98 net/ipv4/inet_connection_sock.c 					    sk->sk_rcv_saddr,
sk                100 net/ipv4/inet_connection_sock.c 					    ipv6_only_sock(sk),
sk                105 net/ipv4/inet_connection_sock.c 	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
sk                111 net/ipv4/inet_connection_sock.c bool inet_rcv_saddr_any(const struct sock *sk)
sk                114 net/ipv4/inet_connection_sock.c 	if (sk->sk_family == AF_INET6)
sk                115 net/ipv4/inet_connection_sock.c 		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
sk                117 net/ipv4/inet_connection_sock.c 	return !sk->sk_rcv_saddr;
sk                133 net/ipv4/inet_connection_sock.c static int inet_csk_bind_conflict(const struct sock *sk,
sk                138 net/ipv4/inet_connection_sock.c 	bool reuse = sk->sk_reuse;
sk                139 net/ipv4/inet_connection_sock.c 	bool reuseport = !!sk->sk_reuseport && reuseport_ok;
sk                140 net/ipv4/inet_connection_sock.c 	kuid_t uid = sock_i_uid((struct sock *)sk);
sk                150 net/ipv4/inet_connection_sock.c 		if (sk != sk2 &&
sk                151 net/ipv4/inet_connection_sock.c 		    (!sk->sk_bound_dev_if ||
sk                153 net/ipv4/inet_connection_sock.c 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
sk                157 net/ipv4/inet_connection_sock.c 			     rcu_access_pointer(sk->sk_reuseport_cb) ||
sk                160 net/ipv4/inet_connection_sock.c 				if (inet_rcv_saddr_equal(sk, sk2, true))
sk                165 net/ipv4/inet_connection_sock.c 				if (inet_rcv_saddr_equal(sk, sk2, true))
sk                178 net/ipv4/inet_connection_sock.c inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
sk                180 net/ipv4/inet_connection_sock.c 	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
sk                183 net/ipv4/inet_connection_sock.c 	struct net *net = sock_net(sk);
sk                189 net/ipv4/inet_connection_sock.c 	l3mdev = inet_sk_bound_l3mdev(sk);
sk                190 net/ipv4/inet_connection_sock.c 	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
sk                227 net/ipv4/inet_connection_sock.c 				if (!inet_csk_bind_conflict(sk, tb, false, false))
sk                255 net/ipv4/inet_connection_sock.c 				     struct sock *sk)
sk                257 net/ipv4/inet_connection_sock.c 	kuid_t uid = sock_i_uid(sk);
sk                261 net/ipv4/inet_connection_sock.c 	if (!sk->sk_reuseport)
sk                263 net/ipv4/inet_connection_sock.c 	if (rcu_access_pointer(sk->sk_reuseport_cb))
sk                277 net/ipv4/inet_connection_sock.c 					    inet6_rcv_saddr(sk),
sk                279 net/ipv4/inet_connection_sock.c 					    sk->sk_rcv_saddr,
sk                281 net/ipv4/inet_connection_sock.c 					    ipv6_only_sock(sk), true, false);
sk                283 net/ipv4/inet_connection_sock.c 	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
sk                284 net/ipv4/inet_connection_sock.c 				    ipv6_only_sock(sk), true, false);
sk                291 net/ipv4/inet_connection_sock.c int inet_csk_get_port(struct sock *sk, unsigned short snum)
sk                293 net/ipv4/inet_connection_sock.c 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
sk                294 net/ipv4/inet_connection_sock.c 	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
sk                297 net/ipv4/inet_connection_sock.c 	struct net *net = sock_net(sk);
sk                299 net/ipv4/inet_connection_sock.c 	kuid_t uid = sock_i_uid(sk);
sk                302 net/ipv4/inet_connection_sock.c 	l3mdev = inet_sk_bound_l3mdev(sk);
sk                305 net/ipv4/inet_connection_sock.c 		head = inet_csk_find_open_port(sk, &tb, &port);
sk                326 net/ipv4/inet_connection_sock.c 		if (sk->sk_reuse == SK_FORCE_REUSE)
sk                330 net/ipv4/inet_connection_sock.c 		    sk_reuseport_match(tb, sk))
sk                332 net/ipv4/inet_connection_sock.c 		if (inet_csk_bind_conflict(sk, tb, true, true))
sk                338 net/ipv4/inet_connection_sock.c 		if (sk->sk_reuseport) {
sk                341 net/ipv4/inet_connection_sock.c 			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
sk                342 net/ipv4/inet_connection_sock.c 			tb->fast_ipv6_only = ipv6_only_sock(sk);
sk                343 net/ipv4/inet_connection_sock.c 			tb->fast_sk_family = sk->sk_family;
sk                345 net/ipv4/inet_connection_sock.c 			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
sk                353 net/ipv4/inet_connection_sock.c 		if (sk->sk_reuseport) {
sk                365 net/ipv4/inet_connection_sock.c 			if (!sk_reuseport_match(tb, sk)) {
sk                368 net/ipv4/inet_connection_sock.c 				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
sk                369 net/ipv4/inet_connection_sock.c 				tb->fast_ipv6_only = ipv6_only_sock(sk);
sk                370 net/ipv4/inet_connection_sock.c 				tb->fast_sk_family = sk->sk_family;
sk                372 net/ipv4/inet_connection_sock.c 				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
sk                379 net/ipv4/inet_connection_sock.c 	if (!inet_csk(sk)->icsk_bind_hash)
sk                380 net/ipv4/inet_connection_sock.c 		inet_bind_hash(sk, tb, port);
sk                381 net/ipv4/inet_connection_sock.c 	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
sk                394 net/ipv4/inet_connection_sock.c static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
sk                396 net/ipv4/inet_connection_sock.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                415 net/ipv4/inet_connection_sock.c 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
sk                417 net/ipv4/inet_connection_sock.c 		release_sock(sk);
sk                421 net/ipv4/inet_connection_sock.c 		lock_sock(sk);
sk                426 net/ipv4/inet_connection_sock.c 		if (sk->sk_state != TCP_LISTEN)
sk                435 net/ipv4/inet_connection_sock.c 	finish_wait(sk_sleep(sk), &wait);
sk                442 net/ipv4/inet_connection_sock.c struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
sk                444 net/ipv4/inet_connection_sock.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                450 net/ipv4/inet_connection_sock.c 	lock_sock(sk);
sk                456 net/ipv4/inet_connection_sock.c 	if (sk->sk_state != TCP_LISTEN)
sk                461 net/ipv4/inet_connection_sock.c 		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk                468 net/ipv4/inet_connection_sock.c 		error = inet_csk_wait_for_connect(sk, timeo);
sk                472 net/ipv4/inet_connection_sock.c 	req = reqsk_queue_remove(queue, sk);
sk                473 net/ipv4/inet_connection_sock.c 	newsk = req->sk;
sk                475 net/ipv4/inet_connection_sock.c 	if (sk->sk_protocol == IPPROTO_TCP &&
sk                485 net/ipv4/inet_connection_sock.c 			req->sk = NULL;
sk                492 net/ipv4/inet_connection_sock.c 	release_sock(sk);
sk                528 net/ipv4/inet_connection_sock.c void inet_csk_init_xmit_timers(struct sock *sk,
sk                533 net/ipv4/inet_connection_sock.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                537 net/ipv4/inet_connection_sock.c 	timer_setup(&sk->sk_timer, keepalive_handler, 0);
sk                542 net/ipv4/inet_connection_sock.c void inet_csk_clear_xmit_timers(struct sock *sk)
sk                544 net/ipv4/inet_connection_sock.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                548 net/ipv4/inet_connection_sock.c 	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
sk                549 net/ipv4/inet_connection_sock.c 	sk_stop_timer(sk, &icsk->icsk_delack_timer);
sk                550 net/ipv4/inet_connection_sock.c 	sk_stop_timer(sk, &sk->sk_timer);
sk                554 net/ipv4/inet_connection_sock.c void inet_csk_delete_keepalive_timer(struct sock *sk)
sk                556 net/ipv4/inet_connection_sock.c 	sk_stop_timer(sk, &sk->sk_timer);
sk                560 net/ipv4/inet_connection_sock.c void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
sk                562 net/ipv4/inet_connection_sock.c 	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
sk                566 net/ipv4/inet_connection_sock.c struct dst_entry *inet_csk_route_req(const struct sock *sk,
sk                579 net/ipv4/inet_connection_sock.c 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk                580 net/ipv4/inet_connection_sock.c 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
sk                583 net/ipv4/inet_connection_sock.c 			   htons(ireq->ir_num), sk->sk_uid);
sk                585 net/ipv4/inet_connection_sock.c 	rt = ip_route_output_flow(net, fl4, sk);
sk                602 net/ipv4/inet_connection_sock.c struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
sk                617 net/ipv4/inet_connection_sock.c 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk                618 net/ipv4/inet_connection_sock.c 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
sk                621 net/ipv4/inet_connection_sock.c 			   htons(ireq->ir_num), sk->sk_uid);
sk                623 net/ipv4/inet_connection_sock.c 	rt = ip_route_output_flow(net, fl4, sk);
sk                694 net/ipv4/inet_connection_sock.c void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
sk                697 net/ipv4/inet_connection_sock.c 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
sk                703 net/ipv4/inet_connection_sock.c void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
sk                705 net/ipv4/inet_connection_sock.c 	inet_csk_reqsk_queue_drop(sk, req);
sk                790 net/ipv4/inet_connection_sock.c void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
sk                794 net/ipv4/inet_connection_sock.c 	inet_csk_reqsk_queue_added(sk);
sk                806 net/ipv4/inet_connection_sock.c struct sock *inet_csk_clone_lock(const struct sock *sk,
sk                810 net/ipv4/inet_connection_sock.c 	struct sock *newsk = sk_clone_lock(sk, priority);
sk                850 net/ipv4/inet_connection_sock.c void inet_csk_destroy_sock(struct sock *sk)
sk                852 net/ipv4/inet_connection_sock.c 	WARN_ON(sk->sk_state != TCP_CLOSE);
sk                853 net/ipv4/inet_connection_sock.c 	WARN_ON(!sock_flag(sk, SOCK_DEAD));
sk                856 net/ipv4/inet_connection_sock.c 	WARN_ON(!sk_unhashed(sk));
sk                859 net/ipv4/inet_connection_sock.c 	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
sk                861 net/ipv4/inet_connection_sock.c 	sk->sk_prot->destroy(sk);
sk                863 net/ipv4/inet_connection_sock.c 	sk_stream_kill_queues(sk);
sk                865 net/ipv4/inet_connection_sock.c 	xfrm_sk_free_policy(sk);
sk                867 net/ipv4/inet_connection_sock.c 	sk_refcnt_debug_release(sk);
sk                869 net/ipv4/inet_connection_sock.c 	percpu_counter_dec(sk->sk_prot->orphan_count);
sk                871 net/ipv4/inet_connection_sock.c 	sock_put(sk);
sk                878 net/ipv4/inet_connection_sock.c void inet_csk_prepare_forced_close(struct sock *sk)
sk                879 net/ipv4/inet_connection_sock.c 	__releases(&sk->sk_lock.slock)
sk                882 net/ipv4/inet_connection_sock.c 	bh_unlock_sock(sk);
sk                883 net/ipv4/inet_connection_sock.c 	sock_put(sk);
sk                886 net/ipv4/inet_connection_sock.c 	sock_set_flag(sk, SOCK_DEAD);
sk                887 net/ipv4/inet_connection_sock.c 	percpu_counter_inc(sk->sk_prot->orphan_count);
sk                888 net/ipv4/inet_connection_sock.c 	inet_sk(sk)->inet_num = 0;
sk                892 net/ipv4/inet_connection_sock.c int inet_csk_listen_start(struct sock *sk, int backlog)
sk                894 net/ipv4/inet_connection_sock.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                895 net/ipv4/inet_connection_sock.c 	struct inet_sock *inet = inet_sk(sk);
sk                900 net/ipv4/inet_connection_sock.c 	sk->sk_ack_backlog = 0;
sk                901 net/ipv4/inet_connection_sock.c 	inet_csk_delack_init(sk);
sk                908 net/ipv4/inet_connection_sock.c 	inet_sk_state_store(sk, TCP_LISTEN);
sk                909 net/ipv4/inet_connection_sock.c 	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
sk                912 net/ipv4/inet_connection_sock.c 		sk_dst_reset(sk);
sk                913 net/ipv4/inet_connection_sock.c 		err = sk->sk_prot->hash(sk);
sk                919 net/ipv4/inet_connection_sock.c 	inet_sk_set_state(sk, TCP_CLOSE);
sk                924 net/ipv4/inet_connection_sock.c static void inet_child_forget(struct sock *sk, struct request_sock *req,
sk                927 net/ipv4/inet_connection_sock.c 	sk->sk_prot->disconnect(child, O_NONBLOCK);
sk                931 net/ipv4/inet_connection_sock.c 	percpu_counter_inc(sk->sk_prot->orphan_count);
sk                933 net/ipv4/inet_connection_sock.c 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
sk                935 net/ipv4/inet_connection_sock.c 		BUG_ON(sk != req->rsk_listener);
sk                948 net/ipv4/inet_connection_sock.c struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
sk                952 net/ipv4/inet_connection_sock.c 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
sk                955 net/ipv4/inet_connection_sock.c 	if (unlikely(sk->sk_state != TCP_LISTEN)) {
sk                956 net/ipv4/inet_connection_sock.c 		inet_child_forget(sk, req, child);
sk                959 net/ipv4/inet_connection_sock.c 		req->sk = child;
sk                966 net/ipv4/inet_connection_sock.c 		sk_acceptq_added(sk);
sk                973 net/ipv4/inet_connection_sock.c struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
sk                977 net/ipv4/inet_connection_sock.c 		inet_csk_reqsk_queue_drop(sk, req);
sk                978 net/ipv4/inet_connection_sock.c 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
sk                979 net/ipv4/inet_connection_sock.c 		if (inet_csk_reqsk_queue_add(sk, req, child))
sk                993 net/ipv4/inet_connection_sock.c void inet_csk_listen_stop(struct sock *sk)
sk                995 net/ipv4/inet_connection_sock.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               1007 net/ipv4/inet_connection_sock.c 	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
sk               1008 net/ipv4/inet_connection_sock.c 		struct sock *child = req->sk;
sk               1015 net/ipv4/inet_connection_sock.c 		inet_child_forget(sk, req, child);
sk               1035 net/ipv4/inet_connection_sock.c 	WARN_ON_ONCE(sk->sk_ack_backlog);
sk               1039 net/ipv4/inet_connection_sock.c void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
sk               1042 net/ipv4/inet_connection_sock.c 	const struct inet_sock *inet = inet_sk(sk);
sk               1051 net/ipv4/inet_connection_sock.c int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
sk               1054 net/ipv4/inet_connection_sock.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1057 net/ipv4/inet_connection_sock.c 		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
sk               1059 net/ipv4/inet_connection_sock.c 	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
sk               1064 net/ipv4/inet_connection_sock.c int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
sk               1067 net/ipv4/inet_connection_sock.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1070 net/ipv4/inet_connection_sock.c 		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
sk               1072 net/ipv4/inet_connection_sock.c 	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
sk               1078 net/ipv4/inet_connection_sock.c static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
sk               1080 net/ipv4/inet_connection_sock.c 	const struct inet_sock *inet = inet_sk(sk);
sk               1091 net/ipv4/inet_connection_sock.c 	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
sk               1093 net/ipv4/inet_connection_sock.c 				   inet->inet_sport, sk->sk_protocol,
sk               1094 net/ipv4/inet_connection_sock.c 				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
sk               1098 net/ipv4/inet_connection_sock.c 		sk_setup_caps(sk, &rt->dst);
sk               1104 net/ipv4/inet_connection_sock.c struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
sk               1106 net/ipv4/inet_connection_sock.c 	struct dst_entry *dst = __sk_dst_check(sk, 0);
sk               1107 net/ipv4/inet_connection_sock.c 	struct inet_sock *inet = inet_sk(sk);
sk               1110 net/ipv4/inet_connection_sock.c 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
sk               1114 net/ipv4/inet_connection_sock.c 	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
sk               1116 net/ipv4/inet_connection_sock.c 	dst = __sk_dst_check(sk, 0);
sk               1118 net/ipv4/inet_connection_sock.c 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
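
inet_csk_get_port() and inet_csk_bind_conflict() above implement the bind-time port conflict rules that SO_REUSEADDR and SO_REUSEPORT relax (via sk->sk_reuse, sk->sk_reuseport and sk_reuseport_match()). A minimal userspace sketch of those knobs (port 8080 is again an arbitrary example):

#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int one = 1;

	/* Consulted by inet_csk_bind_conflict()/sk_reuseport_match() when bind() runs. */
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),
		.sin_addr   = { htonl(INADDR_ANY) },
	};
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(fd, 128);
	close(fd);
	return 0;
}
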
sk                 66 net/ipv4/inet_diag.c void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk)
sk                 68 net/ipv4/inet_diag.c 	r->idiag_family = sk->sk_family;
sk                 70 net/ipv4/inet_diag.c 	r->id.idiag_sport = htons(sk->sk_num);
sk                 71 net/ipv4/inet_diag.c 	r->id.idiag_dport = sk->sk_dport;
sk                 72 net/ipv4/inet_diag.c 	r->id.idiag_if = sk->sk_bound_dev_if;
sk                 73 net/ipv4/inet_diag.c 	sock_diag_save_cookie(sk, r->id.idiag_cookie);
sk                 76 net/ipv4/inet_diag.c 	if (sk->sk_family == AF_INET6) {
sk                 77 net/ipv4/inet_diag.c 		*(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
sk                 78 net/ipv4/inet_diag.c 		*(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
sk                 85 net/ipv4/inet_diag.c 	r->id.idiag_src[0] = sk->sk_rcv_saddr;
sk                 86 net/ipv4/inet_diag.c 	r->id.idiag_dst[0] = sk->sk_daddr;
sk                 91 net/ipv4/inet_diag.c static size_t inet_sk_attr_size(struct sock *sk,
sk                100 net/ipv4/inet_diag.c 		aux = handler->idiag_get_aux_size(sk, net_admin);
sk                113 net/ipv4/inet_diag.c int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
sk                118 net/ipv4/inet_diag.c 	const struct inet_sock *inet = inet_sk(sk);
sk                120 net/ipv4/inet_diag.c 	if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
sk                134 net/ipv4/inet_diag.c 				       inet6_sk(sk)->tclass) < 0)
sk                137 net/ipv4/inet_diag.c 		if (((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
sk                138 net/ipv4/inet_diag.c 		    nla_put_u8(skb, INET_DIAG_SKV6ONLY, ipv6_only_sock(sk)))
sk                143 net/ipv4/inet_diag.c 	if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
sk                151 net/ipv4/inet_diag.c 		classid = sock_cgroup_classid(&sk->sk_cgrp_data);
sk                158 net/ipv4/inet_diag.c 			classid = sk->sk_priority;
sk                164 net/ipv4/inet_diag.c 	r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
sk                165 net/ipv4/inet_diag.c 	r->idiag_inode = sock_i_ino(sk);
sk                173 net/ipv4/inet_diag.c int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
sk                197 net/ipv4/inet_diag.c 	BUG_ON(!sk_fullsock(sk));
sk                199 net/ipv4/inet_diag.c 	inet_diag_msg_common_fill(r, sk);
sk                200 net/ipv4/inet_diag.c 	r->idiag_state = sk->sk_state;
sk                204 net/ipv4/inet_diag.c 	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
sk                209 net/ipv4/inet_diag.c 			.idiag_rmem = sk_rmem_alloc_get(sk),
sk                210 net/ipv4/inet_diag.c 			.idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
sk                211 net/ipv4/inet_diag.c 			.idiag_fmem = sk->sk_forward_alloc,
sk                212 net/ipv4/inet_diag.c 			.idiag_tmem = sk_wmem_alloc_get(sk),
sk                220 net/ipv4/inet_diag.c 		if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
sk                227 net/ipv4/inet_diag.c 	if (sk->sk_type == SOCK_RAW) {
sk                228 net/ipv4/inet_diag.c 		if (nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))
sk                233 net/ipv4/inet_diag.c 		handler->idiag_get_info(sk, r, NULL);
sk                249 net/ipv4/inet_diag.c 	} else if (timer_pending(&sk->sk_timer)) {
sk                253 net/ipv4/inet_diag.c 			jiffies_to_msecs(sk->sk_timer.expires - jiffies);
sk                281 net/ipv4/inet_diag.c 	handler->idiag_get_info(sk, r, info);
sk                284 net/ipv4/inet_diag.c 		if (handler->idiag_get_aux(sk, net_admin, skb) < 0)
sk                287 net/ipv4/inet_diag.c 	if (sk->sk_state < TCP_TIME_WAIT) {
sk                295 net/ipv4/inet_diag.c 			sz = ca_ops->get_info(sk, ext, &attr, &info);
sk                311 net/ipv4/inet_diag.c static int inet_csk_diag_fill(struct sock *sk,
sk                319 net/ipv4/inet_diag.c 	return inet_sk_diag_fill(sk, inet_csk(sk), skb, req, user_ns,
sk                323 net/ipv4/inet_diag.c static int inet_twsk_diag_fill(struct sock *sk,
sk                328 net/ipv4/inet_diag.c 	struct inet_timewait_sock *tw = inet_twsk(sk);
sk                345 net/ipv4/inet_diag.c 	inet_diag_msg_common_fill(r, sk);
sk                360 net/ipv4/inet_diag.c static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
sk                364 net/ipv4/inet_diag.c 	struct request_sock *reqsk = inet_reqsk(sk);
sk                375 net/ipv4/inet_diag.c 	inet_diag_msg_common_fill(r, sk);
sk                383 net/ipv4/inet_diag.c 	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
sk                398 net/ipv4/inet_diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
sk                404 net/ipv4/inet_diag.c 	if (sk->sk_state == TCP_TIME_WAIT)
sk                405 net/ipv4/inet_diag.c 		return inet_twsk_diag_fill(sk, skb, portid, seq,
sk                408 net/ipv4/inet_diag.c 	if (sk->sk_state == TCP_NEW_SYN_RECV)
sk                409 net/ipv4/inet_diag.c 		return inet_req_diag_fill(sk, skb, portid, seq,
sk                412 net/ipv4/inet_diag.c 	return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
sk                420 net/ipv4/inet_diag.c 	struct sock *sk;
sk                424 net/ipv4/inet_diag.c 		sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[0],
sk                431 net/ipv4/inet_diag.c 			sk = inet_lookup(net, hashinfo, NULL, 0, req->id.idiag_dst[3],
sk                435 net/ipv4/inet_diag.c 			sk = inet6_lookup(net, hashinfo, NULL, 0,
sk                448 net/ipv4/inet_diag.c 	if (!sk)
sk                451 net/ipv4/inet_diag.c 	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
sk                452 net/ipv4/inet_diag.c 		sock_gen_put(sk);
sk                456 net/ipv4/inet_diag.c 	return sk;
sk                466 net/ipv4/inet_diag.c 	struct net *net = sock_net(in_skb->sk);
sk                468 net/ipv4/inet_diag.c 	struct sock *sk;
sk                471 net/ipv4/inet_diag.c 	sk = inet_diag_find_one_icsk(net, hashinfo, req);
sk                472 net/ipv4/inet_diag.c 	if (IS_ERR(sk))
sk                473 net/ipv4/inet_diag.c 		return PTR_ERR(sk);
sk                475 net/ipv4/inet_diag.c 	rep = nlmsg_new(inet_sk_attr_size(sk, req, net_admin), GFP_KERNEL);
sk                481 net/ipv4/inet_diag.c 	err = sk_diag_fill(sk, rep, req,
sk                482 net/ipv4/inet_diag.c 			   sk_user_ns(NETLINK_CB(in_skb).sk),
sk                496 net/ipv4/inet_diag.c 	if (sk)
sk                497 net/ipv4/inet_diag.c 		sock_gen_put(sk);
sk                660 net/ipv4/inet_diag.c 			     const struct sock *sk)
sk                663 net/ipv4/inet_diag.c 	if (sk->sk_family == AF_INET6) {
sk                664 net/ipv4/inet_diag.c 		entry->saddr = sk->sk_v6_rcv_saddr.s6_addr32;
sk                665 net/ipv4/inet_diag.c 		entry->daddr = sk->sk_v6_daddr.s6_addr32;
sk                669 net/ipv4/inet_diag.c 		entry->saddr = &sk->sk_rcv_saddr;
sk                670 net/ipv4/inet_diag.c 		entry->daddr = &sk->sk_daddr;
sk                674 net/ipv4/inet_diag.c int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
sk                676 net/ipv4/inet_diag.c 	struct inet_sock *inet = inet_sk(sk);
sk                682 net/ipv4/inet_diag.c 	entry.family = sk->sk_family;
sk                683 net/ipv4/inet_diag.c 	entry_fill_addrs(&entry, sk);
sk                686 net/ipv4/inet_diag.c 	entry.ifindex = sk->sk_bound_dev_if;
sk                687 net/ipv4/inet_diag.c 	entry.userlocks = sk_fullsock(sk) ? sk->sk_userlocks : 0;
sk                688 net/ipv4/inet_diag.c 	if (sk_fullsock(sk))
sk                689 net/ipv4/inet_diag.c 		entry.mark = sk->sk_mark;
sk                690 net/ipv4/inet_diag.c 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
sk                691 net/ipv4/inet_diag.c 		entry.mark = inet_rsk(inet_reqsk(sk))->ir_mark;
sk                849 net/ipv4/inet_diag.c static int inet_csk_diag_dump(struct sock *sk,
sk                856 net/ipv4/inet_diag.c 	if (!inet_diag_bc_sk(bc, sk))
sk                859 net/ipv4/inet_diag.c 	return inet_csk_diag_fill(sk, skb, r,
sk                860 net/ipv4/inet_diag.c 				  sk_user_ns(NETLINK_CB(cb->skb).sk),
sk                897 net/ipv4/inet_diag.c 	struct net *net = sock_net(skb->sk);
sk                900 net/ipv4/inet_diag.c 	struct sock *sk;
sk                918 net/ipv4/inet_diag.c 			sk_nulls_for_each(sk, node, &ilb->nulls_head) {
sk                919 net/ipv4/inet_diag.c 				struct inet_sock *inet = inet_sk(sk);
sk                921 net/ipv4/inet_diag.c 				if (!net_eq(sock_net(sk), net))
sk                930 net/ipv4/inet_diag.c 				    sk->sk_family != r->sdiag_family)
sk                937 net/ipv4/inet_diag.c 				if (inet_csk_diag_dump(sk, skb, cb, r,
sk                977 net/ipv4/inet_diag.c 		sk_nulls_for_each(sk, node, &head->chain) {
sk                980 net/ipv4/inet_diag.c 			if (!net_eq(sock_net(sk), net))
sk                984 net/ipv4/inet_diag.c 			state = (sk->sk_state == TCP_TIME_WAIT) ?
sk                985 net/ipv4/inet_diag.c 				inet_twsk(sk)->tw_substate : sk->sk_state;
sk                989 net/ipv4/inet_diag.c 			    sk->sk_family != r->sdiag_family)
sk                991 net/ipv4/inet_diag.c 			if (r->id.idiag_sport != htons(sk->sk_num) &&
sk                994 net/ipv4/inet_diag.c 			if (r->id.idiag_dport != sk->sk_dport &&
sk                999 net/ipv4/inet_diag.c 			if (!inet_diag_bc_sk(bc, sk))
sk               1002 net/ipv4/inet_diag.c 			if (!refcount_inc_not_zero(&sk->sk_refcnt))
sk               1006 net/ipv4/inet_diag.c 			sk_arr[accum] = sk;
sk               1017 net/ipv4/inet_diag.c 					   sk_user_ns(NETLINK_CB(cb->skb).sk),
sk               1121 net/ipv4/inet_diag.c 	struct net *net = sock_net(skb->sk);
sk               1152 net/ipv4/inet_diag.c 	struct net *net = sock_net(skb->sk);
sk               1181 net/ipv4/inet_diag.c int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk)
sk               1196 net/ipv4/inet_diag.c 	inet_diag_msg_common_fill(r, sk);
sk               1197 net/ipv4/inet_diag.c 	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_STREAM)
sk               1198 net/ipv4/inet_diag.c 		r->id.idiag_sport = inet_sk(sk)->inet_sport;
sk               1199 net/ipv4/inet_diag.c 	r->idiag_state = sk->sk_state;
sk               1201 net/ipv4/inet_diag.c 	if ((err = nla_put_u8(skb, INET_DIAG_PROTOCOL, sk->sk_protocol))) {
sk               1206 net/ipv4/inet_diag.c 	handler = inet_diag_lock_handler(sk->sk_protocol);
sk               1221 net/ipv4/inet_diag.c 	handler->idiag_get_info(sk, r, info);
sk                515 net/ipv4/inet_fragment.c 				fp->sk = NULL;
sk                 43 net/ipv4/inet_hashtables.c static u32 sk_ehashfn(const struct sock *sk)
sk                 46 net/ipv4/inet_hashtables.c 	if (sk->sk_family == AF_INET6 &&
sk                 47 net/ipv4/inet_hashtables.c 	    !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
sk                 48 net/ipv4/inet_hashtables.c 		return inet6_ehashfn(sock_net(sk),
sk                 49 net/ipv4/inet_hashtables.c 				     &sk->sk_v6_rcv_saddr, sk->sk_num,
sk                 50 net/ipv4/inet_hashtables.c 				     &sk->sk_v6_daddr, sk->sk_dport);
sk                 52 net/ipv4/inet_hashtables.c 	return inet_ehashfn(sock_net(sk),
sk                 53 net/ipv4/inet_hashtables.c 			    sk->sk_rcv_saddr, sk->sk_num,
sk                 54 net/ipv4/inet_hashtables.c 			    sk->sk_daddr, sk->sk_dport);
sk                 92 net/ipv4/inet_hashtables.c void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
sk                 95 net/ipv4/inet_hashtables.c 	inet_sk(sk)->inet_num = snum;
sk                 96 net/ipv4/inet_hashtables.c 	sk_add_bind_node(sk, &tb->owners);
sk                 97 net/ipv4/inet_hashtables.c 	inet_csk(sk)->icsk_bind_hash = tb;
sk                103 net/ipv4/inet_hashtables.c static void __inet_put_port(struct sock *sk)
sk                105 net/ipv4/inet_hashtables.c 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
sk                106 net/ipv4/inet_hashtables.c 	const int bhash = inet_bhashfn(sock_net(sk), inet_sk(sk)->inet_num,
sk                112 net/ipv4/inet_hashtables.c 	tb = inet_csk(sk)->icsk_bind_hash;
sk                113 net/ipv4/inet_hashtables.c 	__sk_del_bind_node(sk);
sk                114 net/ipv4/inet_hashtables.c 	inet_csk(sk)->icsk_bind_hash = NULL;
sk                115 net/ipv4/inet_hashtables.c 	inet_sk(sk)->inet_num = 0;
sk                120 net/ipv4/inet_hashtables.c void inet_put_port(struct sock *sk)
sk                123 net/ipv4/inet_hashtables.c 	__inet_put_port(sk);
sk                128 net/ipv4/inet_hashtables.c int __inet_inherit_port(const struct sock *sk, struct sock *child)
sk                130 net/ipv4/inet_hashtables.c 	struct inet_hashinfo *table = sk->sk_prot->h.hashinfo;
sk                132 net/ipv4/inet_hashtables.c 	const int bhash = inet_bhashfn(sock_net(sk), port,
sk                139 net/ipv4/inet_hashtables.c 	tb = inet_csk(sk)->icsk_bind_hash;
sk                145 net/ipv4/inet_hashtables.c 		l3mdev = inet_sk_bound_l3mdev(sk);
sk                153 net/ipv4/inet_hashtables.c 			if (net_eq(ib_net(tb), sock_net(sk)) &&
sk                159 net/ipv4/inet_hashtables.c 						     sock_net(sk), head, port,
sk                175 net/ipv4/inet_hashtables.c inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
sk                180 net/ipv4/inet_hashtables.c 	if (sk->sk_family == AF_INET6)
sk                181 net/ipv4/inet_hashtables.c 		hash = ipv6_portaddr_hash(sock_net(sk),
sk                182 net/ipv4/inet_hashtables.c 					  &sk->sk_v6_rcv_saddr,
sk                183 net/ipv4/inet_hashtables.c 					  inet_sk(sk)->inet_num);
sk                186 net/ipv4/inet_hashtables.c 		hash = ipv4_portaddr_hash(sock_net(sk),
sk                187 net/ipv4/inet_hashtables.c 					  inet_sk(sk)->inet_rcv_saddr,
sk                188 net/ipv4/inet_hashtables.c 					  inet_sk(sk)->inet_num);
sk                192 net/ipv4/inet_hashtables.c static void inet_hash2(struct inet_hashinfo *h, struct sock *sk)
sk                199 net/ipv4/inet_hashtables.c 	ilb2 = inet_lhash2_bucket_sk(h, sk);
sk                202 net/ipv4/inet_hashtables.c 	if (sk->sk_reuseport && sk->sk_family == AF_INET6)
sk                203 net/ipv4/inet_hashtables.c 		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
sk                206 net/ipv4/inet_hashtables.c 		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
sk                212 net/ipv4/inet_hashtables.c static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk)
sk                217 net/ipv4/inet_hashtables.c 	    WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node)))
sk                220 net/ipv4/inet_hashtables.c 	ilb2 = inet_lhash2_bucket_sk(h, sk);
sk                223 net/ipv4/inet_hashtables.c 	hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
sk                228 net/ipv4/inet_hashtables.c static inline int compute_score(struct sock *sk, struct net *net,
sk                234 net/ipv4/inet_hashtables.c 	if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
sk                235 net/ipv4/inet_hashtables.c 			!ipv6_only_sock(sk)) {
sk                236 net/ipv4/inet_hashtables.c 		if (sk->sk_rcv_saddr != daddr)
sk                239 net/ipv4/inet_hashtables.c 		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
sk                242 net/ipv4/inet_hashtables.c 		score = sk->sk_family == PF_INET ? 2 : 1;
sk                243 net/ipv4/inet_hashtables.c 		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
sk                266 net/ipv4/inet_hashtables.c 	struct sock *sk, *result = NULL;
sk                271 net/ipv4/inet_hashtables.c 		sk = (struct sock *)icsk;
sk                272 net/ipv4/inet_hashtables.c 		score = compute_score(sk, net, hnum, daddr,
sk                275 net/ipv4/inet_hashtables.c 			if (sk->sk_reuseport) {
sk                278 net/ipv4/inet_hashtables.c 				result = reuseport_select_sock(sk, phash,
sk                283 net/ipv4/inet_hashtables.c 			result = sk;
sk                326 net/ipv4/inet_hashtables.c void sock_gen_put(struct sock *sk)
sk                328 net/ipv4/inet_hashtables.c 	if (!refcount_dec_and_test(&sk->sk_refcnt))
sk                331 net/ipv4/inet_hashtables.c 	if (sk->sk_state == TCP_TIME_WAIT)
sk                332 net/ipv4/inet_hashtables.c 		inet_twsk_free(inet_twsk(sk));
sk                333 net/ipv4/inet_hashtables.c 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
sk                334 net/ipv4/inet_hashtables.c 		reqsk_free(inet_reqsk(sk));
sk                336 net/ipv4/inet_hashtables.c 		sk_free(sk);
sk                342 net/ipv4/inet_hashtables.c 	sock_gen_put(skb->sk);
sk                354 net/ipv4/inet_hashtables.c 	struct sock *sk;
sk                364 net/ipv4/inet_hashtables.c 	sk_nulls_for_each_rcu(sk, node, &head->chain) {
sk                365 net/ipv4/inet_hashtables.c 		if (sk->sk_hash != hash)
sk                367 net/ipv4/inet_hashtables.c 		if (likely(INET_MATCH(sk, net, acookie,
sk                369 net/ipv4/inet_hashtables.c 			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
sk                371 net/ipv4/inet_hashtables.c 			if (unlikely(!INET_MATCH(sk, net, acookie,
sk                374 net/ipv4/inet_hashtables.c 				sock_gen_put(sk);
sk                388 net/ipv4/inet_hashtables.c 	sk = NULL;
sk                390 net/ipv4/inet_hashtables.c 	return sk;
sk                396 net/ipv4/inet_hashtables.c 				    struct sock *sk, __u16 lport,
sk                400 net/ipv4/inet_hashtables.c 	struct inet_sock *inet = inet_sk(sk);
sk                403 net/ipv4/inet_hashtables.c 	int dif = sk->sk_bound_dev_if;
sk                404 net/ipv4/inet_hashtables.c 	struct net *net = sock_net(sk);
sk                426 net/ipv4/inet_hashtables.c 				if (twsk_unique(sk, sk2, twp))
sk                438 net/ipv4/inet_hashtables.c 	sk->sk_hash = hash;
sk                439 net/ipv4/inet_hashtables.c 	WARN_ON(!sk_unhashed(sk));
sk                440 net/ipv4/inet_hashtables.c 	__sk_nulls_add_node_rcu(sk, &head->chain);
sk                446 net/ipv4/inet_hashtables.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                461 net/ipv4/inet_hashtables.c static u32 inet_sk_port_offset(const struct sock *sk)
sk                463 net/ipv4/inet_hashtables.c 	const struct inet_sock *inet = inet_sk(sk);
sk                473 net/ipv4/inet_hashtables.c bool inet_ehash_insert(struct sock *sk, struct sock *osk)
sk                475 net/ipv4/inet_hashtables.c 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
sk                481 net/ipv4/inet_hashtables.c 	WARN_ON_ONCE(!sk_unhashed(sk));
sk                483 net/ipv4/inet_hashtables.c 	sk->sk_hash = sk_ehashfn(sk);
sk                484 net/ipv4/inet_hashtables.c 	head = inet_ehash_bucket(hashinfo, sk->sk_hash);
sk                486 net/ipv4/inet_hashtables.c 	lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
sk                490 net/ipv4/inet_hashtables.c 		WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
sk                494 net/ipv4/inet_hashtables.c 		__sk_nulls_add_node_rcu(sk, list);
sk                499 net/ipv4/inet_hashtables.c bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
sk                501 net/ipv4/inet_hashtables.c 	bool ok = inet_ehash_insert(sk, osk);
sk                504 net/ipv4/inet_hashtables.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                506 net/ipv4/inet_hashtables.c 		percpu_counter_inc(sk->sk_prot->orphan_count);
sk                507 net/ipv4/inet_hashtables.c 		inet_sk_set_state(sk, TCP_CLOSE);
sk                508 net/ipv4/inet_hashtables.c 		sock_set_flag(sk, SOCK_DEAD);
sk                509 net/ipv4/inet_hashtables.c 		inet_csk_destroy_sock(sk);
sk                515 net/ipv4/inet_hashtables.c static int inet_reuseport_add_sock(struct sock *sk,
sk                518 net/ipv4/inet_hashtables.c 	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
sk                521 net/ipv4/inet_hashtables.c 	kuid_t uid = sock_i_uid(sk);
sk                524 net/ipv4/inet_hashtables.c 		if (sk2 != sk &&
sk                525 net/ipv4/inet_hashtables.c 		    sk2->sk_family == sk->sk_family &&
sk                526 net/ipv4/inet_hashtables.c 		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk                527 net/ipv4/inet_hashtables.c 		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
sk                530 net/ipv4/inet_hashtables.c 		    inet_rcv_saddr_equal(sk, sk2, false))
sk                531 net/ipv4/inet_hashtables.c 			return reuseport_add_sock(sk, sk2,
sk                532 net/ipv4/inet_hashtables.c 						  inet_rcv_saddr_any(sk));
sk                535 net/ipv4/inet_hashtables.c 	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
sk                538 net/ipv4/inet_hashtables.c int __inet_hash(struct sock *sk, struct sock *osk)
sk                540 net/ipv4/inet_hashtables.c 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
sk                544 net/ipv4/inet_hashtables.c 	if (sk->sk_state != TCP_LISTEN) {
sk                545 net/ipv4/inet_hashtables.c 		inet_ehash_nolisten(sk, osk);
sk                548 net/ipv4/inet_hashtables.c 	WARN_ON(!sk_unhashed(sk));
sk                549 net/ipv4/inet_hashtables.c 	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
sk                552 net/ipv4/inet_hashtables.c 	if (sk->sk_reuseport) {
sk                553 net/ipv4/inet_hashtables.c 		err = inet_reuseport_add_sock(sk, ilb);
sk                557 net/ipv4/inet_hashtables.c 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk                558 net/ipv4/inet_hashtables.c 		sk->sk_family == AF_INET6)
sk                559 net/ipv4/inet_hashtables.c 		__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
sk                561 net/ipv4/inet_hashtables.c 		__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
sk                562 net/ipv4/inet_hashtables.c 	inet_hash2(hashinfo, sk);
sk                564 net/ipv4/inet_hashtables.c 	sock_set_flag(sk, SOCK_RCU_FREE);
sk                565 net/ipv4/inet_hashtables.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                573 net/ipv4/inet_hashtables.c int inet_hash(struct sock *sk)
sk                577 net/ipv4/inet_hashtables.c 	if (sk->sk_state != TCP_CLOSE) {
sk                579 net/ipv4/inet_hashtables.c 		err = __inet_hash(sk, NULL);
sk                587 net/ipv4/inet_hashtables.c void inet_unhash(struct sock *sk)
sk                589 net/ipv4/inet_hashtables.c 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
sk                593 net/ipv4/inet_hashtables.c 	if (sk_unhashed(sk))
sk                596 net/ipv4/inet_hashtables.c 	if (sk->sk_state == TCP_LISTEN) {
sk                597 net/ipv4/inet_hashtables.c 		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
sk                600 net/ipv4/inet_hashtables.c 		lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
sk                603 net/ipv4/inet_hashtables.c 	if (sk_unhashed(sk))
sk                606 net/ipv4/inet_hashtables.c 	if (rcu_access_pointer(sk->sk_reuseport_cb))
sk                607 net/ipv4/inet_hashtables.c 		reuseport_detach_sock(sk);
sk                609 net/ipv4/inet_hashtables.c 		inet_unhash2(hashinfo, sk);
sk                612 net/ipv4/inet_hashtables.c 	__sk_nulls_del_node_init_rcu(sk);
sk                613 net/ipv4/inet_hashtables.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk                620 net/ipv4/inet_hashtables.c 		struct sock *sk, u32 port_offset,
sk                627 net/ipv4/inet_hashtables.c 	int port = inet_sk(sk)->inet_num;
sk                628 net/ipv4/inet_hashtables.c 	struct net *net = sock_net(sk);
sk                638 net/ipv4/inet_hashtables.c 		tb = inet_csk(sk)->icsk_bind_hash;
sk                640 net/ipv4/inet_hashtables.c 		if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
sk                641 net/ipv4/inet_hashtables.c 			inet_ehash_nolisten(sk, NULL);
sk                647 net/ipv4/inet_hashtables.c 		ret = check_established(death_row, sk, port, NULL);
sk                652 net/ipv4/inet_hashtables.c 	l3mdev = inet_sk_bound_l3mdev(sk);
sk                686 net/ipv4/inet_hashtables.c 				if (!check_established(death_row, sk,
sk                717 net/ipv4/inet_hashtables.c 	inet_bind_hash(sk, tb, port);
sk                718 net/ipv4/inet_hashtables.c 	if (sk_unhashed(sk)) {
sk                719 net/ipv4/inet_hashtables.c 		inet_sk(sk)->inet_sport = htons(port);
sk                720 net/ipv4/inet_hashtables.c 		inet_ehash_nolisten(sk, (struct sock *)tw);
sk                735 net/ipv4/inet_hashtables.c 		      struct sock *sk)
sk                739 net/ipv4/inet_hashtables.c 	if (!inet_sk(sk)->inet_num)
sk                740 net/ipv4/inet_hashtables.c 		port_offset = inet_sk_port_offset(sk);
sk                741 net/ipv4/inet_hashtables.c 	return __inet_hash_connect(death_row, sk, port_offset,
sk                101 net/ipv4/inet_timewait_sock.c void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
sk                104 net/ipv4/inet_timewait_sock.c 	const struct inet_sock *inet = inet_sk(sk);
sk                105 net/ipv4/inet_timewait_sock.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                106 net/ipv4/inet_timewait_sock.c 	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
sk                107 net/ipv4/inet_timewait_sock.c 	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
sk                126 net/ipv4/inet_timewait_sock.c 	if (__sk_nulls_del_node_init_rcu(sk))
sk                127 net/ipv4/inet_timewait_sock.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk                155 net/ipv4/inet_timewait_sock.c struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
sk                164 net/ipv4/inet_timewait_sock.c 	tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
sk                167 net/ipv4/inet_timewait_sock.c 		const struct inet_sock *inet = inet_sk(sk);
sk                173 net/ipv4/inet_timewait_sock.c 		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
sk                180 net/ipv4/inet_timewait_sock.c 		tw->tw_family	    = sk->sk_family;
sk                181 net/ipv4/inet_timewait_sock.c 		tw->tw_reuse	    = sk->sk_reuse;
sk                182 net/ipv4/inet_timewait_sock.c 		tw->tw_reuseport    = sk->sk_reuseport;
sk                183 net/ipv4/inet_timewait_sock.c 		tw->tw_hash	    = sk->sk_hash;
sk                186 net/ipv4/inet_timewait_sock.c 		tw->tw_prot	    = sk->sk_prot_creator;
sk                187 net/ipv4/inet_timewait_sock.c 		atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
sk                188 net/ipv4/inet_timewait_sock.c 		twsk_net_set(tw, sock_net(sk));
sk                260 net/ipv4/inet_timewait_sock.c 	struct sock *sk;
sk                270 net/ipv4/inet_timewait_sock.c 		sk_nulls_for_each_rcu(sk, node, &head->chain) {
sk                271 net/ipv4/inet_timewait_sock.c 			if (sk->sk_state != TCP_TIME_WAIT)
sk                273 net/ipv4/inet_timewait_sock.c 			tw = inet_twsk(sk);
sk                 65 net/ipv4/ip_forward.c static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 83 net/ipv4/ip_forward.c 	return dst_output(net, sk, skb);
sk                 98 net/ipv4/ip_forward.c 	if (unlikely(skb->sk))
sk                157 net/ipv4/ip_input.c 		struct sock *sk = ra->sk;
sk                162 net/ipv4/ip_input.c 		if (sk && inet_sk(sk)->inet_num == protocol &&
sk                163 net/ipv4/ip_input.c 		    (!sk->sk_bound_dev_if ||
sk                164 net/ipv4/ip_input.c 		     sk->sk_bound_dev_if == dev->ifindex)) {
sk                174 net/ipv4/ip_input.c 			last = sk;
sk                226 net/ipv4/ip_input.c static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                307 net/ipv4/ip_input.c static int ip_rcv_finish_core(struct net *net, struct sock *sk,
sk                317 net/ipv4/ip_input.c 	    !skb->sk &&
sk                399 net/ipv4/ip_input.c static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                411 net/ipv4/ip_input.c 	ret = ip_rcv_finish_core(net, sk, skb, dev);
sk                538 net/ipv4/ip_input.c static void ip_list_rcv_finish(struct net *net, struct sock *sk,
sk                557 net/ipv4/ip_input.c 		if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
sk                 86 net/ipv4/ip_output.c ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                 98 net/ipv4/ip_output.c int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                108 net/ipv4/ip_output.c 	skb = l3mdev_ip_out(sk, skb);
sk                115 net/ipv4/ip_output.c 		       net, sk, skb, NULL, skb_dst(skb)->dev,
sk                119 net/ipv4/ip_output.c int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                123 net/ipv4/ip_output.c 	err = __ip_local_out(net, sk, skb);
sk                125 net/ipv4/ip_output.c 		err = dst_output(net, sk, skb);
sk                144 net/ipv4/ip_output.c int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
sk                147 net/ipv4/ip_output.c 	struct inet_sock *inet = inet_sk(sk);
sk                149 net/ipv4/ip_output.c 	struct net *net = sock_net(sk);
sk                162 net/ipv4/ip_output.c 	iph->protocol = sk->sk_protocol;
sk                163 net/ipv4/ip_output.c 	if (ip_dont_fragment(sk, &rt->dst)) {
sk                176 net/ipv4/ip_output.c 	skb->priority = sk->sk_priority;
sk                178 net/ipv4/ip_output.c 		skb->mark = sk->sk_mark;
sk                181 net/ipv4/ip_output.c 	return ip_local_out(net, skb->sk, skb);
sk                185 net/ipv4/ip_output.c static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                208 net/ipv4/ip_output.c 		if (skb->sk)
sk                209 net/ipv4/ip_output.c 			skb_set_owner_w(skb2, skb->sk);
sk                240 net/ipv4/ip_output.c static int ip_finish_output_gso(struct net *net, struct sock *sk,
sk                250 net/ipv4/ip_output.c 		return ip_finish_output2(net, sk, skb);
sk                280 net/ipv4/ip_output.c 		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
sk                290 net/ipv4/ip_output.c static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                298 net/ipv4/ip_output.c 		return dst_output(net, sk, skb);
sk                301 net/ipv4/ip_output.c 	mtu = ip_skb_dst_mtu(sk, skb);
sk                303 net/ipv4/ip_output.c 		return ip_finish_output_gso(net, sk, skb, mtu);
sk                306 net/ipv4/ip_output.c 		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
sk                308 net/ipv4/ip_output.c 	return ip_finish_output2(net, sk, skb);
sk                311 net/ipv4/ip_output.c static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                315 net/ipv4/ip_output.c 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
sk                318 net/ipv4/ip_output.c 		return __ip_finish_output(net, sk, skb);
sk                320 net/ipv4/ip_output.c 		return __ip_finish_output(net, sk, skb) ? : ret;
sk                327 net/ipv4/ip_output.c static int ip_mc_finish_output(struct net *net, struct sock *sk,
sk                334 net/ipv4/ip_output.c 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
sk                357 net/ipv4/ip_output.c 	err = dev_loopback_xmit(net, sk, skb);
sk                361 net/ipv4/ip_output.c int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                379 net/ipv4/ip_output.c 		if (sk_mc_loop(sk)
sk                397 net/ipv4/ip_output.c 					net, sk, newskb, NULL, newskb->dev,
sk                413 net/ipv4/ip_output.c 				net, sk, newskb, NULL, newskb->dev,
sk                418 net/ipv4/ip_output.c 			    net, sk, skb, NULL, skb->dev,
sk                423 net/ipv4/ip_output.c int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                433 net/ipv4/ip_output.c 			    net, sk, skb, NULL, dev,
sk                453 net/ipv4/ip_output.c int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
sk                456 net/ipv4/ip_output.c 	struct inet_sock *inet = inet_sk(sk);
sk                457 net/ipv4/ip_output.c 	struct net *net = sock_net(sk);
sk                475 net/ipv4/ip_output.c 	rt = (struct rtable *)__sk_dst_check(sk, 0);
sk                488 net/ipv4/ip_output.c 		rt = ip_route_output_ports(net, fl4, sk,
sk                492 net/ipv4/ip_output.c 					   sk->sk_protocol,
sk                493 net/ipv4/ip_output.c 					   RT_CONN_FLAGS_TOS(sk, tos),
sk                494 net/ipv4/ip_output.c 					   sk->sk_bound_dev_if);
sk                497 net/ipv4/ip_output.c 		sk_setup_caps(sk, &rt->dst);
sk                510 net/ipv4/ip_output.c 	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
sk                515 net/ipv4/ip_output.c 	iph->protocol = sk->sk_protocol;
sk                525 net/ipv4/ip_output.c 	ip_select_ident_segs(net, skb, sk,
sk                529 net/ipv4/ip_output.c 	skb->priority = sk->sk_priority;
sk                530 net/ipv4/ip_output.c 	skb->mark = sk->sk_mark;
sk                532 net/ipv4/ip_output.c 	res = ip_local_out(net, sk, skb);
sk                568 net/ipv4/ip_output.c static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                575 net/ipv4/ip_output.c 		return ip_do_fragment(net, sk, skb, output);
sk                587 net/ipv4/ip_output.c 	return ip_do_fragment(net, sk, skb, output);
sk                718 net/ipv4/ip_output.c 	if (skb->sk)
sk                719 net/ipv4/ip_output.c 		skb_set_owner_w(skb2, skb->sk);
sk                766 net/ipv4/ip_output.c int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                789 net/ipv4/ip_output.c 	mtu = ip_skb_dst_mtu(sk, skb);
sk                831 net/ipv4/ip_output.c 			BUG_ON(frag->sk);
sk                832 net/ipv4/ip_output.c 			if (skb->sk) {
sk                833 net/ipv4/ip_output.c 				frag->sk = skb->sk;
sk                851 net/ipv4/ip_output.c 			err = output(net, sk, skb);
sk                875 net/ipv4/ip_output.c 			frag2->sk = NULL;
sk                907 net/ipv4/ip_output.c 		err = output(net, sk, skb2);
sk                953 net/ipv4/ip_output.c static int __ip_append_data(struct sock *sk,
sk                963 net/ipv4/ip_output.c 	struct inet_sock *inet = inet_sk(sk);
sk                988 net/ipv4/ip_output.c 	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
sk                989 net/ipv4/ip_output.c 		tskey = sk->sk_tskey++;
sk                995 net/ipv4/ip_output.c 	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
sk                998 net/ipv4/ip_output.c 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
sk               1014 net/ipv4/ip_output.c 	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
sk               1015 net/ipv4/ip_output.c 		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
sk               1091 net/ipv4/ip_output.c 				skb = sock_alloc_send_skb(sk,
sk               1096 net/ipv4/ip_output.c 				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
sk               1097 net/ipv4/ip_output.c 				    2 * sk->sk_sndbuf)
sk               1099 net/ipv4/ip_output.c 							sk->sk_allocation);
sk               1160 net/ipv4/ip_output.c 				skb->sk = sk;
sk               1185 net/ipv4/ip_output.c 			if (!sk_page_frag_refill(sk, pfrag))
sk               1221 net/ipv4/ip_output.c 		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
sk               1230 net/ipv4/ip_output.c 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
sk               1231 net/ipv4/ip_output.c 	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
sk               1235 net/ipv4/ip_output.c static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
sk               1252 net/ipv4/ip_output.c 					    sk->sk_allocation);
sk               1261 net/ipv4/ip_output.c 	cork->fragsize = ip_sk_use_pmtu(sk) ?
sk               1280 net/ipv4/ip_output.c 	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
sk               1296 net/ipv4/ip_output.c int ip_append_data(struct sock *sk, struct flowi4 *fl4,
sk               1303 net/ipv4/ip_output.c 	struct inet_sock *inet = inet_sk(sk);
sk               1309 net/ipv4/ip_output.c 	if (skb_queue_empty(&sk->sk_write_queue)) {
sk               1310 net/ipv4/ip_output.c 		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
sk               1317 net/ipv4/ip_output.c 	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
sk               1318 net/ipv4/ip_output.c 				sk_page_frag(sk), getfrag,
sk               1322 net/ipv4/ip_output.c ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
sk               1325 net/ipv4/ip_output.c 	struct inet_sock *inet = inet_sk(sk);
sk               1342 net/ipv4/ip_output.c 	if (skb_queue_empty(&sk->sk_write_queue))
sk               1358 net/ipv4/ip_output.c 	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
sk               1361 net/ipv4/ip_output.c 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
sk               1366 net/ipv4/ip_output.c 	skb = skb_peek_tail(&sk->sk_write_queue);
sk               1386 net/ipv4/ip_output.c 			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
sk               1419 net/ipv4/ip_output.c 			__skb_queue_tail(&sk->sk_write_queue, skb);
sk               1440 net/ipv4/ip_output.c 		refcount_add(len, &sk->sk_wmem_alloc);
sk               1448 net/ipv4/ip_output.c 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
sk               1465 net/ipv4/ip_output.c struct sk_buff *__ip_make_skb(struct sock *sk,
sk               1472 net/ipv4/ip_output.c 	struct inet_sock *inet = inet_sk(sk);
sk               1473 net/ipv4/ip_output.c 	struct net *net = sock_net(sk);
sk               1496 net/ipv4/ip_output.c 		tmp_skb->sk = NULL;
sk               1503 net/ipv4/ip_output.c 	skb->ignore_df = ip_sk_ignore_df(sk);
sk               1511 net/ipv4/ip_output.c 	     ip_dont_fragment(sk, &rt->dst)))
sk               1530 net/ipv4/ip_output.c 	iph->protocol = sk->sk_protocol;
sk               1532 net/ipv4/ip_output.c 	ip_select_ident(net, skb, sk);
sk               1539 net/ipv4/ip_output.c 	skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
sk               1562 net/ipv4/ip_output.c 	err = ip_local_out(net, skb->sk, skb);
sk               1573 net/ipv4/ip_output.c int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
sk               1577 net/ipv4/ip_output.c 	skb = ip_finish_skb(sk, fl4);
sk               1582 net/ipv4/ip_output.c 	return ip_send_skb(sock_net(sk), skb);
sk               1588 net/ipv4/ip_output.c static void __ip_flush_pending_frames(struct sock *sk,
sk               1600 net/ipv4/ip_output.c void ip_flush_pending_frames(struct sock *sk)
sk               1602 net/ipv4/ip_output.c 	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
sk               1605 net/ipv4/ip_output.c struct sk_buff *ip_make_skb(struct sock *sk,
sk               1624 net/ipv4/ip_output.c 	err = ip_setup_cork(sk, cork, ipc, rtp);
sk               1628 net/ipv4/ip_output.c 	err = __ip_append_data(sk, fl4, &queue, cork,
sk               1632 net/ipv4/ip_output.c 		__ip_flush_pending_frames(sk, &queue, cork);
sk               1636 net/ipv4/ip_output.c 	return __ip_make_skb(sk, fl4, &queue, cork);
sk               1656 net/ipv4/ip_output.c void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
sk               1666 net/ipv4/ip_output.c 	struct net *net = sock_net(sk);
sk               1690 net/ipv4/ip_output.c 			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
sk               1702 net/ipv4/ip_output.c 	inet_sk(sk)->tos = arg->tos;
sk               1704 net/ipv4/ip_output.c 	sk->sk_protocol = ip_hdr(skb)->protocol;
sk               1705 net/ipv4/ip_output.c 	sk->sk_bound_dev_if = arg->bound_dev_if;
sk               1706 net/ipv4/ip_output.c 	sk->sk_sndbuf = sysctl_wmem_default;
sk               1707 net/ipv4/ip_output.c 	sk->sk_mark = fl4.flowi4_mark;
sk               1708 net/ipv4/ip_output.c 	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
sk               1711 net/ipv4/ip_output.c 		ip_flush_pending_frames(sk);
sk               1715 net/ipv4/ip_output.c 	nskb = skb_peek(&sk->sk_write_queue);
sk               1722 net/ipv4/ip_output.c 		ip_push_pending_frames(sk, &fl4);
sk                171 net/ipv4/ip_sockglue.c void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
sk                174 net/ipv4/ip_sockglue.c 	struct inet_sock *inet = inet_sk(sk);
sk                211 net/ipv4/ip_sockglue.c 		ip_cmsg_recv_retopts(sock_net(sk), msg, skb);
sk                242 net/ipv4/ip_sockglue.c int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
sk                247 net/ipv4/ip_sockglue.c 	struct net *net = sock_net(sk);
sk                270 net/ipv4/ip_sockglue.c 			err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
sk                335 net/ipv4/ip_sockglue.c int ip_ra_control(struct sock *sk, unsigned char on,
sk                340 net/ipv4/ip_sockglue.c 	struct net *net = sock_net(sk);
sk                342 net/ipv4/ip_sockglue.c 	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
sk                354 net/ipv4/ip_sockglue.c 		if (ra->sk == sk) {
sk                361 net/ipv4/ip_sockglue.c 			ra->sk = NULL;
sk                366 net/ipv4/ip_sockglue.c 				ra->destructor(sk);
sk                372 net/ipv4/ip_sockglue.c 			ra->saved_sk = sk;
sk                381 net/ipv4/ip_sockglue.c 	new_ra->sk = sk;
sk                386 net/ipv4/ip_sockglue.c 	sock_hold(sk);
sk                392 net/ipv4/ip_sockglue.c void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
sk                415 net/ipv4/ip_sockglue.c 		if (sock_queue_err_skb(sk, skb) == 0)
sk                421 net/ipv4/ip_sockglue.c void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
sk                423 net/ipv4/ip_sockglue.c 	struct inet_sock *inet = inet_sk(sk);
sk                454 net/ipv4/ip_sockglue.c 	if (sock_queue_err_skb(sk, skb))
sk                472 net/ipv4/ip_sockglue.c static bool ipv4_datagram_support_cmsg(const struct sock *sk,
sk                489 net/ipv4/ip_sockglue.c 	if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
sk                500 net/ipv4/ip_sockglue.c int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
sk                513 net/ipv4/ip_sockglue.c 	skb = sock_dequeue_err_skb(sk);
sk                527 net/ipv4/ip_sockglue.c 	sock_recv_timestamp(msg, sk, skb);
sk                544 net/ipv4/ip_sockglue.c 	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
sk                547 net/ipv4/ip_sockglue.c 		if (inet_sk(sk)->cmsg_flags)
sk                590 net/ipv4/ip_sockglue.c static int do_ip_setsockopt(struct sock *sk, int level,
sk                593 net/ipv4/ip_sockglue.c 	struct inet_sock *inet = inet_sk(sk);
sk                594 net/ipv4/ip_sockglue.c 	struct net *net = sock_net(sk);
sk                638 net/ipv4/ip_sockglue.c 		return ip_ra_control(sk, val ? 1 : 0, NULL);
sk                640 net/ipv4/ip_sockglue.c 		return ip_mroute_setsockopt(sk, optname, optval, optlen);
sk                645 net/ipv4/ip_sockglue.c 	lock_sock(sk);
sk                654 net/ipv4/ip_sockglue.c 		err = ip_options_get_from_user(sock_net(sk), &opt,
sk                659 net/ipv4/ip_sockglue.c 						lockdep_sock_is_held(sk));
sk                661 net/ipv4/ip_sockglue.c 			struct inet_connection_sock *icsk = inet_csk(sk);
sk                663 net/ipv4/ip_sockglue.c 			if (sk->sk_family == PF_INET ||
sk                664 net/ipv4/ip_sockglue.c 			    (!((1 << sk->sk_state) &
sk                672 net/ipv4/ip_sockglue.c 				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
sk                727 net/ipv4/ip_sockglue.c 				inet_inc_convert_csum(sk);
sk                732 net/ipv4/ip_sockglue.c 				inet_dec_convert_csum(sk);
sk                738 net/ipv4/ip_sockglue.c 		if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
sk                746 net/ipv4/ip_sockglue.c 		if (sk->sk_type == SOCK_STREAM) {
sk                752 net/ipv4/ip_sockglue.c 			sk->sk_priority = rt_tos2priority(val);
sk                753 net/ipv4/ip_sockglue.c 			sk_dst_reset(sk);
sk                764 net/ipv4/ip_sockglue.c 		if (sk->sk_type != SOCK_RAW) {
sk                771 net/ipv4/ip_sockglue.c 		if (sk->sk_type != SOCK_RAW) {
sk                788 net/ipv4/ip_sockglue.c 			skb_queue_purge(&sk->sk_error_queue);
sk                791 net/ipv4/ip_sockglue.c 		if (sk->sk_type == SOCK_STREAM)
sk                822 net/ipv4/ip_sockglue.c 		dev = dev_get_by_index(sock_net(sk), ifindex);
sk                831 net/ipv4/ip_sockglue.c 		if (sk->sk_bound_dev_if &&
sk                832 net/ipv4/ip_sockglue.c 		    (!midx || midx != sk->sk_bound_dev_if))
sk                845 net/ipv4/ip_sockglue.c 		if (sk->sk_type == SOCK_STREAM)
sk                878 net/ipv4/ip_sockglue.c 			dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
sk                882 net/ipv4/ip_sockglue.c 			dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
sk                894 net/ipv4/ip_sockglue.c 		if (sk->sk_bound_dev_if &&
sk                895 net/ipv4/ip_sockglue.c 		    mreq.imr_ifindex != sk->sk_bound_dev_if &&
sk                896 net/ipv4/ip_sockglue.c 		    (!midx || midx != sk->sk_bound_dev_if))
sk                911 net/ipv4/ip_sockglue.c 		if (inet_sk(sk)->is_icsk)
sk                927 net/ipv4/ip_sockglue.c 			err = ip_mc_join_group(sk, &mreq);
sk                929 net/ipv4/ip_sockglue.c 			err = ip_mc_leave_group(sk, &mreq);
sk                959 net/ipv4/ip_sockglue.c 		err = ip_mc_msfilter(sk, msf, 0);
sk                989 net/ipv4/ip_sockglue.c 			err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
sk                998 net/ipv4/ip_sockglue.c 		err = ip_mc_source(add, omode, sk, &mreqs, 0);
sk               1021 net/ipv4/ip_sockglue.c 			err = ip_mc_join_group(sk, &mreq);
sk               1023 net/ipv4/ip_sockglue.c 			err = ip_mc_leave_group(sk, &mreq);
sk               1066 net/ipv4/ip_sockglue.c 			err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
sk               1076 net/ipv4/ip_sockglue.c 		err = ip_mc_source(add, omode, sk, &mreqs,
sk               1136 net/ipv4/ip_sockglue.c 		err = ip_mc_msfilter(sk, msf, ifindex);
sk               1159 net/ipv4/ip_sockglue.c 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1161 net/ipv4/ip_sockglue.c 		err = xfrm_user_policy(sk, optname, optval, optlen);
sk               1165 net/ipv4/ip_sockglue.c 		if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
sk               1166 net/ipv4/ip_sockglue.c 		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
sk               1187 net/ipv4/ip_sockglue.c 	release_sock(sk);
sk               1193 net/ipv4/ip_sockglue.c 	release_sock(sk);
sk               1208 net/ipv4/ip_sockglue.c void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
sk               1211 net/ipv4/ip_sockglue.c 	bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
sk               1212 net/ipv4/ip_sockglue.c 		       ipv6_sk_rxinfo(sk);
sk               1240 net/ipv4/ip_sockglue.c int ip_setsockopt(struct sock *sk, int level,
sk               1248 net/ipv4/ip_sockglue.c 	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
sk               1252 net/ipv4/ip_sockglue.c 		err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
sk               1260 net/ipv4/ip_sockglue.c 		err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
sk               1267 net/ipv4/ip_sockglue.c int compat_ip_setsockopt(struct sock *sk, int level, int optname,
sk               1276 net/ipv4/ip_sockglue.c 		return compat_mc_setsockopt(sk, level, optname, optval, optlen,
sk               1279 net/ipv4/ip_sockglue.c 	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
sk               1286 net/ipv4/ip_sockglue.c 		err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
sk               1309 net/ipv4/ip_sockglue.c static int do_ip_getsockopt(struct sock *sk, int level, int optname,
sk               1312 net/ipv4/ip_sockglue.c 	struct inet_sock *inet = inet_sk(sk);
sk               1321 net/ipv4/ip_sockglue.c 		return ip_mroute_getsockopt(sk, optname, optval, optlen);
sk               1330 net/ipv4/ip_sockglue.c 	lock_sock(sk);
sk               1340 net/ipv4/ip_sockglue.c 						     lockdep_sock_is_held(sk));
sk               1346 net/ipv4/ip_sockglue.c 		release_sock(sk);
sk               1392 net/ipv4/ip_sockglue.c 		struct net *net = sock_net(sk);
sk               1414 net/ipv4/ip_sockglue.c 		dst = sk_dst_get(sk);
sk               1420 net/ipv4/ip_sockglue.c 			release_sock(sk);
sk               1442 net/ipv4/ip_sockglue.c 		release_sock(sk);
sk               1462 net/ipv4/ip_sockglue.c 		err = ip_mc_msfget(sk, &msf,
sk               1478 net/ipv4/ip_sockglue.c 		err = ip_mc_gsfget(sk, &gsf,
sk               1490 net/ipv4/ip_sockglue.c 		release_sock(sk);
sk               1492 net/ipv4/ip_sockglue.c 		if (sk->sk_type != SOCK_STREAM)
sk               1528 net/ipv4/ip_sockglue.c 		release_sock(sk);
sk               1531 net/ipv4/ip_sockglue.c 	release_sock(sk);
sk               1550 net/ipv4/ip_sockglue.c 	release_sock(sk);
sk               1556 net/ipv4/ip_sockglue.c int ip_getsockopt(struct sock *sk, int level,
sk               1561 net/ipv4/ip_sockglue.c 	err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
sk               1565 net/ipv4/ip_sockglue.c 		err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
sk               1576 net/ipv4/ip_sockglue.c 		err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
sk               1587 net/ipv4/ip_sockglue.c int compat_ip_getsockopt(struct sock *sk, int level, int optname,
sk               1593 net/ipv4/ip_sockglue.c 		return compat_mc_getsockopt(sk, level, optname, optval, optlen,
sk               1596 net/ipv4/ip_sockglue.c 	err = do_ip_getsockopt(sk, level, optname, optval, optlen,
sk               1602 net/ipv4/ip_sockglue.c 		err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
sk               1613 net/ipv4/ip_sockglue.c 		err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
sk                 46 net/ipv4/ip_tunnel_core.c void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
sk                 78 net/ipv4/ip_tunnel_core.c 	err = ip_local_out(net, sk, skb);
sk                288 net/ipv4/ip_vti.c 	err = dst_output(tunnel->net, skb->sk, skb);
sk               1347 net/ipv4/ipmr.c static void mrtsock_destruct(struct sock *sk)
sk               1349 net/ipv4/ipmr.c 	struct net *net = sock_net(sk);
sk               1354 net/ipv4/ipmr.c 		if (sk == rtnl_dereference(mrt->mroute_sk)) {
sk               1373 net/ipv4/ipmr.c int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
sk               1376 net/ipv4/ipmr.c 	struct net *net = sock_net(sk);
sk               1386 net/ipv4/ipmr.c 	if (sk->sk_type != SOCK_RAW ||
sk               1387 net/ipv4/ipmr.c 	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
sk               1392 net/ipv4/ipmr.c 	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
sk               1398 net/ipv4/ipmr.c 		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
sk               1416 net/ipv4/ipmr.c 		ret = ip_ra_control(sk, 1, mrtsock_destruct);
sk               1418 net/ipv4/ipmr.c 			rcu_assign_pointer(mrt->mroute_sk, sk);
sk               1427 net/ipv4/ipmr.c 		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
sk               1435 net/ipv4/ipmr.c 			ret = ip_ra_control(sk, 0, NULL);
sk               1455 net/ipv4/ipmr.c 				      sk == rtnl_dereference(mrt->mroute_sk));
sk               1483 net/ipv4/ipmr.c 					   sk == rtnl_dereference(mrt->mroute_sk),
sk               1545 net/ipv4/ipmr.c 		if (sk == rtnl_dereference(mrt->mroute_sk)) {
sk               1552 net/ipv4/ipmr.c 				raw_sk(sk)->ipmr_table = uval;
sk               1566 net/ipv4/ipmr.c int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
sk               1570 net/ipv4/ipmr.c 	struct net *net = sock_net(sk);
sk               1573 net/ipv4/ipmr.c 	if (sk->sk_type != SOCK_RAW ||
sk               1574 net/ipv4/ipmr.c 	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
sk               1577 net/ipv4/ipmr.c 	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
sk               1610 net/ipv4/ipmr.c int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
sk               1616 net/ipv4/ipmr.c 	struct net *net = sock_net(sk);
sk               1619 net/ipv4/ipmr.c 	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
sk               1685 net/ipv4/ipmr.c int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
sk               1691 net/ipv4/ipmr.c 	struct net *net = sock_net(sk);
sk               1694 net/ipv4/ipmr.c 	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
sk               1800 net/ipv4/ipmr.c static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
sk               1811 net/ipv4/ipmr.c 	return dst_output(net, sk, skb);
sk               2541 net/ipv4/ipmr.c 	struct net *net = sock_net(in_skb->sk);
sk               2601 net/ipv4/ipmr.c 		err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
sk               2610 net/ipv4/ipmr.c 		mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
sk               2736 net/ipv4/ipmr.c 	struct net *net = sock_net(skb->sk);
sk               2833 net/ipv4/ipmr.c 	struct net *net = sock_net(skb->sk);
sk                354 net/ipv4/ipmr_base.c 	struct net *net = sock_net(skb->sk);
sk                 26 net/ipv4/netfilter.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                 27 net/ipv4/netfilter.c 	__u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
sk                 44 net/ipv4/netfilter.c 	fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
sk                 65 net/ipv4/netfilter.c 		dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0);
sk               1297 net/ipv4/netfilter/arp_tables.c static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
sk               1302 net/ipv4/netfilter/arp_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1307 net/ipv4/netfilter/arp_tables.c 		ret = compat_do_replace(sock_net(sk), user, len);
sk               1311 net/ipv4/netfilter/arp_tables.c 		ret = do_add_counters(sock_net(sk), user, len, 1);
sk               1431 net/ipv4/netfilter/arp_tables.c static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
sk               1436 net/ipv4/netfilter/arp_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1441 net/ipv4/netfilter/arp_tables.c 		ret = get_info(sock_net(sk), user, len, 1);
sk               1444 net/ipv4/netfilter/arp_tables.c 		ret = compat_get_entries(sock_net(sk), user, len);
sk               1447 net/ipv4/netfilter/arp_tables.c 		ret = do_arpt_get_ctl(sk, cmd, user, len);
sk               1453 net/ipv4/netfilter/arp_tables.c static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
sk               1457 net/ipv4/netfilter/arp_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1462 net/ipv4/netfilter/arp_tables.c 		ret = do_replace(sock_net(sk), user, len);
sk               1466 net/ipv4/netfilter/arp_tables.c 		ret = do_add_counters(sock_net(sk), user, len, 0);
sk               1476 net/ipv4/netfilter/arp_tables.c static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
sk               1480 net/ipv4/netfilter/arp_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1485 net/ipv4/netfilter/arp_tables.c 		ret = get_info(sock_net(sk), user, len, 0);
sk               1489 net/ipv4/netfilter/arp_tables.c 		ret = get_entries(sock_net(sk), user, len);
sk               1538 net/ipv4/netfilter/ip_tables.c compat_do_ipt_set_ctl(struct sock *sk,	int cmd, void __user *user,
sk               1543 net/ipv4/netfilter/ip_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1548 net/ipv4/netfilter/ip_tables.c 		ret = compat_do_replace(sock_net(sk), user, len);
sk               1552 net/ipv4/netfilter/ip_tables.c 		ret = do_add_counters(sock_net(sk), user, len, 1);
sk               1641 net/ipv4/netfilter/ip_tables.c compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
sk               1645 net/ipv4/netfilter/ip_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1650 net/ipv4/netfilter/ip_tables.c 		ret = get_info(sock_net(sk), user, len, 1);
sk               1653 net/ipv4/netfilter/ip_tables.c 		ret = compat_get_entries(sock_net(sk), user, len);
sk               1656 net/ipv4/netfilter/ip_tables.c 		ret = do_ipt_get_ctl(sk, cmd, user, len);
sk               1663 net/ipv4/netfilter/ip_tables.c do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
sk               1667 net/ipv4/netfilter/ip_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1672 net/ipv4/netfilter/ip_tables.c 		ret = do_replace(sock_net(sk), user, len);
sk               1676 net/ipv4/netfilter/ip_tables.c 		ret = do_add_counters(sock_net(sk), user, len, 0);
sk               1687 net/ipv4/netfilter/ip_tables.c do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
sk               1691 net/ipv4/netfilter/ip_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1696 net/ipv4/netfilter/ip_tables.c 		ret = get_info(sock_net(sk), user, len, 0);
sk               1700 net/ipv4/netfilter/ip_tables.c 		ret = get_entries(sock_net(sk), user, len);
sk                 65 net/ipv4/netfilter/nf_defrag_ipv4.c 	struct sock *sk = skb->sk;
sk                 67 net/ipv4/netfilter/nf_defrag_ipv4.c 	if (sk && sk_fullsock(sk) && (sk->sk_family == PF_INET) &&
sk                 68 net/ipv4/netfilter/nf_defrag_ipv4.c 	    inet_sk(sk)->nodefrag)
sk                 88 net/ipv4/netfilter/nf_dup_ipv4.c 		ip_local_out(net, skb->sk, skb);
sk                251 net/ipv4/netfilter/nf_log_ipv4.c 		nf_log_dump_sk_uid_gid(net, m, skb->sk);
sk                161 net/ipv4/netfilter/nf_reject_ipv4.c 		ip_local_out(net, nskb->sk, nskb);
sk                 20 net/ipv4/netfilter/nf_tproxy_ipv4.c 			 __be32 laddr, __be16 lport, struct sock *sk)
sk                 27 net/ipv4/netfilter/nf_tproxy_ipv4.c 		inet_twsk_put(inet_twsk(sk));
sk                 41 net/ipv4/netfilter/nf_tproxy_ipv4.c 			inet_twsk_deschedule_put(inet_twsk(sk));
sk                 42 net/ipv4/netfilter/nf_tproxy_ipv4.c 			sk = sk2;
sk                 46 net/ipv4/netfilter/nf_tproxy_ipv4.c 	return sk;
sk                 82 net/ipv4/netfilter/nf_tproxy_ipv4.c 	struct sock *sk;
sk                 95 net/ipv4/netfilter/nf_tproxy_ipv4.c 			sk = inet_lookup_listener(net, &tcp_hashinfo, skb,
sk                102 net/ipv4/netfilter/nf_tproxy_ipv4.c 			if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk                103 net/ipv4/netfilter/nf_tproxy_ipv4.c 				sk = NULL;
sk                111 net/ipv4/netfilter/nf_tproxy_ipv4.c 			sk = inet_lookup_established(net, &tcp_hashinfo,
sk                121 net/ipv4/netfilter/nf_tproxy_ipv4.c 		sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
sk                123 net/ipv4/netfilter/nf_tproxy_ipv4.c 		if (sk) {
sk                124 net/ipv4/netfilter/nf_tproxy_ipv4.c 			int connected = (sk->sk_state == TCP_ESTABLISHED);
sk                125 net/ipv4/netfilter/nf_tproxy_ipv4.c 			int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
sk                135 net/ipv4/netfilter/nf_tproxy_ipv4.c 				sock_put(sk);
sk                136 net/ipv4/netfilter/nf_tproxy_ipv4.c 				sk = NULL;
sk                142 net/ipv4/netfilter/nf_tproxy_ipv4.c 		sk = NULL;
sk                146 net/ipv4/netfilter/nf_tproxy_ipv4.c 		 protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
sk                148 net/ipv4/netfilter/nf_tproxy_ipv4.c 	return sk;
sk               1496 net/ipv4/nexthop.c 	struct net *net = sock_net(skb->sk);
sk               1560 net/ipv4/nexthop.c 	struct net *net = sock_net(skb->sk);
sk               1587 net/ipv4/nexthop.c 	struct net *net = sock_net(in_skb->sk);
sk               1718 net/ipv4/nexthop.c 	struct net *net = sock_net(skb->sk);
sk                 77 net/ipv4/ping.c int ping_get_port(struct sock *sk, unsigned short ident)
sk                 84 net/ipv4/ping.c 	isk = inet_sk(sk);
sk                 93 net/ipv4/ping.c 			hlist = ping_hashslot(&ping_table, sock_net(sk),
sk                111 net/ipv4/ping.c 		hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
sk                120 net/ipv4/ping.c 			    (sk2 != sk) &&
sk                121 net/ipv4/ping.c 			    (!sk2->sk_reuse || !sk->sk_reuse))
sk                128 net/ipv4/ping.c 	if (sk_unhashed(sk)) {
sk                130 net/ipv4/ping.c 		sock_hold(sk);
sk                131 net/ipv4/ping.c 		hlist_nulls_add_head(&sk->sk_nulls_node, hlist);
sk                132 net/ipv4/ping.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                143 net/ipv4/ping.c int ping_hash(struct sock *sk)
sk                145 net/ipv4/ping.c 	pr_debug("ping_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
sk                151 net/ipv4/ping.c void ping_unhash(struct sock *sk)
sk                153 net/ipv4/ping.c 	struct inet_sock *isk = inet_sk(sk);
sk                157 net/ipv4/ping.c 	if (sk_hashed(sk)) {
sk                158 net/ipv4/ping.c 		hlist_nulls_del(&sk->sk_nulls_node);
sk                159 net/ipv4/ping.c 		sk_nulls_node_init(&sk->sk_nulls_node);
sk                160 net/ipv4/ping.c 		sock_put(sk);
sk                163 net/ipv4/ping.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk                172 net/ipv4/ping.c 	struct sock *sk = NULL;
sk                189 net/ipv4/ping.c 	ping_portaddr_for_each_entry(sk, hnode, hslot) {
sk                190 net/ipv4/ping.c 		isk = inet_sk(sk);
sk                197 net/ipv4/ping.c 		    sk->sk_family == AF_INET) {
sk                198 net/ipv4/ping.c 			pr_debug("found: %p: num=%d, daddr=%pI4, dif=%d\n", sk,
sk                200 net/ipv4/ping.c 				 sk->sk_bound_dev_if);
sk                207 net/ipv4/ping.c 			   sk->sk_family == AF_INET6) {
sk                209 net/ipv4/ping.c 			pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
sk                211 net/ipv4/ping.c 				 &sk->sk_v6_rcv_saddr,
sk                212 net/ipv4/ping.c 				 sk->sk_bound_dev_if);
sk                214 net/ipv4/ping.c 			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
sk                215 net/ipv4/ping.c 			    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr,
sk                223 net/ipv4/ping.c 		if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
sk                226 net/ipv4/ping.c 		sock_hold(sk);
sk                230 net/ipv4/ping.c 	sk = NULL;
sk                234 net/ipv4/ping.c 	return sk;
sk                252 net/ipv4/ping.c int ping_init_sock(struct sock *sk)
sk                254 net/ipv4/ping.c 	struct net *net = sock_net(sk);
sk                261 net/ipv4/ping.c 	if (sk->sk_family == AF_INET6)
sk                262 net/ipv4/ping.c 		sk->sk_ipv6only = 1;
sk                284 net/ipv4/ping.c void ping_close(struct sock *sk, long timeout)
sk                287 net/ipv4/ping.c 		 inet_sk(sk), inet_sk(sk)->inet_num);
sk                288 net/ipv4/ping.c 	pr_debug("isk->refcnt = %d\n", refcount_read(&sk->sk_refcnt));
sk                290 net/ipv4/ping.c 	sk_common_release(sk);
sk                295 net/ipv4/ping.c static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
sk                297 net/ipv4/ping.c 	struct net *net = sock_net(sk);
sk                298 net/ipv4/ping.c 	if (sk->sk_family == AF_INET) {
sk                311 net/ipv4/ping.c 			 sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
sk                325 net/ipv4/ping.c 	} else if (sk->sk_family == AF_INET6) {
sk                337 net/ipv4/ping.c 			 sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
sk                363 net/ipv4/ping.c 			sk->sk_bound_dev_if = addr->sin6_scope_id;
sk                371 net/ipv4/ping.c static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
sk                374 net/ipv4/ping.c 		struct inet_sock *isk = inet_sk(sk);
sk                380 net/ipv4/ping.c 		struct ipv6_pinfo *np = inet6_sk(sk);
sk                381 net/ipv4/ping.c 		sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr;
sk                386 net/ipv4/ping.c static void ping_clear_saddr(struct sock *sk, int dif)
sk                388 net/ipv4/ping.c 	sk->sk_bound_dev_if = dif;
sk                389 net/ipv4/ping.c 	if (sk->sk_family == AF_INET) {
sk                390 net/ipv4/ping.c 		struct inet_sock *isk = inet_sk(sk);
sk                393 net/ipv4/ping.c 	} else if (sk->sk_family == AF_INET6) {
sk                394 net/ipv4/ping.c 		struct ipv6_pinfo *np = inet6_sk(sk);
sk                395 net/ipv4/ping.c 		memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
sk                405 net/ipv4/ping.c int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                407 net/ipv4/ping.c 	struct inet_sock *isk = inet_sk(sk);
sk                410 net/ipv4/ping.c 	int dif = sk->sk_bound_dev_if;
sk                412 net/ipv4/ping.c 	err = ping_check_bind_addr(sk, isk, uaddr, addr_len);
sk                416 net/ipv4/ping.c 	lock_sock(sk);
sk                423 net/ipv4/ping.c 	ping_set_saddr(sk, uaddr);
sk                425 net/ipv4/ping.c 	if (ping_get_port(sk, snum) != 0) {
sk                426 net/ipv4/ping.c 		ping_clear_saddr(sk, dif);
sk                432 net/ipv4/ping.c 		 sk->sk_bound_dev_if);
sk                435 net/ipv4/ping.c 	if (sk->sk_family == AF_INET && isk->inet_rcv_saddr)
sk                436 net/ipv4/ping.c 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
sk                438 net/ipv4/ping.c 	if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr))
sk                439 net/ipv4/ping.c 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
sk                443 net/ipv4/ping.c 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
sk                449 net/ipv4/ping.c 	if (sk->sk_family == AF_INET6)
sk                450 net/ipv4/ping.c 		memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
sk                453 net/ipv4/ping.c 	sk_dst_reset(sk);
sk                455 net/ipv4/ping.c 	release_sock(sk);
sk                484 net/ipv4/ping.c 	struct sock *sk;
sk                511 net/ipv4/ping.c 	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
sk                512 net/ipv4/ping.c 	if (!sk) {
sk                516 net/ipv4/ping.c 	pr_debug("err on socket %p\n", sk);
sk                520 net/ipv4/ping.c 	inet_sock = inet_sk(sk);
sk                540 net/ipv4/ping.c 				ipv4_sk_update_pmtu(skb, sk, info);
sk                556 net/ipv4/ping.c 			ipv4_sk_redirect(skb, sk);
sk                571 net/ipv4/ping.c 	    (family == AF_INET6 && !inet6_sk(sk)->recverr)) {
sk                572 net/ipv4/ping.c 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
sk                576 net/ipv4/ping.c 			ip_icmp_error(sk, skb, err, 0 /* no remote port */,
sk                580 net/ipv4/ping.c 			pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
sk                585 net/ipv4/ping.c 	sk->sk_err = err;
sk                586 net/ipv4/ping.c 	sk->sk_error_report(sk);
sk                588 net/ipv4/ping.c 	sock_put(sk);
sk                634 net/ipv4/ping.c static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
sk                637 net/ipv4/ping.c 	struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
sk                646 net/ipv4/ping.c 	return ip_push_pending_frames(sk, fl4);
sk                694 net/ipv4/ping.c static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk                696 net/ipv4/ping.c 	struct net *net = sock_net(sk);
sk                698 net/ipv4/ping.c 	struct inet_sock *inet = inet_sk(sk);
sk                729 net/ipv4/ping.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk                738 net/ipv4/ping.c 		err = ip_cmsg_send(sk, msg, &ipc, false);
sk                770 net/ipv4/ping.c 	if (sock_flag(sk, SOCK_LOCALROUTE) ||
sk                777 net/ipv4/ping.c 		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
sk                785 net/ipv4/ping.c 			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
sk                786 net/ipv4/ping.c 			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
sk                787 net/ipv4/ping.c 			   sk->sk_uid);
sk                789 net/ipv4/ping.c 	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
sk                790 net/ipv4/ping.c 	rt = ip_route_output_flow(net, &fl4, sk);
sk                801 net/ipv4/ping.c 	    !sock_flag(sk, SOCK_BROADCAST))
sk                811 net/ipv4/ping.c 	lock_sock(sk);
sk                822 net/ipv4/ping.c 	err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
sk                825 net/ipv4/ping.c 		ip_flush_pending_frames(sk);
sk                827 net/ipv4/ping.c 		err = ping_v4_push_pending_frames(sk, &pfh, &fl4);
sk                828 net/ipv4/ping.c 	release_sock(sk);
sk                836 net/ipv4/ping.c 		icmp_out_count(sock_net(sk), user_icmph.type);
sk                850 net/ipv4/ping.c int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
sk                853 net/ipv4/ping.c 	struct inet_sock *isk = inet_sk(sk);
sk                854 net/ipv4/ping.c 	int family = sk->sk_family;
sk                865 net/ipv4/ping.c 		return inet_recv_error(sk, msg, len, addr_len);
sk                867 net/ipv4/ping.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                882 net/ipv4/ping.c 	sock_recv_timestamp(msg, sk, skb);
sk                901 net/ipv4/ping.c 		struct ipv6_pinfo *np = inet6_sk(sk);
sk                918 net/ipv4/ping.c 		if (inet6_sk(sk)->rxopt.all)
sk                919 net/ipv4/ping.c 			pingv6_ops.ip6_datagram_recv_common_ctl(sk, msg, skb);
sk                921 net/ipv4/ping.c 		    inet6_sk(sk)->rxopt.all)
sk                922 net/ipv4/ping.c 			pingv6_ops.ip6_datagram_recv_specific_ctl(sk, msg, skb);
sk                933 net/ipv4/ping.c 	skb_free_datagram(sk, skb);
sk                940 net/ipv4/ping.c int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                943 net/ipv4/ping.c 		 inet_sk(sk), inet_sk(sk)->inet_num, skb);
sk                944 net/ipv4/ping.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
sk                960 net/ipv4/ping.c 	struct sock *sk;
sk                972 net/ipv4/ping.c 	sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
sk                973 net/ipv4/ping.c 	if (sk) {
sk                976 net/ipv4/ping.c 		pr_debug("rcv on socket %p\n", sk);
sk                978 net/ipv4/ping.c 			ping_queue_rcv_skb(sk, skb2);
sk                979 net/ipv4/ping.c 		sock_put(sk);
sk               1013 net/ipv4/ping.c 	struct sock *sk;
sk               1027 net/ipv4/ping.c 		sk_nulls_for_each(sk, node, hslot) {
sk               1028 net/ipv4/ping.c 			if (net_eq(sock_net(sk), net) &&
sk               1029 net/ipv4/ping.c 			    sk->sk_family == state->family)
sk               1033 net/ipv4/ping.c 	sk = NULL;
sk               1035 net/ipv4/ping.c 	return sk;
sk               1038 net/ipv4/ping.c static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk)
sk               1044 net/ipv4/ping.c 		sk = sk_nulls_next(sk);
sk               1045 net/ipv4/ping.c 	} while (sk && (!net_eq(sock_net(sk), net)));
sk               1047 net/ipv4/ping.c 	if (!sk)
sk               1049 net/ipv4/ping.c 	return sk;
sk               1054 net/ipv4/ping.c 	struct sock *sk = ping_get_first(seq, 0);
sk               1056 net/ipv4/ping.c 	if (sk)
sk               1057 net/ipv4/ping.c 		while (pos && (sk = ping_get_next(seq, sk)) != NULL)
sk               1059 net/ipv4/ping.c 	return pos ? NULL : sk;
sk               1082 net/ipv4/ping.c 	struct sock *sk;
sk               1085 net/ipv4/ping.c 		sk = ping_get_idx(seq, 0);
sk               1087 net/ipv4/ping.c 		sk = ping_get_next(seq, v);
sk               1090 net/ipv4/ping.c 	return sk;
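
The net/ipv4/ping.c entries above implement the unprivileged ICMP datagram ("ping") socket: ping_get_port()/ping_lookup() hash the socket by echo identifier, ping_v4_sendmsg() builds the echo request, ping_recvmsg() hands the reply back. A minimal userspace sketch of that path, assuming net.ipv4.ping_group_range permits the caller's group; the loopback target and sequence number are illustrative, not taken from the sources above.

/* Unprivileged ping via SOCK_DGRAM/IPPROTO_ICMP (net/ipv4/ping.c path). */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <arpa/inet.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
	if (fd < 0) { perror("socket"); return 1; }

	struct sockaddr_in dst = { .sin_family = AF_INET };
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	struct icmphdr req = { .type = ICMP_ECHO, .code = 0 };
	req.un.echo.sequence = htons(1);
	/* The kernel overrides the echo identifier with the socket's local
	 * "port" and fills in the checksum before transmission. */
	if (sendto(fd, &req, sizeof(req), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	char buf[512];
	ssize_t n = recv(fd, buf, sizeof(buf), 0);   /* reply starts at the ICMP header */
	if (n >= (ssize_t)sizeof(struct icmphdr)) {
		struct icmphdr *rep = (struct icmphdr *)buf;
		printf("reply type=%d seq=%d\n", rep->type,
		       ntohs(rep->un.echo.sequence));
	}
	close(fd);
	return 0;
}
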
sk                 93 net/ipv4/raw.c int raw_hash_sk(struct sock *sk)
sk                 95 net/ipv4/raw.c 	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
sk                 98 net/ipv4/raw.c 	head = &h->ht[inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1)];
sk                101 net/ipv4/raw.c 	sk_add_node(sk, head);
sk                102 net/ipv4/raw.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                109 net/ipv4/raw.c void raw_unhash_sk(struct sock *sk)
sk                111 net/ipv4/raw.c 	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
sk                114 net/ipv4/raw.c 	if (sk_del_node_init(sk))
sk                115 net/ipv4/raw.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk                120 net/ipv4/raw.c struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
sk                124 net/ipv4/raw.c 	sk_for_each_from(sk) {
sk                125 net/ipv4/raw.c 		struct inet_sock *inet = inet_sk(sk);
sk                127 net/ipv4/raw.c 		if (net_eq(sock_net(sk), net) && inet->inet_num == num	&&
sk                130 net/ipv4/raw.c 		    raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
sk                133 net/ipv4/raw.c 	sk = NULL;
sk                135 net/ipv4/raw.c 	return sk;
sk                143 net/ipv4/raw.c static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
sk                154 net/ipv4/raw.c 		__u32 data = raw_sk(sk)->filter.data;
sk                173 net/ipv4/raw.c 	struct sock *sk;
sk                184 net/ipv4/raw.c 	sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol,
sk                187 net/ipv4/raw.c 	while (sk) {
sk                189 net/ipv4/raw.c 		if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
sk                190 net/ipv4/raw.c 		    ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
sk                196 net/ipv4/raw.c 				raw_rcv(sk, clone);
sk                198 net/ipv4/raw.c 		sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
sk                225 net/ipv4/raw.c static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
sk                227 net/ipv4/raw.c 	struct inet_sock *inet = inet_sk(sk);
sk                234 net/ipv4/raw.c 		ipv4_sk_update_pmtu(skb, sk, info);
sk                236 net/ipv4/raw.c 		ipv4_sk_redirect(skb, sk);
sk                245 net/ipv4/raw.c 	if (!inet->recverr && sk->sk_state != TCP_ESTABLISHED)
sk                277 net/ipv4/raw.c 		ip_icmp_error(sk, skb, err, 0, info, payload);
sk                281 net/ipv4/raw.c 		sk->sk_err = err;
sk                282 net/ipv4/raw.c 		sk->sk_error_report(sk);
sk                315 net/ipv4/raw.c static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                319 net/ipv4/raw.c 	ipv4_pktinfo_prepare(sk, skb);
sk                320 net/ipv4/raw.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
sk                328 net/ipv4/raw.c int raw_rcv(struct sock *sk, struct sk_buff *skb)
sk                330 net/ipv4/raw.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
sk                331 net/ipv4/raw.c 		atomic_inc(&sk->sk_drops);
sk                339 net/ipv4/raw.c 	raw_rcv_skb(sk, skb);
sk                343 net/ipv4/raw.c static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
sk                348 net/ipv4/raw.c 	struct inet_sock *inet = inet_sk(sk);
sk                349 net/ipv4/raw.c 	struct net *net = sock_net(sk);
sk                358 net/ipv4/raw.c 		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
sk                370 net/ipv4/raw.c 	skb = sock_alloc_send_skb(sk,
sk                377 net/ipv4/raw.c 	skb->priority = sk->sk_priority;
sk                429 net/ipv4/raw.c 		      net, sk, skb, NULL, rt->dst.dev,
sk                498 net/ipv4/raw.c static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk                500 net/ipv4/raw.c 	struct inet_sock *inet = inet_sk(sk);
sk                501 net/ipv4/raw.c 	struct net *net = sock_net(sk);
sk                555 net/ipv4/raw.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk                563 net/ipv4/raw.c 		err = ip_cmsg_send(sk, msg, &ipc, false);
sk                601 net/ipv4/raw.c 	tos = get_rtconn_flags(&ipc, sk);
sk                606 net/ipv4/raw.c 		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
sk                620 net/ipv4/raw.c 		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
sk                628 net/ipv4/raw.c 			   hdrincl ? IPPROTO_RAW : sk->sk_protocol,
sk                629 net/ipv4/raw.c 			   inet_sk_flowi_flags(sk) |
sk                631 net/ipv4/raw.c 			   daddr, saddr, 0, 0, sk->sk_uid);
sk                642 net/ipv4/raw.c 	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
sk                643 net/ipv4/raw.c 	rt = ip_route_output_flow(net, &fl4, sk);
sk                651 net/ipv4/raw.c 	if (rt->rt_flags & RTCF_BROADCAST && !sock_flag(sk, SOCK_BROADCAST))
sk                659 net/ipv4/raw.c 		err = raw_send_hdrinc(sk, &fl4, msg, len,
sk                665 net/ipv4/raw.c 		lock_sock(sk);
sk                666 net/ipv4/raw.c 		err = ip_append_data(sk, &fl4, raw_getfrag,
sk                670 net/ipv4/raw.c 			ip_flush_pending_frames(sk);
sk                672 net/ipv4/raw.c 			err = ip_push_pending_frames(sk, &fl4);
sk                676 net/ipv4/raw.c 		release_sock(sk);
sk                697 net/ipv4/raw.c static void raw_close(struct sock *sk, long timeout)
sk                702 net/ipv4/raw.c 	ip_ra_control(sk, 0, NULL);
sk                704 net/ipv4/raw.c 	sk_common_release(sk);
sk                707 net/ipv4/raw.c static void raw_destroy(struct sock *sk)
sk                709 net/ipv4/raw.c 	lock_sock(sk);
sk                710 net/ipv4/raw.c 	ip_flush_pending_frames(sk);
sk                711 net/ipv4/raw.c 	release_sock(sk);
sk                715 net/ipv4/raw.c static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                717 net/ipv4/raw.c 	struct inet_sock *inet = inet_sk(sk);
sk                723 net/ipv4/raw.c 	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
sk                726 net/ipv4/raw.c 	if (sk->sk_bound_dev_if)
sk                727 net/ipv4/raw.c 		tb_id = l3mdev_fib_table_by_index(sock_net(sk),
sk                728 net/ipv4/raw.c 						 sk->sk_bound_dev_if) ? : tb_id;
sk                730 net/ipv4/raw.c 	chk_addr_ret = inet_addr_type_table(sock_net(sk), addr->sin_addr.s_addr,
sk                740 net/ipv4/raw.c 	sk_dst_reset(sk);
sk                750 net/ipv4/raw.c static int raw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk                753 net/ipv4/raw.c 	struct inet_sock *inet = inet_sk(sk);
sk                763 net/ipv4/raw.c 		err = ip_recv_error(sk, msg, len, addr_len);
sk                767 net/ipv4/raw.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                781 net/ipv4/raw.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk                796 net/ipv4/raw.c 	skb_free_datagram(sk, skb);
sk                803 net/ipv4/raw.c static int raw_sk_init(struct sock *sk)
sk                805 net/ipv4/raw.c 	struct raw_sock *rp = raw_sk(sk);
sk                807 net/ipv4/raw.c 	if (inet_sk(sk)->inet_num == IPPROTO_ICMP)
sk                812 net/ipv4/raw.c static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
sk                816 net/ipv4/raw.c 	if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
sk                821 net/ipv4/raw.c static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
sk                834 net/ipv4/raw.c 	    copy_to_user(optval, &raw_sk(sk)->filter, len))
sk                840 net/ipv4/raw.c static int do_raw_setsockopt(struct sock *sk, int level, int optname,
sk                844 net/ipv4/raw.c 		if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
sk                847 net/ipv4/raw.c 			return raw_seticmpfilter(sk, optval, optlen);
sk                852 net/ipv4/raw.c static int raw_setsockopt(struct sock *sk, int level, int optname,
sk                856 net/ipv4/raw.c 		return ip_setsockopt(sk, level, optname, optval, optlen);
sk                857 net/ipv4/raw.c 	return do_raw_setsockopt(sk, level, optname, optval, optlen);
sk                861 net/ipv4/raw.c static int compat_raw_setsockopt(struct sock *sk, int level, int optname,
sk                865 net/ipv4/raw.c 		return compat_ip_setsockopt(sk, level, optname, optval, optlen);
sk                866 net/ipv4/raw.c 	return do_raw_setsockopt(sk, level, optname, optval, optlen);
sk                870 net/ipv4/raw.c static int do_raw_getsockopt(struct sock *sk, int level, int optname,
sk                874 net/ipv4/raw.c 		if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
sk                877 net/ipv4/raw.c 			return raw_geticmpfilter(sk, optval, optlen);
sk                882 net/ipv4/raw.c static int raw_getsockopt(struct sock *sk, int level, int optname,
sk                886 net/ipv4/raw.c 		return ip_getsockopt(sk, level, optname, optval, optlen);
sk                887 net/ipv4/raw.c 	return do_raw_getsockopt(sk, level, optname, optval, optlen);
sk                891 net/ipv4/raw.c static int compat_raw_getsockopt(struct sock *sk, int level, int optname,
sk                895 net/ipv4/raw.c 		return compat_ip_getsockopt(sk, level, optname, optval, optlen);
sk                896 net/ipv4/raw.c 	return do_raw_getsockopt(sk, level, optname, optval, optlen);
sk                900 net/ipv4/raw.c static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk                904 net/ipv4/raw.c 		int amount = sk_wmem_alloc_get(sk);
sk                912 net/ipv4/raw.c 		spin_lock_bh(&sk->sk_receive_queue.lock);
sk                913 net/ipv4/raw.c 		skb = skb_peek(&sk->sk_receive_queue);
sk                916 net/ipv4/raw.c 		spin_unlock_bh(&sk->sk_receive_queue.lock);
sk                922 net/ipv4/raw.c 		return ipmr_ioctl(sk, cmd, (void __user *)arg);
sk                930 net/ipv4/raw.c static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
sk                938 net/ipv4/raw.c 		return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
sk                946 net/ipv4/raw.c int raw_abort(struct sock *sk, int err)
sk                948 net/ipv4/raw.c 	lock_sock(sk);
sk                950 net/ipv4/raw.c 	sk->sk_err = err;
sk                951 net/ipv4/raw.c 	sk->sk_error_report(sk);
sk                952 net/ipv4/raw.c 	__udp_disconnect(sk, 0);
sk                954 net/ipv4/raw.c 	release_sock(sk);
sk                993 net/ipv4/raw.c 	struct sock *sk;
sk                999 net/ipv4/raw.c 		sk_for_each(sk, &h->ht[state->bucket])
sk               1000 net/ipv4/raw.c 			if (sock_net(sk) == seq_file_net(seq))
sk               1003 net/ipv4/raw.c 	sk = NULL;
sk               1005 net/ipv4/raw.c 	return sk;
sk               1008 net/ipv4/raw.c static struct sock *raw_get_next(struct seq_file *seq, struct sock *sk)
sk               1014 net/ipv4/raw.c 		sk = sk_next(sk);
sk               1017 net/ipv4/raw.c 	} while (sk && sock_net(sk) != seq_file_net(seq));
sk               1019 net/ipv4/raw.c 	if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
sk               1020 net/ipv4/raw.c 		sk = sk_head(&h->ht[state->bucket]);
sk               1023 net/ipv4/raw.c 	return sk;
sk               1028 net/ipv4/raw.c 	struct sock *sk = raw_get_first(seq);
sk               1030 net/ipv4/raw.c 	if (sk)
sk               1031 net/ipv4/raw.c 		while (pos && (sk = raw_get_next(seq, sk)) != NULL)
sk               1033 net/ipv4/raw.c 	return pos ? NULL : sk;
sk               1047 net/ipv4/raw.c 	struct sock *sk;
sk               1050 net/ipv4/raw.c 		sk = raw_get_first(seq);
sk               1052 net/ipv4/raw.c 		sk = raw_get_next(seq, v);
sk               1054 net/ipv4/raw.c 	return sk;
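
The icmp_filter()/raw_seticmpfilter() entries above are the kernel side of the SOL_RAW ICMP_FILTER option on IPv4 raw sockets: a set bit in the filter word suppresses delivery of that ICMP type. A minimal userspace sketch (CAP_NET_RAW is required; the buffer size is illustrative):

/* Raw ICMP socket that filters out everything except echo replies. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/icmp.h>   /* struct icmp_filter, ICMP_FILTER, ICMP_ECHOREPLY */

int main(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
	if (fd < 0) { perror("socket"); return 1; }

	/* A set bit means "drop this ICMP type" (see icmp_filter() in
	 * net/ipv4/raw.c), so clear only the ICMP_ECHOREPLY bit. */
	struct icmp_filter filt = { .data = ~(1U << ICMP_ECHOREPLY) };
	if (setsockopt(fd, SOL_RAW, ICMP_FILTER, &filt, sizeof(filt)) < 0)
		perror("setsockopt(ICMP_FILTER)");

	/* Raw IPv4 sockets deliver the full IP header to userspace. */
	char buf[1500];
	ssize_t n = recv(fd, buf, sizeof(buf), 0);
	printf("got %zd bytes (IP header included)\n", n);

	close(fd);
	return 0;
}
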
sk                 41 net/ipv4/raw_diag.c 	struct sock *sk = NULL;
sk                 44 net/ipv4/raw_diag.c 		sk = __raw_v4_lookup(net, from, r->sdiag_raw_protocol,
sk                 50 net/ipv4/raw_diag.c 		sk = __raw_v6_lookup(net, from, r->sdiag_raw_protocol,
sk                 55 net/ipv4/raw_diag.c 	return sk;
sk                 61 net/ipv4/raw_diag.c 	struct sock *sk = NULL, *s;
sk                 70 net/ipv4/raw_diag.c 			sk = raw_lookup(net, s, r);
sk                 71 net/ipv4/raw_diag.c 			if (sk) {
sk                 79 net/ipv4/raw_diag.c 				sock_hold(sk);
sk                 87 net/ipv4/raw_diag.c 	return sk ? sk : ERR_PTR(-ENOENT);
sk                 94 net/ipv4/raw_diag.c 	struct net *net = sock_net(in_skb->sk);
sk                 96 net/ipv4/raw_diag.c 	struct sock *sk;
sk                 99 net/ipv4/raw_diag.c 	sk = raw_sock_get(net, r);
sk                100 net/ipv4/raw_diag.c 	if (IS_ERR(sk))
sk                101 net/ipv4/raw_diag.c 		return PTR_ERR(sk);
sk                108 net/ipv4/raw_diag.c 		sock_put(sk);
sk                112 net/ipv4/raw_diag.c 	err = inet_sk_diag_fill(sk, NULL, rep, r,
sk                113 net/ipv4/raw_diag.c 				sk_user_ns(NETLINK_CB(in_skb).sk),
sk                117 net/ipv4/raw_diag.c 	sock_put(sk);
sk                132 net/ipv4/raw_diag.c static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
sk                137 net/ipv4/raw_diag.c 	if (!inet_diag_bc_sk(bc, sk))
sk                140 net/ipv4/raw_diag.c 	return inet_sk_diag_fill(sk, NULL, skb, r,
sk                141 net/ipv4/raw_diag.c 				 sk_user_ns(NETLINK_CB(cb->skb).sk),
sk                152 net/ipv4/raw_diag.c 	struct net *net = sock_net(skb->sk);
sk                154 net/ipv4/raw_diag.c 	struct sock *sk = NULL;
sk                166 net/ipv4/raw_diag.c 		sk_for_each(sk, &hashinfo->ht[slot]) {
sk                167 net/ipv4/raw_diag.c 			struct inet_sock *inet = inet_sk(sk);
sk                169 net/ipv4/raw_diag.c 			if (!net_eq(sock_net(sk), net))
sk                173 net/ipv4/raw_diag.c 			if (sk->sk_family != r->sdiag_family)
sk                181 net/ipv4/raw_diag.c 			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0)
sk                195 net/ipv4/raw_diag.c static void raw_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
sk                198 net/ipv4/raw_diag.c 	r->idiag_rqueue = sk_rmem_alloc_get(sk);
sk                199 net/ipv4/raw_diag.c 	r->idiag_wqueue = sk_wmem_alloc_get(sk);
sk                206 net/ipv4/raw_diag.c 	struct net *net = sock_net(in_skb->sk);
sk                207 net/ipv4/raw_diag.c 	struct sock *sk;
sk                210 net/ipv4/raw_diag.c 	sk = raw_sock_get(net, r);
sk                211 net/ipv4/raw_diag.c 	if (IS_ERR(sk))
sk                212 net/ipv4/raw_diag.c 		return PTR_ERR(sk);
sk                213 net/ipv4/raw_diag.c 	err = sock_diag_destroy(sk, ECONNABORTED);
sk                214 net/ipv4/raw_diag.c 	sock_put(sk);
sk                141 net/ipv4/route.c static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk                144 net/ipv4/route.c static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
sk                525 net/ipv4/route.c 			     const struct sock *sk,
sk                530 net/ipv4/route.c 	if (sk) {
sk                531 net/ipv4/route.c 		const struct inet_sock *inet = inet_sk(sk);
sk                533 net/ipv4/route.c 		oif = sk->sk_bound_dev_if;
sk                534 net/ipv4/route.c 		mark = sk->sk_mark;
sk                535 net/ipv4/route.c 		tos = RT_CONN_FLAGS(sk);
sk                536 net/ipv4/route.c 		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
sk                542 net/ipv4/route.c 			   sock_net_uid(net, sk));
sk                546 net/ipv4/route.c 			       const struct sock *sk)
sk                555 net/ipv4/route.c 	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
sk                558 net/ipv4/route.c static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
sk                560 net/ipv4/route.c 	const struct inet_sock *inet = inet_sk(sk);
sk                568 net/ipv4/route.c 	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
sk                569 net/ipv4/route.c 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk                570 net/ipv4/route.c 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
sk                571 net/ipv4/route.c 			   inet_sk_flowi_flags(sk),
sk                572 net/ipv4/route.c 			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
sk                576 net/ipv4/route.c static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
sk                580 net/ipv4/route.c 		build_skb_flow_key(fl4, skb, sk);
sk                582 net/ipv4/route.c 		build_sk_flow_key(fl4, sk);
sk                818 net/ipv4/route.c static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
sk                831 net/ipv4/route.c 	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
sk               1044 net/ipv4/route.c static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk               1051 net/ipv4/route.c 	ip_rt_build_flow_key(&fl4, sk, skb);
sk               1073 net/ipv4/route.c static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
sk               1079 net/ipv4/route.c 	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
sk               1082 net/ipv4/route.c 		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
sk               1084 net/ipv4/route.c 	rt = __ip_route_output_key(sock_net(sk), &fl4);
sk               1091 net/ipv4/route.c void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
sk               1098 net/ipv4/route.c 	struct net *net = sock_net(sk);
sk               1100 net/ipv4/route.c 	bh_lock_sock(sk);
sk               1102 net/ipv4/route.c 	if (!ip_sk_accept_pmtu(sk))
sk               1105 net/ipv4/route.c 	odst = sk_dst_get(sk);
sk               1107 net/ipv4/route.c 	if (sock_owned_by_user(sk) || !odst) {
sk               1108 net/ipv4/route.c 		__ipv4_sk_update_pmtu(skb, sk, mtu);
sk               1112 net/ipv4/route.c 	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
sk               1116 net/ipv4/route.c 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
sk               1129 net/ipv4/route.c 		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
sk               1137 net/ipv4/route.c 		sk_dst_set(sk, &rt->dst);
sk               1140 net/ipv4/route.c 	bh_unlock_sock(sk);
sk               1162 net/ipv4/route.c void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
sk               1167 net/ipv4/route.c 	struct net *net = sock_net(sk);
sk               1169 net/ipv4/route.c 	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
sk               1234 net/ipv4/route.c static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
sk               2650 net/ipv4/route.c static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk               2656 net/ipv4/route.c static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
sk               2719 net/ipv4/route.c 				    const struct sock *sk)
sk               2729 net/ipv4/route.c 							sk, 0);
sk               2919 net/ipv4/route.c 	struct net *net = sock_net(cb->skb->sk);
sk               3078 net/ipv4/route.c 	struct net *net = sock_net(in_skb->sk);
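
The ip_rt_update_pmtu()/ipv4_sk_update_pmtu() entries above keep the per-destination path MTU on a socket's cached route up to date; userspace can read that value on a connected socket with getsockopt(IP_MTU). A minimal sketch; the destination is a documentation address chosen only for illustration.

/* Read the path MTU cached on a connected socket's route. */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
	if (fd < 0) { perror("socket"); return 1; }

	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port   = htons(9) };
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);   /* TEST-NET-1 */

	/* connect() pins a route (and its PMTU) to the socket. */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}

	int mtu;
	socklen_t len = sizeof(mtu);
	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
		printf("path MTU toward 192.0.2.1: %d\n", mtu);
	else
		perror("getsockopt(IP_MTU)");

	close(fd);
	return 0;
}
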
sk                201 net/ipv4/syncookies.c struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
sk                205 net/ipv4/syncookies.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                209 net/ipv4/syncookies.c 	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
sk                215 net/ipv4/syncookies.c 		if (inet_csk_reqsk_queue_add(sk, req, child))
sk                283 net/ipv4/syncookies.c struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
sk                289 net/ipv4/syncookies.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                292 net/ipv4/syncookies.c 	struct sock *ret = sk;
sk                300 net/ipv4/syncookies.c 	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
sk                303 net/ipv4/syncookies.c 	if (tcp_synq_no_recent_overflow(sk))
sk                308 net/ipv4/syncookies.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
sk                312 net/ipv4/syncookies.c 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
sk                316 net/ipv4/syncookies.c 	tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);
sk                319 net/ipv4/syncookies.c 		tsoff = secure_tcp_ts_off(sock_net(sk),
sk                325 net/ipv4/syncookies.c 	if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
sk                329 net/ipv4/syncookies.c 	req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false); /* for safety */
sk                344 net/ipv4/syncookies.c 	ireq->ir_mark		= inet_request_mark(sk, skb);
sk                355 net/ipv4/syncookies.c 	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
sk                360 net/ipv4/syncookies.c 	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));
sk                362 net/ipv4/syncookies.c 	if (security_inet_conn_request(sk, skb, req)) {
sk                376 net/ipv4/syncookies.c 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
sk                377 net/ipv4/syncookies.c 			   inet_sk_flowi_flags(sk),
sk                379 net/ipv4/syncookies.c 			   ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
sk                381 net/ipv4/syncookies.c 	rt = ip_route_output_key(sock_net(sk), &fl4);
sk                390 net/ipv4/syncookies.c 	tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
sk                396 net/ipv4/syncookies.c 	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);
sk                398 net/ipv4/syncookies.c 	ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);
sk                325 net/ipv4/tcp.c void tcp_enter_memory_pressure(struct sock *sk)
sk                336 net/ipv4/tcp.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
sk                340 net/ipv4/tcp.c void tcp_leave_memory_pressure(struct sock *sk)
sk                348 net/ipv4/tcp.c 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
sk                408 net/ipv4/tcp.c void tcp_init_sock(struct sock *sk)
sk                410 net/ipv4/tcp.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                411 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                414 net/ipv4/tcp.c 	sk->tcp_rtx_queue = RB_ROOT;
sk                415 net/ipv4/tcp.c 	tcp_init_xmit_timers(sk);
sk                440 net/ipv4/tcp.c 	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
sk                441 net/ipv4/tcp.c 	tcp_assign_congestion_control(sk);
sk                446 net/ipv4/tcp.c 	sk->sk_state = TCP_CLOSE;
sk                448 net/ipv4/tcp.c 	sk->sk_write_space = sk_stream_write_space;
sk                449 net/ipv4/tcp.c 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
sk                453 net/ipv4/tcp.c 	WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
sk                454 net/ipv4/tcp.c 	WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
sk                456 net/ipv4/tcp.c 	sk_sockets_allocated_inc(sk);
sk                457 net/ipv4/tcp.c 	sk->sk_route_forced_caps = NETIF_F_GSO;
sk                461 net/ipv4/tcp.c static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
sk                463 net/ipv4/tcp.c 	struct sk_buff *skb = tcp_write_queue_tail(sk);
sk                469 net/ipv4/tcp.c 		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
sk                478 net/ipv4/tcp.c 					  int target, struct sock *sk)
sk                485 net/ipv4/tcp.c 		if (tcp_rmem_pressure(sk))
sk                488 net/ipv4/tcp.c 	if (sk->sk_prot->stream_memory_read)
sk                489 net/ipv4/tcp.c 		return sk->sk_prot->stream_memory_read(sk);
sk                503 net/ipv4/tcp.c 	struct sock *sk = sock->sk;
sk                504 net/ipv4/tcp.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                509 net/ipv4/tcp.c 	state = inet_sk_state_load(sk);
sk                511 net/ipv4/tcp.c 		return inet_csk_listen_poll(sk);
sk                547 net/ipv4/tcp.c 	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
sk                549 net/ipv4/tcp.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                555 net/ipv4/tcp.c 		int target = sock_rcvlowat(sk, 0, INT_MAX);
sk                558 net/ipv4/tcp.c 		    !sock_flag(sk, SOCK_URGINLINE) &&
sk                562 net/ipv4/tcp.c 		if (tcp_stream_is_readable(tp, target, sk))
sk                565 net/ipv4/tcp.c 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
sk                566 net/ipv4/tcp.c 			if (sk_stream_is_writeable(sk)) {
sk                569 net/ipv4/tcp.c 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                570 net/ipv4/tcp.c 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                578 net/ipv4/tcp.c 				if (sk_stream_is_writeable(sk))
sk                586 net/ipv4/tcp.c 	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
sk                595 net/ipv4/tcp.c 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
sk                602 net/ipv4/tcp.c int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk                604 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                610 net/ipv4/tcp.c 		if (sk->sk_state == TCP_LISTEN)
sk                613 net/ipv4/tcp.c 		slow = lock_sock_fast(sk);
sk                614 net/ipv4/tcp.c 		answ = tcp_inq(sk);
sk                615 net/ipv4/tcp.c 		unlock_sock_fast(sk, slow);
sk                622 net/ipv4/tcp.c 		if (sk->sk_state == TCP_LISTEN)
sk                625 net/ipv4/tcp.c 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
sk                631 net/ipv4/tcp.c 		if (sk->sk_state == TCP_LISTEN)
sk                634 net/ipv4/tcp.c 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
sk                659 net/ipv4/tcp.c static void skb_entail(struct sock *sk, struct sk_buff *skb)
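
The tcp_ioctl() entries above back the SIOCINQ, SIOCOUTQ and SIOCOUTQNSD ioctls: bytes queued for reading, bytes written but not yet acknowledged, and bytes not yet sent at all. A minimal sketch on an already-connected TCP socket fd:

/* Report the TCP receive/send queue sizes exposed by tcp_ioctl(). */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>   /* SIOCINQ, SIOCOUTQ, SIOCOUTQNSD */

static void print_tcp_queues(int fd)
{
	int inq = 0, outq = 0, notsent = 0;

	if (ioctl(fd, SIOCINQ, &inq) < 0)
		perror("ioctl(SIOCINQ)");
	if (ioctl(fd, SIOCOUTQ, &outq) < 0)
		perror("ioctl(SIOCOUTQ)");
	if (ioctl(fd, SIOCOUTQNSD, &notsent) < 0)
		perror("ioctl(SIOCOUTQNSD)");

	printf("readable: %d bytes, unacked: %d bytes, not yet sent: %d bytes\n",
	       inq, outq, notsent);
}
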
sk                661 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                669 net/ipv4/tcp.c 	tcp_add_write_queue_tail(sk, skb);
sk                670 net/ipv4/tcp.c 	sk_wmem_queued_add(sk, skb->truesize);
sk                671 net/ipv4/tcp.c 	sk_mem_charge(sk, skb->truesize);
sk                675 net/ipv4/tcp.c 	tcp_slow_start_after_idle_check(sk);
sk                694 net/ipv4/tcp.c static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
sk                698 net/ipv4/tcp.c 	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
sk                699 net/ipv4/tcp.c 	       !tcp_rtx_queue_empty(sk) &&
sk                700 net/ipv4/tcp.c 	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
sk                703 net/ipv4/tcp.c static void tcp_push(struct sock *sk, int flags, int mss_now,
sk                706 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                709 net/ipv4/tcp.c 	skb = tcp_write_queue_tail(sk);
sk                717 net/ipv4/tcp.c 	if (tcp_should_autocork(sk, skb, size_goal)) {
sk                720 net/ipv4/tcp.c 		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
sk                721 net/ipv4/tcp.c 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
sk                722 net/ipv4/tcp.c 			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
sk                727 net/ipv4/tcp.c 		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
sk                734 net/ipv4/tcp.c 	__tcp_push_pending_frames(sk, mss_now, nonagle);
sk                743 net/ipv4/tcp.c 	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
sk                750 net/ipv4/tcp.c static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
sk                758 net/ipv4/tcp.c 	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
sk                777 net/ipv4/tcp.c 	struct sock *sk = sock->sk;
sk                787 net/ipv4/tcp.c 	sock_rps_record_flow(sk);
sk                796 net/ipv4/tcp.c 	lock_sock(sk);
sk                798 net/ipv4/tcp.c 	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
sk                800 net/ipv4/tcp.c 		ret = __tcp_splice_read(sk, &tss);
sk                806 net/ipv4/tcp.c 			if (sock_flag(sk, SOCK_DONE))
sk                808 net/ipv4/tcp.c 			if (sk->sk_err) {
sk                809 net/ipv4/tcp.c 				ret = sock_error(sk);
sk                812 net/ipv4/tcp.c 			if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                814 net/ipv4/tcp.c 			if (sk->sk_state == TCP_CLOSE) {
sk                830 net/ipv4/tcp.c 			if (!skb_queue_empty(&sk->sk_receive_queue))
sk                832 net/ipv4/tcp.c 			sk_wait_data(sk, &timeo, NULL);
sk                844 net/ipv4/tcp.c 		release_sock(sk);
sk                845 net/ipv4/tcp.c 		lock_sock(sk);
sk                847 net/ipv4/tcp.c 		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
sk                848 net/ipv4/tcp.c 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
sk                853 net/ipv4/tcp.c 	release_sock(sk);
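
The tcp_splice_read() entries above are reached from splice(2) with a TCP socket as the input side. A sketch that drains a connected socket into a file through a pipe; error handling is abbreviated and the helper name is illustrative.

/* Move data from a connected TCP socket into a file via splice(2). */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static long splice_socket_to_file(int sock_fd, int file_fd, size_t count)
{
	int pipefd[2];
	long total = 0;

	if (pipe(pipefd) < 0)
		return -1;

	while ((size_t)total < count) {
		/* socket -> pipe */
		ssize_t n = splice(sock_fd, NULL, pipefd[1], NULL,
				   count - total, SPLICE_F_MOVE);
		if (n <= 0)
			break;
		/* pipe -> file */
		ssize_t m = splice(pipefd[0], NULL, file_fd, NULL,
				   n, SPLICE_F_MOVE);
		if (m <= 0)
			break;
		total += m;
	}
	close(pipefd[0]);
	close(pipefd[1]);
	return total;
}
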
sk                862 net/ipv4/tcp.c struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
sk                868 net/ipv4/tcp.c 		skb = sk->sk_tx_skb_cache;
sk                871 net/ipv4/tcp.c 			sk->sk_tx_skb_cache = NULL;
sk                882 net/ipv4/tcp.c 	if (unlikely(tcp_under_memory_pressure(sk)))
sk                883 net/ipv4/tcp.c 		sk_mem_reclaim_partial(sk);
sk                885 net/ipv4/tcp.c 	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
sk                891 net/ipv4/tcp.c 			sk_forced_mem_schedule(sk, skb->truesize);
sk                893 net/ipv4/tcp.c 			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
sk                896 net/ipv4/tcp.c 			skb_reserve(skb, sk->sk_prot->max_header);
sk                907 net/ipv4/tcp.c 		sk->sk_prot->enter_memory_pressure(sk);
sk                908 net/ipv4/tcp.c 		sk_stream_moderate_sndbuf(sk);
sk                913 net/ipv4/tcp.c static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
sk                916 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                923 net/ipv4/tcp.c 	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
sk                931 net/ipv4/tcp.c 				     sk->sk_gso_max_segs);
sk                938 net/ipv4/tcp.c static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
sk                942 net/ipv4/tcp.c 	mss_now = tcp_current_mss(sk);
sk                943 net/ipv4/tcp.c 	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
sk                954 net/ipv4/tcp.c static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
sk                957 net/ipv4/tcp.c 		tcp_unlink_write_queue(skb, sk);
sk                958 net/ipv4/tcp.c 		if (tcp_write_queue_empty(sk))
sk                959 net/ipv4/tcp.c 			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
sk                960 net/ipv4/tcp.c 		sk_wmem_free_skb(sk, skb);
sk                964 net/ipv4/tcp.c ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
sk                967 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                971 net/ipv4/tcp.c 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk                981 net/ipv4/tcp.c 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
sk                982 net/ipv4/tcp.c 	    !tcp_passive_fastopen(sk)) {
sk                983 net/ipv4/tcp.c 		err = sk_stream_wait_connect(sk, &timeo);
sk                988 net/ipv4/tcp.c 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                990 net/ipv4/tcp.c 	mss_now = tcp_send_mss(sk, &size_goal, flags);
sk                994 net/ipv4/tcp.c 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
sk                998 net/ipv4/tcp.c 		struct sk_buff *skb = tcp_write_queue_tail(sk);
sk               1005 net/ipv4/tcp.c 			if (!sk_stream_memory_free(sk))
sk               1008 net/ipv4/tcp.c 			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
sk               1009 net/ipv4/tcp.c 					tcp_rtx_and_write_queues_empty(sk));
sk               1016 net/ipv4/tcp.c 			skb_entail(sk, skb);
sk               1029 net/ipv4/tcp.c 		if (!sk_wmem_schedule(sk, copy))
sk               1045 net/ipv4/tcp.c 		sk_wmem_queued_add(sk, copy);
sk               1046 net/ipv4/tcp.c 		sk_mem_charge(sk, copy);
sk               1066 net/ipv4/tcp.c 			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
sk               1067 net/ipv4/tcp.c 		} else if (skb == tcp_send_head(sk))
sk               1068 net/ipv4/tcp.c 			tcp_push_one(sk, mss_now);
sk               1072 net/ipv4/tcp.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               1074 net/ipv4/tcp.c 		tcp_push(sk, flags & ~MSG_MORE, mss_now,
sk               1077 net/ipv4/tcp.c 		err = sk_stream_wait_memory(sk, &timeo);
sk               1081 net/ipv4/tcp.c 		mss_now = tcp_send_mss(sk, &size_goal, flags);
sk               1086 net/ipv4/tcp.c 		tcp_tx_timestamp(sk, sk->sk_tsflags);
sk               1088 net/ipv4/tcp.c 			tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
sk               1093 net/ipv4/tcp.c 	tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
sk               1098 net/ipv4/tcp.c 	if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
sk               1099 net/ipv4/tcp.c 		sk->sk_write_space(sk);
sk               1100 net/ipv4/tcp.c 		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
sk               1102 net/ipv4/tcp.c 	return sk_stream_error(sk, flags, err);
sk               1106 net/ipv4/tcp.c int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
sk               1109 net/ipv4/tcp.c 	if (!(sk->sk_route_caps & NETIF_F_SG))
sk               1110 net/ipv4/tcp.c 		return sock_no_sendpage_locked(sk, page, offset, size, flags);
sk               1112 net/ipv4/tcp.c 	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
sk               1114 net/ipv4/tcp.c 	return do_tcp_sendpages(sk, page, offset, size, flags);
sk               1118 net/ipv4/tcp.c int tcp_sendpage(struct sock *sk, struct page *page, int offset,
sk               1123 net/ipv4/tcp.c 	lock_sock(sk);
sk               1124 net/ipv4/tcp.c 	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
sk               1125 net/ipv4/tcp.c 	release_sock(sk);
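
The do_tcp_sendpages()/tcp_sendpage() entries above service page-based sends; the usual userspace entry point that ends up here is sendfile(2) onto a TCP socket. A sketch assuming sock_fd is already connected; the helper name is illustrative.

/* Send a whole file over a connected TCP socket with sendfile(2). */
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

static int send_whole_file(int sock_fd, const char *path)
{
	int file_fd = open(path, O_RDONLY);
	struct stat st;
	off_t off = 0;

	if (file_fd < 0 || fstat(file_fd, &st) < 0)
		return -1;

	while (off < st.st_size) {
		ssize_t n = sendfile(sock_fd, file_fd, &off, st.st_size - off);
		if (n <= 0) {
			close(file_fd);
			return -1;
		}
	}
	close(file_fd);
	return 0;
}
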
sk               1139 net/ipv4/tcp.c static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
sk               1143 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1144 net/ipv4/tcp.c 	struct inet_sock *inet = inet_sk(sk);
sk               1148 net/ipv4/tcp.c 	if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
sk               1156 net/ipv4/tcp.c 				   sk->sk_allocation);
sk               1164 net/ipv4/tcp.c 		err = tcp_connect(sk);
sk               1167 net/ipv4/tcp.c 			tcp_set_state(sk, TCP_CLOSE);
sk               1169 net/ipv4/tcp.c 			sk->sk_route_caps = 0;
sk               1173 net/ipv4/tcp.c 	err = __inet_stream_connect(sk->sk_socket, uaddr,
sk               1186 net/ipv4/tcp.c int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
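
The tcp_sendmsg_fastopen() entries above handle the client side of TCP Fast Open; from userspace this is sendto() with MSG_FASTOPEN in place of an explicit connect(). A sketch; the address and payload are illustrative, and net.ipv4.tcp_fastopen must have the client bit (1) set.

/* TCP Fast Open client: data travels in (or right after) the SYN. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	if (fd < 0) { perror("socket"); return 1; }

	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port   = htons(80) };
	const char req[] = "GET / HTTP/1.0\r\n\r\n";
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	/* No explicit connect(): on the first contact the kernel requests a
	 * Fast Open cookie; on later contacts the SYN carries the data. */
	if (sendto(fd, req, strlen(req), MSG_FASTOPEN,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto(MSG_FASTOPEN)");

	close(fd);
	return 0;
}
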
sk               1188 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1200 net/ipv4/tcp.c 	if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
sk               1201 net/ipv4/tcp.c 		skb = tcp_write_queue_tail(sk);
sk               1202 net/ipv4/tcp.c 		uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
sk               1208 net/ipv4/tcp.c 		zc = sk->sk_route_caps & NETIF_F_SG;
sk               1213 net/ipv4/tcp.c 	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
sk               1215 net/ipv4/tcp.c 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
sk               1222 net/ipv4/tcp.c 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk               1224 net/ipv4/tcp.c 	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
sk               1230 net/ipv4/tcp.c 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
sk               1231 net/ipv4/tcp.c 	    !tcp_passive_fastopen(sk)) {
sk               1232 net/ipv4/tcp.c 		err = sk_stream_wait_connect(sk, &timeo);
sk               1239 net/ipv4/tcp.c 			copied = tcp_send_rcvq(sk, msg, size);
sk               1250 net/ipv4/tcp.c 	sockcm_init(&sockc, sk);
sk               1252 net/ipv4/tcp.c 		err = sock_cmsg_send(sk, msg, &sockc);
sk               1260 net/ipv4/tcp.c 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk               1266 net/ipv4/tcp.c 	mss_now = tcp_send_mss(sk, &size_goal, flags);
sk               1269 net/ipv4/tcp.c 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
sk               1275 net/ipv4/tcp.c 		skb = tcp_write_queue_tail(sk);
sk               1283 net/ipv4/tcp.c 			if (!sk_stream_memory_free(sk))
sk               1288 net/ipv4/tcp.c 				if (sk_flush_backlog(sk))
sk               1291 net/ipv4/tcp.c 			first_skb = tcp_rtx_and_write_queues_empty(sk);
sk               1292 net/ipv4/tcp.c 			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
sk               1300 net/ipv4/tcp.c 			skb_entail(sk, skb);
sk               1319 net/ipv4/tcp.c 			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
sk               1325 net/ipv4/tcp.c 			struct page_frag *pfrag = sk_page_frag(sk);
sk               1327 net/ipv4/tcp.c 			if (!sk_page_frag_refill(sk, pfrag))
sk               1341 net/ipv4/tcp.c 			if (!sk_wmem_schedule(sk, copy))
sk               1344 net/ipv4/tcp.c 			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
sk               1361 net/ipv4/tcp.c 			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
sk               1390 net/ipv4/tcp.c 			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
sk               1391 net/ipv4/tcp.c 		} else if (skb == tcp_send_head(sk))
sk               1392 net/ipv4/tcp.c 			tcp_push_one(sk, mss_now);
sk               1396 net/ipv4/tcp.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               1399 net/ipv4/tcp.c 			tcp_push(sk, flags & ~MSG_MORE, mss_now,
sk               1402 net/ipv4/tcp.c 		err = sk_stream_wait_memory(sk, &timeo);
sk               1406 net/ipv4/tcp.c 		mss_now = tcp_send_mss(sk, &size_goal, flags);
sk               1411 net/ipv4/tcp.c 		tcp_tx_timestamp(sk, sockc.tsflags);
sk               1412 net/ipv4/tcp.c 		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
sk               1419 net/ipv4/tcp.c 	skb = tcp_write_queue_tail(sk);
sk               1421 net/ipv4/tcp.c 	tcp_remove_empty_skb(sk, skb);
sk               1427 net/ipv4/tcp.c 	err = sk_stream_error(sk, flags, err);
sk               1429 net/ipv4/tcp.c 	if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
sk               1430 net/ipv4/tcp.c 		sk->sk_write_space(sk);
sk               1431 net/ipv4/tcp.c 		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
sk               1437 net/ipv4/tcp.c int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
sk               1441 net/ipv4/tcp.c 	lock_sock(sk);
sk               1442 net/ipv4/tcp.c 	ret = tcp_sendmsg_locked(sk, msg, size);
sk               1443 net/ipv4/tcp.c 	release_sock(sk);
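
The SOCK_ZEROCOPY/skb_zerocopy_iter_stream() entries in tcp_sendmsg_locked() above implement MSG_ZEROCOPY sends. A sketch of the userspace side; completion handling via the error queue is only hinted at, and real code would poll() and parse the sock_extended_err control message before reusing the buffer.

/* Zero-copy TCP send: pin the buffer until the errqueue completion. */
#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

static int send_zerocopy(int fd, const char *buf, size_t len)
{
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)) < 0)
		return -1;

	/* The pages backing buf must stay untouched until the kernel posts
	 * a completion notification for this send. */
	if (send(fd, buf, len, MSG_ZEROCOPY) < 0)
		return -1;

	/* Reap one completion from the error queue (may return EAGAIN if
	 * it has not arrived yet; real code would poll and retry). */
	char control[128];
	struct msghdr msg = { .msg_control = control,
			      .msg_controllen = sizeof(control) };
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return -1;
	return 0;
}
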
sk               1454 net/ipv4/tcp.c static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
sk               1456 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1459 net/ipv4/tcp.c 	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
sk               1463 net/ipv4/tcp.c 	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
sk               1486 net/ipv4/tcp.c 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
sk               1498 net/ipv4/tcp.c static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
sk               1505 net/ipv4/tcp.c 	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
sk               1512 net/ipv4/tcp.c 	skb_queue_walk(&sk->sk_write_queue, skb) {
sk               1529 net/ipv4/tcp.c static void tcp_cleanup_rbuf(struct sock *sk, int copied)
sk               1531 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1534 net/ipv4/tcp.c 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
sk               1540 net/ipv4/tcp.c 	if (inet_csk_ack_scheduled(sk)) {
sk               1541 net/ipv4/tcp.c 		const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1556 net/ipv4/tcp.c 		       !inet_csk_in_pingpong_mode(sk))) &&
sk               1557 net/ipv4/tcp.c 		      !atomic_read(&sk->sk_rmem_alloc)))
sk               1567 net/ipv4/tcp.c 	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
sk               1572 net/ipv4/tcp.c 			__u32 new_window = __tcp_select_window(sk);
sk               1584 net/ipv4/tcp.c 		tcp_send_ack(sk);
sk               1587 net/ipv4/tcp.c static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
sk               1592 net/ipv4/tcp.c 	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
sk               1606 net/ipv4/tcp.c 		sk_eat_skb(sk, skb);
sk               1622 net/ipv4/tcp.c int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
sk               1626 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1631 net/ipv4/tcp.c 	if (sk->sk_state == TCP_LISTEN)
sk               1633 net/ipv4/tcp.c 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
sk               1662 net/ipv4/tcp.c 			skb = tcp_recv_skb(sk, seq - 1, &offset);
sk               1672 net/ipv4/tcp.c 			sk_eat_skb(sk, skb);
sk               1676 net/ipv4/tcp.c 		sk_eat_skb(sk, skb);
sk               1683 net/ipv4/tcp.c 	tcp_rcv_space_adjust(sk);
sk               1687 net/ipv4/tcp.c 		tcp_recv_skb(sk, seq, &offset);
sk               1688 net/ipv4/tcp.c 		tcp_cleanup_rbuf(sk, copied);
sk               1696 net/ipv4/tcp.c 	return tcp_inq(sock->sk);
sk               1701 net/ipv4/tcp.c int tcp_set_rcvlowat(struct sock *sk, int val)
sk               1705 net/ipv4/tcp.c 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
sk               1706 net/ipv4/tcp.c 		cap = sk->sk_rcvbuf >> 1;
sk               1708 net/ipv4/tcp.c 		cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
sk               1710 net/ipv4/tcp.c 	WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
sk               1713 net/ipv4/tcp.c 	tcp_data_ready(sk);
sk               1715 net/ipv4/tcp.c 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
sk               1719 net/ipv4/tcp.c 	if (val > sk->sk_rcvbuf) {
sk               1720 net/ipv4/tcp.c 		WRITE_ONCE(sk->sk_rcvbuf, val);
sk               1721 net/ipv4/tcp.c 		tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
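
tcp_set_rcvlowat() above is reached from setsockopt(SO_RCVLOWAT); on TCP it delays poll()/read wakeups until at least the requested number of bytes is queued. A one-function sketch:

/* Only wake readers once `bytes` of TCP payload have accumulated. */
#include <sys/socket.h>

static int set_read_watermark(int fd, int bytes)
{
	/* e.g. bytes = 64 * 1024 to be woken only for 64 KiB chunks */
	return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &bytes, sizeof(bytes));
}
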
sk               1746 net/ipv4/tcp.c static int tcp_zerocopy_receive(struct sock *sk,
sk               1761 net/ipv4/tcp.c 	if (sk->sk_state == TCP_LISTEN)
sk               1764 net/ipv4/tcp.c 	sock_rps_record_flow(sk);
sk               1775 net/ipv4/tcp.c 	tp = tcp_sk(sk);
sk               1777 net/ipv4/tcp.c 	inq = tcp_inq(sk);
sk               1793 net/ipv4/tcp.c 				skb = tcp_recv_skb(sk, seq, &offset);
sk               1832 net/ipv4/tcp.c 		tcp_rcv_space_adjust(sk);
sk               1835 net/ipv4/tcp.c 		tcp_recv_skb(sk, seq, &offset);
sk               1836 net/ipv4/tcp.c 		tcp_cleanup_rbuf(sk, length);
sk               1841 net/ipv4/tcp.c 		if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
sk               1864 net/ipv4/tcp.c static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
sk               1867 net/ipv4/tcp.c 	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
sk               1871 net/ipv4/tcp.c 		if (sock_flag(sk, SOCK_RCVTSTAMP)) {
sk               1872 net/ipv4/tcp.c 			if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
sk               1903 net/ipv4/tcp.c 		if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
sk               1910 net/ipv4/tcp.c 		if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
sk               1918 net/ipv4/tcp.c 		if (sock_flag(sk, SOCK_TSTAMP_NEW))
sk               1925 net/ipv4/tcp.c static int tcp_inq_hint(struct sock *sk)
sk               1927 net/ipv4/tcp.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1934 net/ipv4/tcp.c 		lock_sock(sk);
sk               1936 net/ipv4/tcp.c 		release_sock(sk);
sk               1941 net/ipv4/tcp.c 	if (inq == 0 && sock_flag(sk, SOCK_DONE))
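
tcp_inq_hint() above computes the value reported by the TCP_INQ option: with it enabled, each recvmsg() carries a control message giving the bytes still queued after that read. A sketch; the constants are guarded for older userspace headers and the helper name is illustrative.

/* recvmsg() wrapper that also returns the in-queue byte count (TCP_INQ). */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_INQ
#define TCP_INQ 36
#define TCP_CM_INQ TCP_INQ
#endif

static ssize_t recv_with_inq(int fd, void *buf, size_t len, int *inq)
{
	char control[CMSG_SPACE(sizeof(int))];
	int one = 1;

	/* Enabling the option per call keeps the sketch self-contained. */
	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));

	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = control, .msg_controllen = sizeof(control),
	};
	ssize_t n = recvmsg(fd, &msg, 0);

	for (struct cmsghdr *cm = CMSG_FIRSTHDR(&msg); cm;
	     cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_TCP && cm->cmsg_type == TCP_CM_INQ)
			memcpy(inq, CMSG_DATA(cm), sizeof(*inq));
	}
	return n;
}
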
sk               1954 net/ipv4/tcp.c int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
sk               1957 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1971 net/ipv4/tcp.c 		return inet_recv_error(sk, msg, len, addr_len);
sk               1973 net/ipv4/tcp.c 	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
sk               1974 net/ipv4/tcp.c 	    (sk->sk_state == TCP_ESTABLISHED))
sk               1975 net/ipv4/tcp.c 		sk_busy_loop(sk, nonblock);
sk               1977 net/ipv4/tcp.c 	lock_sock(sk);
sk               1980 net/ipv4/tcp.c 	if (sk->sk_state == TCP_LISTEN)
sk               1984 net/ipv4/tcp.c 	timeo = sock_rcvtimeo(sk, nonblock);
sk               2011 net/ipv4/tcp.c 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
sk               2028 net/ipv4/tcp.c 		last = skb_peek_tail(&sk->sk_receive_queue);
sk               2029 net/ipv4/tcp.c 		skb_queue_walk(&sk->sk_receive_queue, skb) {
sk               2056 net/ipv4/tcp.c 		if (copied >= target && !sk->sk_backlog.tail)
sk               2060 net/ipv4/tcp.c 			if (sk->sk_err ||
sk               2061 net/ipv4/tcp.c 			    sk->sk_state == TCP_CLOSE ||
sk               2062 net/ipv4/tcp.c 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
sk               2067 net/ipv4/tcp.c 			if (sock_flag(sk, SOCK_DONE))
sk               2070 net/ipv4/tcp.c 			if (sk->sk_err) {
sk               2071 net/ipv4/tcp.c 				copied = sock_error(sk);
sk               2075 net/ipv4/tcp.c 			if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               2078 net/ipv4/tcp.c 			if (sk->sk_state == TCP_CLOSE) {
sk               2097 net/ipv4/tcp.c 		tcp_cleanup_rbuf(sk, copied);
sk               2101 net/ipv4/tcp.c 			release_sock(sk);
sk               2102 net/ipv4/tcp.c 			lock_sock(sk);
sk               2104 net/ipv4/tcp.c 			sk_wait_data(sk, &timeo, last);
sk               2127 net/ipv4/tcp.c 					if (!sock_flag(sk, SOCK_URGINLINE)) {
sk               2154 net/ipv4/tcp.c 		tcp_rcv_space_adjust(sk);
sk               2159 net/ipv4/tcp.c 			tcp_fast_path_check(sk);
sk               2173 net/ipv4/tcp.c 			sk_eat_skb(sk, skb);
sk               2180 net/ipv4/tcp.c 			sk_eat_skb(sk, skb);
sk               2189 net/ipv4/tcp.c 	tcp_cleanup_rbuf(sk, copied);
sk               2191 net/ipv4/tcp.c 	release_sock(sk);
sk               2195 net/ipv4/tcp.c 			tcp_recv_timestamp(msg, sk, &tss);
sk               2197 net/ipv4/tcp.c 			inq = tcp_inq_hint(sk);
sk               2205 net/ipv4/tcp.c 	release_sock(sk);
sk               2209 net/ipv4/tcp.c 	err = tcp_recv_urg(sk, msg, len, flags);
sk               2213 net/ipv4/tcp.c 	err = tcp_peek_sndq(sk, msg, len);
sk               2218 net/ipv4/tcp.c void tcp_set_state(struct sock *sk, int state)
sk               2220 net/ipv4/tcp.c 	int oldstate = sk->sk_state;
sk               2243 net/ipv4/tcp.c 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
sk               2244 net/ipv4/tcp.c 		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
sk               2249 net/ipv4/tcp.c 			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
sk               2254 net/ipv4/tcp.c 			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
sk               2256 net/ipv4/tcp.c 		sk->sk_prot->unhash(sk);
sk               2257 net/ipv4/tcp.c 		if (inet_csk(sk)->icsk_bind_hash &&
sk               2258 net/ipv4/tcp.c 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
sk               2259 net/ipv4/tcp.c 			inet_put_port(sk);
sk               2263 net/ipv4/tcp.c 			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
sk               2269 net/ipv4/tcp.c 	inet_sk_state_store(sk, state);
sk               2297 net/ipv4/tcp.c static int tcp_close_state(struct sock *sk)
sk               2299 net/ipv4/tcp.c 	int next = (int)new_state[sk->sk_state];
sk               2302 net/ipv4/tcp.c 	tcp_set_state(sk, ns);
sk               2312 net/ipv4/tcp.c void tcp_shutdown(struct sock *sk, int how)
sk               2322 net/ipv4/tcp.c 	if ((1 << sk->sk_state) &
sk               2326 net/ipv4/tcp.c 		if (tcp_close_state(sk))
sk               2327 net/ipv4/tcp.c 			tcp_send_fin(sk);
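tcp_shutdown() above acts only on the write side: when the transition in tcp_close_state() calls for it, a FIN is emitted via tcp_send_fin() while the receive side stays usable. That is the half-close a caller gets from shutdown(fd, SHUT_WR). A small sketch (helper name and buffer sizes are mine, the request contents are left abstract):

/* Hedged sketch of a half-close: write the request, shut down the write
 * side (kernel path: tcp_shutdown() -> tcp_send_fin()), then keep reading
 * until the peer closes.  'fd' is assumed to be a connected TCP socket. */
#include <unistd.h>
#include <sys/socket.h>

ssize_t send_request_and_drain(int fd, const char *req, size_t len)
{
	char buf[4096];
	ssize_t n, total = 0;

	if (send(fd, req, len, 0) < 0)
		return -1;
	shutdown(fd, SHUT_WR);			/* FIN goes out, read side stays open */

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		total += n;			/* consume the reply until the peer's FIN */
	return total;
}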
sk               2332 net/ipv4/tcp.c bool tcp_check_oom(struct sock *sk, int shift)
sk               2336 net/ipv4/tcp.c 	too_many_orphans = tcp_too_many_orphans(sk, shift);
sk               2337 net/ipv4/tcp.c 	out_of_socket_memory = tcp_out_of_memory(sk);
sk               2346 net/ipv4/tcp.c void tcp_close(struct sock *sk, long timeout)
sk               2352 net/ipv4/tcp.c 	lock_sock(sk);
sk               2353 net/ipv4/tcp.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk               2355 net/ipv4/tcp.c 	if (sk->sk_state == TCP_LISTEN) {
sk               2356 net/ipv4/tcp.c 		tcp_set_state(sk, TCP_CLOSE);
sk               2359 net/ipv4/tcp.c 		inet_csk_listen_stop(sk);
sk               2368 net/ipv4/tcp.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk               2377 net/ipv4/tcp.c 	sk_mem_reclaim(sk);
sk               2380 net/ipv4/tcp.c 	if (sk->sk_state == TCP_CLOSE)
sk               2390 net/ipv4/tcp.c 	if (unlikely(tcp_sk(sk)->repair)) {
sk               2391 net/ipv4/tcp.c 		sk->sk_prot->disconnect(sk, 0);
sk               2394 net/ipv4/tcp.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
sk               2395 net/ipv4/tcp.c 		tcp_set_state(sk, TCP_CLOSE);
sk               2396 net/ipv4/tcp.c 		tcp_send_active_reset(sk, sk->sk_allocation);
sk               2397 net/ipv4/tcp.c 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
sk               2399 net/ipv4/tcp.c 		sk->sk_prot->disconnect(sk, 0);
sk               2400 net/ipv4/tcp.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
sk               2401 net/ipv4/tcp.c 	} else if (tcp_close_state(sk)) {
sk               2431 net/ipv4/tcp.c 		tcp_send_fin(sk);
sk               2434 net/ipv4/tcp.c 	sk_stream_wait_close(sk, timeout);
sk               2437 net/ipv4/tcp.c 	state = sk->sk_state;
sk               2438 net/ipv4/tcp.c 	sock_hold(sk);
sk               2439 net/ipv4/tcp.c 	sock_orphan(sk);
sk               2442 net/ipv4/tcp.c 	bh_lock_sock(sk);
sk               2444 net/ipv4/tcp.c 	__release_sock(sk);
sk               2446 net/ipv4/tcp.c 	percpu_counter_inc(sk->sk_prot->orphan_count);
sk               2449 net/ipv4/tcp.c 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
sk               2466 net/ipv4/tcp.c 	if (sk->sk_state == TCP_FIN_WAIT2) {
sk               2467 net/ipv4/tcp.c 		struct tcp_sock *tp = tcp_sk(sk);
sk               2469 net/ipv4/tcp.c 			tcp_set_state(sk, TCP_CLOSE);
sk               2470 net/ipv4/tcp.c 			tcp_send_active_reset(sk, GFP_ATOMIC);
sk               2471 net/ipv4/tcp.c 			__NET_INC_STATS(sock_net(sk),
sk               2474 net/ipv4/tcp.c 			const int tmo = tcp_fin_time(sk);
sk               2477 net/ipv4/tcp.c 				inet_csk_reset_keepalive_timer(sk,
sk               2480 net/ipv4/tcp.c 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
sk               2485 net/ipv4/tcp.c 	if (sk->sk_state != TCP_CLOSE) {
sk               2486 net/ipv4/tcp.c 		sk_mem_reclaim(sk);
sk               2487 net/ipv4/tcp.c 		if (tcp_check_oom(sk, 0)) {
sk               2488 net/ipv4/tcp.c 			tcp_set_state(sk, TCP_CLOSE);
sk               2489 net/ipv4/tcp.c 			tcp_send_active_reset(sk, GFP_ATOMIC);
sk               2490 net/ipv4/tcp.c 			__NET_INC_STATS(sock_net(sk),
sk               2492 net/ipv4/tcp.c 		} else if (!check_net(sock_net(sk))) {
sk               2494 net/ipv4/tcp.c 			tcp_set_state(sk, TCP_CLOSE);
sk               2498 net/ipv4/tcp.c 	if (sk->sk_state == TCP_CLOSE) {
sk               2501 net/ipv4/tcp.c 		req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
sk               2502 net/ipv4/tcp.c 						lockdep_sock_is_held(sk));
sk               2508 net/ipv4/tcp.c 			reqsk_fastopen_remove(sk, req, false);
sk               2509 net/ipv4/tcp.c 		inet_csk_destroy_sock(sk);
sk               2514 net/ipv4/tcp.c 	bh_unlock_sock(sk);
sk               2516 net/ipv4/tcp.c 	release_sock(sk);
sk               2517 net/ipv4/tcp.c 	sock_put(sk);
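One branch of tcp_close() above is worth calling out: when SOCK_LINGER is set with a zero linger time, the socket skips the FIN handshake, calls disconnect() and bumps LINUX_MIB_TCPABORTONDATA, i.e. the close is abortive. From userspace that corresponds to SO_LINGER with l_onoff = 1 and l_linger = 0, after which close() resets the connection; a minimal sketch:

/* Hedged sketch: abortive close.  With l_onoff = 1 and l_linger = 0 the
 * kernel takes the sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime branch
 * of tcp_close() above and resets the connection instead of sending FIN. */
#include <unistd.h>
#include <sys/socket.h>

void abortive_close(int fd)
{
	struct linger lg = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
	close(fd);	/* RST goes out; no TIME_WAIT is held on this end */
}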
sk               2530 net/ipv4/tcp.c static void tcp_rtx_queue_purge(struct sock *sk)
sk               2532 net/ipv4/tcp.c 	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
sk               2534 net/ipv4/tcp.c 	tcp_sk(sk)->highest_sack = NULL;
sk               2542 net/ipv4/tcp.c 		tcp_rtx_queue_unlink(skb, sk);
sk               2543 net/ipv4/tcp.c 		sk_wmem_free_skb(sk, skb);
sk               2547 net/ipv4/tcp.c void tcp_write_queue_purge(struct sock *sk)
sk               2551 net/ipv4/tcp.c 	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
sk               2552 net/ipv4/tcp.c 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
sk               2554 net/ipv4/tcp.c 		sk_wmem_free_skb(sk, skb);
sk               2556 net/ipv4/tcp.c 	tcp_rtx_queue_purge(sk);
sk               2557 net/ipv4/tcp.c 	skb = sk->sk_tx_skb_cache;
sk               2560 net/ipv4/tcp.c 		sk->sk_tx_skb_cache = NULL;
sk               2562 net/ipv4/tcp.c 	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
sk               2563 net/ipv4/tcp.c 	sk_mem_reclaim(sk);
sk               2564 net/ipv4/tcp.c 	tcp_clear_all_retrans_hints(tcp_sk(sk));
sk               2565 net/ipv4/tcp.c 	tcp_sk(sk)->packets_out = 0;
sk               2566 net/ipv4/tcp.c 	inet_csk(sk)->icsk_backoff = 0;
sk               2569 net/ipv4/tcp.c int tcp_disconnect(struct sock *sk, int flags)
sk               2571 net/ipv4/tcp.c 	struct inet_sock *inet = inet_sk(sk);
sk               2572 net/ipv4/tcp.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2573 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2574 net/ipv4/tcp.c 	int old_state = sk->sk_state;
sk               2578 net/ipv4/tcp.c 		tcp_set_state(sk, TCP_CLOSE);
sk               2582 net/ipv4/tcp.c 		inet_csk_listen_stop(sk);
sk               2584 net/ipv4/tcp.c 		sk->sk_err = ECONNABORTED;
sk               2591 net/ipv4/tcp.c 		tcp_send_active_reset(sk, gfp_any());
sk               2592 net/ipv4/tcp.c 		sk->sk_err = ECONNRESET;
sk               2594 net/ipv4/tcp.c 		sk->sk_err = ECONNRESET;
sk               2596 net/ipv4/tcp.c 	tcp_clear_xmit_timers(sk);
sk               2597 net/ipv4/tcp.c 	__skb_queue_purge(&sk->sk_receive_queue);
sk               2598 net/ipv4/tcp.c 	if (sk->sk_rx_skb_cache) {
sk               2599 net/ipv4/tcp.c 		__kfree_skb(sk->sk_rx_skb_cache);
sk               2600 net/ipv4/tcp.c 		sk->sk_rx_skb_cache = NULL;
sk               2604 net/ipv4/tcp.c 	tcp_write_queue_purge(sk);
sk               2605 net/ipv4/tcp.c 	tcp_fastopen_active_disable_ofo_check(sk);
sk               2610 net/ipv4/tcp.c 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
sk               2611 net/ipv4/tcp.c 		inet_reset_saddr(sk);
sk               2613 net/ipv4/tcp.c 	sk->sk_shutdown = 0;
sk               2614 net/ipv4/tcp.c 	sock_reset_flag(sk, SOCK_DONE);
sk               2634 net/ipv4/tcp.c 	tcp_set_ca_state(sk, TCP_CA_Open);
sk               2638 net/ipv4/tcp.c 	inet_csk_delack_init(sk);
sk               2644 net/ipv4/tcp.c 	__sk_dst_reset(sk);
sk               2645 net/ipv4/tcp.c 	dst_release(sk->sk_rx_dst);
sk               2646 net/ipv4/tcp.c 	sk->sk_rx_dst = NULL;
sk               2686 net/ipv4/tcp.c 	if (sk->sk_frag.page) {
sk               2687 net/ipv4/tcp.c 		put_page(sk->sk_frag.page);
sk               2688 net/ipv4/tcp.c 		sk->sk_frag.page = NULL;
sk               2689 net/ipv4/tcp.c 		sk->sk_frag.offset = 0;
sk               2692 net/ipv4/tcp.c 	sk->sk_error_report(sk);
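tcp_disconnect() above tears the connection back down to a reusable TCP_CLOSE socket: it stops the timers, purges the receive, write and retransmit queues, resets the source address unless SOCK_BINDADDR_LOCK is set, and drops the cached dst. The usual userspace trigger, to the best of my reading of the stream connect path, is calling connect() on the same descriptor with an address whose family is AF_UNSPEC; a sketch (helper name is mine):

/* Hedged sketch: dissolving a TCP connection in place.  Connecting the
 * descriptor to an AF_UNSPEC address should be routed to
 * sk->sk_prot->disconnect, i.e. tcp_disconnect() above, leaving the
 * socket in TCP_CLOSE so it can be connected again. */
#include <string.h>
#include <sys/socket.h>

int tcp_dissolve(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}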
sk               2697 net/ipv4/tcp.c static inline bool tcp_can_repair_sock(const struct sock *sk)
sk               2699 net/ipv4/tcp.c 	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
sk               2700 net/ipv4/tcp.c 		(sk->sk_state != TCP_LISTEN);
sk               2735 net/ipv4/tcp.c static int tcp_repair_options_est(struct sock *sk,
sk               2738 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2751 net/ipv4/tcp.c 			tcp_mtup_init(sk);
sk               2802 net/ipv4/tcp.c static int do_tcp_setsockopt(struct sock *sk, int level,
sk               2805 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2806 net/ipv4/tcp.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2807 net/ipv4/tcp.c 	struct net *net = sock_net(sk);
sk               2825 net/ipv4/tcp.c 		lock_sock(sk);
sk               2826 net/ipv4/tcp.c 		err = tcp_set_congestion_control(sk, name, true, true,
sk               2827 net/ipv4/tcp.c 						 ns_capable(sock_net(sk)->user_ns,
sk               2829 net/ipv4/tcp.c 		release_sock(sk);
sk               2845 net/ipv4/tcp.c 		lock_sock(sk);
sk               2846 net/ipv4/tcp.c 		err = tcp_set_ulp(sk, name);
sk               2847 net/ipv4/tcp.c 		release_sock(sk);
sk               2867 net/ipv4/tcp.c 		return tcp_fastopen_reset_cipher(net, sk, key, backup_key);
sk               2880 net/ipv4/tcp.c 	lock_sock(sk);
sk               2906 net/ipv4/tcp.c 			tcp_push_pending_frames(sk);
sk               2925 net/ipv4/tcp.c 		if (!tcp_can_repair_sock(sk))
sk               2929 net/ipv4/tcp.c 			sk->sk_reuse = SK_FORCE_REUSE;
sk               2933 net/ipv4/tcp.c 			sk->sk_reuse = SK_NO_REUSE;
sk               2934 net/ipv4/tcp.c 			tcp_send_window_probe(sk);
sk               2937 net/ipv4/tcp.c 			sk->sk_reuse = SK_NO_REUSE;
sk               2953 net/ipv4/tcp.c 		if (sk->sk_state != TCP_CLOSE)
sk               2968 net/ipv4/tcp.c 		else if (sk->sk_state == TCP_ESTABLISHED)
sk               2969 net/ipv4/tcp.c 			err = tcp_repair_options_est(sk,
sk               2994 net/ipv4/tcp.c 			tcp_push_pending_frames(sk);
sk               3003 net/ipv4/tcp.c 			if (sock_flag(sk, SOCK_KEEPOPEN) &&
sk               3004 net/ipv4/tcp.c 			    !((1 << sk->sk_state) &
sk               3011 net/ipv4/tcp.c 				inet_csk_reset_keepalive_timer(sk, elapsed);
sk               3059 net/ipv4/tcp.c 			if (sk->sk_state != TCP_CLOSE) {
sk               3071 net/ipv4/tcp.c 			inet_csk_enter_pingpong_mode(sk);
sk               3073 net/ipv4/tcp.c 			inet_csk_exit_pingpong_mode(sk);
sk               3074 net/ipv4/tcp.c 			if ((1 << sk->sk_state) &
sk               3076 net/ipv4/tcp.c 			    inet_csk_ack_scheduled(sk)) {
sk               3078 net/ipv4/tcp.c 				tcp_cleanup_rbuf(sk, 1);
sk               3080 net/ipv4/tcp.c 					inet_csk_enter_pingpong_mode(sk);
sk               3088 net/ipv4/tcp.c 		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
sk               3089 net/ipv4/tcp.c 			err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
sk               3105 net/ipv4/tcp.c 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
sk               3109 net/ipv4/tcp.c 			fastopen_queue_tune(sk, val);
sk               3118 net/ipv4/tcp.c 			if (sk->sk_state == TCP_CLOSE)
sk               3129 net/ipv4/tcp.c 		else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
sk               3145 net/ipv4/tcp.c 		sk->sk_write_space(sk);
sk               3163 net/ipv4/tcp.c 	release_sock(sk);
sk               3167 net/ipv4/tcp.c int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
sk               3170 net/ipv4/tcp.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               3173 net/ipv4/tcp.c 		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
sk               3175 net/ipv4/tcp.c 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
sk               3180 net/ipv4/tcp.c int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
sk               3184 net/ipv4/tcp.c 		return inet_csk_compat_setsockopt(sk, level, optname,
sk               3186 net/ipv4/tcp.c 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
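In the do_tcp_setsockopt() excerpt above, TCP_CONGESTION is routed into tcp_set_congestion_control() under the socket lock, with ns_capable(CAP_NET_ADMIN) deciding whether restricted modules may be selected, and TCP_ULP / TCP_FASTOPEN_KEY take similar early exits before the main lock_sock(sk) section. The userspace side of the TCP_CONGESTION case looks roughly like the sketch below ("cubic" and the helper name are just examples):

/* Hedged sketch: per-socket congestion control selection, matching the
 * TCP_CONGESTION case in do_tcp_setsockopt() above.  The name must refer
 * to a congestion-control module the kernel has loaded. */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int pick_cc(int fd, const char *name)
{
	char active[16] = "";
	socklen_t len = sizeof(active);

	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name)) < 0)
		return -1;
	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, active, &len) < 0)
		return -1;
	printf("congestion control now: %.*s\n", (int)len, active);
	return 0;
}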
sk               3211 net/ipv4/tcp.c void tcp_get_info(struct sock *sk, struct tcp_info *info)
sk               3213 net/ipv4/tcp.c 	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
sk               3214 net/ipv4/tcp.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               3221 net/ipv4/tcp.c 	if (sk->sk_type != SOCK_STREAM)
sk               3224 net/ipv4/tcp.c 	info->tcpi_state = inet_sk_state_load(sk);
sk               3227 net/ipv4/tcp.c 	rate = READ_ONCE(sk->sk_pacing_rate);
sk               3231 net/ipv4/tcp.c 	rate = READ_ONCE(sk->sk_max_pacing_rate);
sk               3243 net/ipv4/tcp.c 		info->tcpi_unacked = sk->sk_ack_backlog;
sk               3244 net/ipv4/tcp.c 		info->tcpi_sacked = sk->sk_max_ack_backlog;
sk               3248 net/ipv4/tcp.c 	slow = lock_sock_fast(sk);
sk               3324 net/ipv4/tcp.c 	unlock_sock_fast(sk, slow);
sk               3356 net/ipv4/tcp.c struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
sk               3358 net/ipv4/tcp.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               3380 net/ipv4/tcp.c 	rate = READ_ONCE(sk->sk_pacing_rate);
sk               3391 net/ipv4/tcp.c 	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
sk               3398 net/ipv4/tcp.c 	nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
sk               3411 net/ipv4/tcp.c static int do_tcp_getsockopt(struct sock *sk, int level,
sk               3414 net/ipv4/tcp.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               3415 net/ipv4/tcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3416 net/ipv4/tcp.c 	struct net *net = sock_net(sk);
sk               3430 net/ipv4/tcp.c 		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
sk               3471 net/ipv4/tcp.c 		tcp_get_info(sk, &info);
sk               3491 net/ipv4/tcp.c 			sz = ca_ops->get_info(sk, ~0U, &attr, &info);
sk               3501 net/ipv4/tcp.c 		val = !inet_csk_in_pingpong_mode(sk);
sk               3639 net/ipv4/tcp.c 		lock_sock(sk);
sk               3643 net/ipv4/tcp.c 					release_sock(sk);
sk               3646 net/ipv4/tcp.c 				release_sock(sk);
sk               3651 net/ipv4/tcp.c 				release_sock(sk);
sk               3655 net/ipv4/tcp.c 				release_sock(sk);
sk               3659 net/ipv4/tcp.c 			release_sock(sk);
sk               3661 net/ipv4/tcp.c 			release_sock(sk);
sk               3679 net/ipv4/tcp.c 		lock_sock(sk);
sk               3680 net/ipv4/tcp.c 		err = tcp_zerocopy_receive(sk, &zc);
sk               3681 net/ipv4/tcp.c 		release_sock(sk);
sk               3698 net/ipv4/tcp.c int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
sk               3701 net/ipv4/tcp.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               3704 net/ipv4/tcp.c 		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
sk               3706 net/ipv4/tcp.c 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
sk               3711 net/ipv4/tcp.c int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
sk               3715 net/ipv4/tcp.c 		return inet_csk_compat_getsockopt(sk, level, optname,
sk               3717 net/ipv4/tcp.c 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
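tcp_get_info() above assembles the live snapshot (state, pacing rates, ack backlog, congestion-control details via ca_ops->get_info) under lock_sock_fast(), and do_tcp_getsockopt() exports it through getsockopt(TCP_INFO). A hedged reader, touching only long-standing fields since the libc copy of struct tcp_info can trail the kernel's:

/* Hedged sketch: reading the snapshot tcp_get_info() fills above.
 * struct tcp_info here comes from <netinet/tcp.h>. */
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

void dump_tcp_info(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("state=%u rtt=%uus cwnd=%u segs retrans=%u\n",
		       ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_snd_cwnd,
		       ti.tcpi_total_retrans);
}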
sk               3857 net/ipv4/tcp.c void tcp_done(struct sock *sk)
sk               3865 net/ipv4/tcp.c 	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
sk               3867 net/ipv4/tcp.c 	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
sk               3868 net/ipv4/tcp.c 		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
sk               3870 net/ipv4/tcp.c 	tcp_set_state(sk, TCP_CLOSE);
sk               3871 net/ipv4/tcp.c 	tcp_clear_xmit_timers(sk);
sk               3873 net/ipv4/tcp.c 		reqsk_fastopen_remove(sk, req, false);
sk               3875 net/ipv4/tcp.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk               3877 net/ipv4/tcp.c 	if (!sock_flag(sk, SOCK_DEAD))
sk               3878 net/ipv4/tcp.c 		sk->sk_state_change(sk);
sk               3880 net/ipv4/tcp.c 		inet_csk_destroy_sock(sk);
sk               3884 net/ipv4/tcp.c int tcp_abort(struct sock *sk, int err)
sk               3886 net/ipv4/tcp.c 	if (!sk_fullsock(sk)) {
sk               3887 net/ipv4/tcp.c 		if (sk->sk_state == TCP_NEW_SYN_RECV) {
sk               3888 net/ipv4/tcp.c 			struct request_sock *req = inet_reqsk(sk);
sk               3899 net/ipv4/tcp.c 	lock_sock(sk);
sk               3901 net/ipv4/tcp.c 	if (sk->sk_state == TCP_LISTEN) {
sk               3902 net/ipv4/tcp.c 		tcp_set_state(sk, TCP_CLOSE);
sk               3903 net/ipv4/tcp.c 		inet_csk_listen_stop(sk);
sk               3908 net/ipv4/tcp.c 	bh_lock_sock(sk);
sk               3910 net/ipv4/tcp.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk               3911 net/ipv4/tcp.c 		sk->sk_err = err;
sk               3914 net/ipv4/tcp.c 		sk->sk_error_report(sk);
sk               3915 net/ipv4/tcp.c 		if (tcp_need_reset(sk->sk_state))
sk               3916 net/ipv4/tcp.c 			tcp_send_active_reset(sk, GFP_ATOMIC);
sk               3917 net/ipv4/tcp.c 		tcp_done(sk);
sk               3920 net/ipv4/tcp.c 	bh_unlock_sock(sk);
sk               3922 net/ipv4/tcp.c 	tcp_write_queue_purge(sk);
sk               3923 net/ipv4/tcp.c 	release_sock(sk);
sk                202 net/ipv4/tcp_bbr.c static void bbr_check_probe_rtt_done(struct sock *sk);
sk                205 net/ipv4/tcp_bbr.c static bool bbr_full_bw_reached(const struct sock *sk)
sk                207 net/ipv4/tcp_bbr.c 	const struct bbr *bbr = inet_csk_ca(sk);
sk                213 net/ipv4/tcp_bbr.c static u32 bbr_max_bw(const struct sock *sk)
sk                215 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                221 net/ipv4/tcp_bbr.c static u32 bbr_bw(const struct sock *sk)
sk                223 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                225 net/ipv4/tcp_bbr.c 	return bbr->lt_use_bw ? bbr->lt_bw : bbr_max_bw(sk);
sk                231 net/ipv4/tcp_bbr.c static u16 bbr_extra_acked(const struct sock *sk)
sk                233 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                242 net/ipv4/tcp_bbr.c static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
sk                244 net/ipv4/tcp_bbr.c 	unsigned int mss = tcp_sk(sk)->mss_cache;
sk                254 net/ipv4/tcp_bbr.c static unsigned long bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
sk                258 net/ipv4/tcp_bbr.c 	rate = bbr_rate_bytes_per_sec(sk, rate, gain);
sk                259 net/ipv4/tcp_bbr.c 	rate = min_t(u64, rate, sk->sk_max_pacing_rate);
sk                264 net/ipv4/tcp_bbr.c static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
sk                266 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                267 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                279 net/ipv4/tcp_bbr.c 	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
sk                283 net/ipv4/tcp_bbr.c static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
sk                285 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                286 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                287 net/ipv4/tcp_bbr.c 	unsigned long rate = bbr_bw_to_pacing_rate(sk, bw, gain);
sk                290 net/ipv4/tcp_bbr.c 		bbr_init_pacing_rate_from_rtt(sk);
sk                291 net/ipv4/tcp_bbr.c 	if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
sk                292 net/ipv4/tcp_bbr.c 		sk->sk_pacing_rate = rate;
sk                296 net/ipv4/tcp_bbr.c static u32 bbr_min_tso_segs(struct sock *sk)
sk                298 net/ipv4/tcp_bbr.c 	return sk->sk_pacing_rate < (bbr_min_tso_rate >> 3) ? 1 : 2;
sk                301 net/ipv4/tcp_bbr.c static u32 bbr_tso_segs_goal(struct sock *sk)
sk                303 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                310 net/ipv4/tcp_bbr.c 		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
sk                312 net/ipv4/tcp_bbr.c 	segs = max_t(u32, bytes / tp->mss_cache, bbr_min_tso_segs(sk));
sk                318 net/ipv4/tcp_bbr.c static void bbr_save_cwnd(struct sock *sk)
sk                320 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                321 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                329 net/ipv4/tcp_bbr.c static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
sk                331 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                332 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                342 net/ipv4/tcp_bbr.c 			bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
sk                344 net/ipv4/tcp_bbr.c 			bbr_check_probe_rtt_done(sk);
sk                357 net/ipv4/tcp_bbr.c static u32 bbr_bdp(struct sock *sk, u32 bw, int gain)
sk                359 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                392 net/ipv4/tcp_bbr.c static u32 bbr_quantization_budget(struct sock *sk, u32 cwnd)
sk                394 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                397 net/ipv4/tcp_bbr.c 	cwnd += 3 * bbr_tso_segs_goal(sk);
sk                410 net/ipv4/tcp_bbr.c static u32 bbr_inflight(struct sock *sk, u32 bw, int gain)
sk                414 net/ipv4/tcp_bbr.c 	inflight = bbr_bdp(sk, bw, gain);
sk                415 net/ipv4/tcp_bbr.c 	inflight = bbr_quantization_budget(sk, inflight);
sk                434 net/ipv4/tcp_bbr.c static u32 bbr_packets_in_net_at_edt(struct sock *sk, u32 inflight_now)
sk                436 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                437 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                444 net/ipv4/tcp_bbr.c 	interval_delivered = (u64)bbr_bw(sk) * interval_us >> BW_SCALE;
sk                447 net/ipv4/tcp_bbr.c 		inflight_at_edt += bbr_tso_segs_goal(sk);  /* include EDT skb */
sk                454 net/ipv4/tcp_bbr.c static u32 bbr_ack_aggregation_cwnd(struct sock *sk)
sk                458 net/ipv4/tcp_bbr.c 	if (bbr_extra_acked_gain && bbr_full_bw_reached(sk)) {
sk                459 net/ipv4/tcp_bbr.c 		max_aggr_cwnd = ((u64)bbr_bw(sk) * bbr_extra_acked_max_us)
sk                461 net/ipv4/tcp_bbr.c 		aggr_cwnd = (bbr_extra_acked_gain * bbr_extra_acked(sk))
sk                478 net/ipv4/tcp_bbr.c 	struct sock *sk, const struct rate_sample *rs, u32 acked, u32 *new_cwnd)
sk                480 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                481 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                482 net/ipv4/tcp_bbr.c 	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
sk                516 net/ipv4/tcp_bbr.c static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
sk                519 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                520 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                526 net/ipv4/tcp_bbr.c 	if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
sk                529 net/ipv4/tcp_bbr.c 	target_cwnd = bbr_bdp(sk, bw, gain);
sk                534 net/ipv4/tcp_bbr.c 	target_cwnd += bbr_ack_aggregation_cwnd(sk);
sk                535 net/ipv4/tcp_bbr.c 	target_cwnd = bbr_quantization_budget(sk, target_cwnd);
sk                538 net/ipv4/tcp_bbr.c 	if (bbr_full_bw_reached(sk))  /* only cut cwnd if we filled the pipe */
sk                551 net/ipv4/tcp_bbr.c static bool bbr_is_next_cycle_phase(struct sock *sk,
sk                554 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                555 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                567 net/ipv4/tcp_bbr.c 	inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight);
sk                568 net/ipv4/tcp_bbr.c 	bw = bbr_max_bw(sk);
sk                578 net/ipv4/tcp_bbr.c 			 inflight >= bbr_inflight(sk, bw, bbr->pacing_gain));
sk                585 net/ipv4/tcp_bbr.c 		inflight <= bbr_inflight(sk, bw, BBR_UNIT);
sk                588 net/ipv4/tcp_bbr.c static void bbr_advance_cycle_phase(struct sock *sk)
sk                590 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                591 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                598 net/ipv4/tcp_bbr.c static void bbr_update_cycle_phase(struct sock *sk,
sk                601 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                603 net/ipv4/tcp_bbr.c 	if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
sk                604 net/ipv4/tcp_bbr.c 		bbr_advance_cycle_phase(sk);
sk                607 net/ipv4/tcp_bbr.c static void bbr_reset_startup_mode(struct sock *sk)
sk                609 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                614 net/ipv4/tcp_bbr.c static void bbr_reset_probe_bw_mode(struct sock *sk)
sk                616 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                620 net/ipv4/tcp_bbr.c 	bbr_advance_cycle_phase(sk);	/* flip to next phase of gain cycle */
sk                623 net/ipv4/tcp_bbr.c static void bbr_reset_mode(struct sock *sk)
sk                625 net/ipv4/tcp_bbr.c 	if (!bbr_full_bw_reached(sk))
sk                626 net/ipv4/tcp_bbr.c 		bbr_reset_startup_mode(sk);
sk                628 net/ipv4/tcp_bbr.c 		bbr_reset_probe_bw_mode(sk);
sk                632 net/ipv4/tcp_bbr.c static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
sk                634 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                635 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                644 net/ipv4/tcp_bbr.c static void bbr_reset_lt_bw_sampling(struct sock *sk)
sk                646 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                651 net/ipv4/tcp_bbr.c 	bbr_reset_lt_bw_sampling_interval(sk);
sk                655 net/ipv4/tcp_bbr.c static void bbr_lt_bw_interval_done(struct sock *sk, u32 bw)
sk                657 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                664 net/ipv4/tcp_bbr.c 		    (bbr_rate_bytes_per_sec(sk, diff, BBR_UNIT) <=
sk                675 net/ipv4/tcp_bbr.c 	bbr_reset_lt_bw_sampling_interval(sk);
sk                685 net/ipv4/tcp_bbr.c static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
sk                687 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                688 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                696 net/ipv4/tcp_bbr.c 			bbr_reset_lt_bw_sampling(sk);    /* stop using lt_bw */
sk                697 net/ipv4/tcp_bbr.c 			bbr_reset_probe_bw_mode(sk);  /* restart gain cycling */
sk                709 net/ipv4/tcp_bbr.c 		bbr_reset_lt_bw_sampling_interval(sk);
sk                715 net/ipv4/tcp_bbr.c 		bbr_reset_lt_bw_sampling(sk);
sk                724 net/ipv4/tcp_bbr.c 		bbr_reset_lt_bw_sampling(sk);  /* interval is too long */
sk                748 net/ipv4/tcp_bbr.c 		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
sk                754 net/ipv4/tcp_bbr.c 	bbr_lt_bw_interval_done(sk, bw);
sk                758 net/ipv4/tcp_bbr.c static void bbr_update_bw(struct sock *sk, const struct rate_sample *rs)
sk                760 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                761 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                776 net/ipv4/tcp_bbr.c 	bbr_lt_bw_sampling(sk, rs);
sk                795 net/ipv4/tcp_bbr.c 	if (!rs->is_app_limited || bw >= bbr_max_bw(sk)) {
sk                814 net/ipv4/tcp_bbr.c static void bbr_update_ack_aggregation(struct sock *sk,
sk                818 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                819 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                839 net/ipv4/tcp_bbr.c 	expected_acked = ((u64)bbr_bw(sk) * epoch_us) / BW_UNIT;
sk                870 net/ipv4/tcp_bbr.c static void bbr_check_full_bw_reached(struct sock *sk,
sk                873 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                876 net/ipv4/tcp_bbr.c 	if (bbr_full_bw_reached(sk) || !bbr->round_start || rs->is_app_limited)
sk                880 net/ipv4/tcp_bbr.c 	if (bbr_max_bw(sk) >= bw_thresh) {
sk                881 net/ipv4/tcp_bbr.c 		bbr->full_bw = bbr_max_bw(sk);
sk                890 net/ipv4/tcp_bbr.c static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
sk                892 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                894 net/ipv4/tcp_bbr.c 	if (bbr->mode == BBR_STARTUP && bbr_full_bw_reached(sk)) {
sk                896 net/ipv4/tcp_bbr.c 		tcp_sk(sk)->snd_ssthresh =
sk                897 net/ipv4/tcp_bbr.c 				bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT);
sk                900 net/ipv4/tcp_bbr.c 	    bbr_packets_in_net_at_edt(sk, tcp_packets_in_flight(tcp_sk(sk))) <=
sk                901 net/ipv4/tcp_bbr.c 	    bbr_inflight(sk, bbr_max_bw(sk), BBR_UNIT))
sk                902 net/ipv4/tcp_bbr.c 		bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
sk                905 net/ipv4/tcp_bbr.c static void bbr_check_probe_rtt_done(struct sock *sk)
sk                907 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                908 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                916 net/ipv4/tcp_bbr.c 	bbr_reset_mode(sk);
sk                938 net/ipv4/tcp_bbr.c static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
sk                940 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                941 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk                957 net/ipv4/tcp_bbr.c 		bbr_save_cwnd(sk);  /* note cwnd so we can restore it */
sk                976 net/ipv4/tcp_bbr.c 				bbr_check_probe_rtt_done(sk);
sk                984 net/ipv4/tcp_bbr.c static void bbr_update_gains(struct sock *sk)
sk                986 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk               1013 net/ipv4/tcp_bbr.c static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
sk               1015 net/ipv4/tcp_bbr.c 	bbr_update_bw(sk, rs);
sk               1016 net/ipv4/tcp_bbr.c 	bbr_update_ack_aggregation(sk, rs);
sk               1017 net/ipv4/tcp_bbr.c 	bbr_update_cycle_phase(sk, rs);
sk               1018 net/ipv4/tcp_bbr.c 	bbr_check_full_bw_reached(sk, rs);
sk               1019 net/ipv4/tcp_bbr.c 	bbr_check_drain(sk, rs);
sk               1020 net/ipv4/tcp_bbr.c 	bbr_update_min_rtt(sk, rs);
sk               1021 net/ipv4/tcp_bbr.c 	bbr_update_gains(sk);
sk               1024 net/ipv4/tcp_bbr.c static void bbr_main(struct sock *sk, const struct rate_sample *rs)
sk               1026 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk               1029 net/ipv4/tcp_bbr.c 	bbr_update_model(sk, rs);
sk               1031 net/ipv4/tcp_bbr.c 	bw = bbr_bw(sk);
sk               1032 net/ipv4/tcp_bbr.c 	bbr_set_pacing_rate(sk, bw, bbr->pacing_gain);
sk               1033 net/ipv4/tcp_bbr.c 	bbr_set_cwnd(sk, rs, rs->acked_sacked, bw, bbr->cwnd_gain);
sk               1036 net/ipv4/tcp_bbr.c static void bbr_init(struct sock *sk)
sk               1038 net/ipv4/tcp_bbr.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1039 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk               1056 net/ipv4/tcp_bbr.c 	bbr_init_pacing_rate_from_rtt(sk);
sk               1065 net/ipv4/tcp_bbr.c 	bbr_reset_lt_bw_sampling(sk);
sk               1066 net/ipv4/tcp_bbr.c 	bbr_reset_startup_mode(sk);
sk               1075 net/ipv4/tcp_bbr.c 	cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED);
sk               1078 net/ipv4/tcp_bbr.c static u32 bbr_sndbuf_expand(struct sock *sk)
sk               1087 net/ipv4/tcp_bbr.c static u32 bbr_undo_cwnd(struct sock *sk)
sk               1089 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk               1093 net/ipv4/tcp_bbr.c 	bbr_reset_lt_bw_sampling(sk);
sk               1094 net/ipv4/tcp_bbr.c 	return tcp_sk(sk)->snd_cwnd;
sk               1098 net/ipv4/tcp_bbr.c static u32 bbr_ssthresh(struct sock *sk)
sk               1100 net/ipv4/tcp_bbr.c 	bbr_save_cwnd(sk);
sk               1101 net/ipv4/tcp_bbr.c 	return tcp_sk(sk)->snd_ssthresh;
sk               1104 net/ipv4/tcp_bbr.c static size_t bbr_get_info(struct sock *sk, u32 ext, int *attr,
sk               1109 net/ipv4/tcp_bbr.c 		struct tcp_sock *tp = tcp_sk(sk);
sk               1110 net/ipv4/tcp_bbr.c 		struct bbr *bbr = inet_csk_ca(sk);
sk               1111 net/ipv4/tcp_bbr.c 		u64 bw = bbr_bw(sk);
sk               1126 net/ipv4/tcp_bbr.c static void bbr_set_state(struct sock *sk, u8 new_state)
sk               1128 net/ipv4/tcp_bbr.c 	struct bbr *bbr = inet_csk_ca(sk);
sk               1136 net/ipv4/tcp_bbr.c 		bbr_lt_bw_sampling(sk, &rs);
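Two details in the tcp_bbr.c block above connect back to the socket layer: bbr_bw_to_pacing_rate() clamps the gain-scaled rate with min_t(u64, rate, sk->sk_max_pacing_rate), and bbr_init() flips sk->sk_pacing_status to SK_PACING_NEEDED so the fq qdisc or TCP's internal pacing enforces the result. The ceiling that feeds that clamp is settable per socket; a sketch (the numeric rate is only an example, and the fallback define mirrors the asm-generic uapi value):

/* Hedged sketch: capping sk->sk_max_pacing_rate from userspace, the
 * ceiling applied in bbr_bw_to_pacing_rate() above.  Units are bytes/s. */
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47		/* asm-generic value; some libcs lack it */
#endif

int cap_pacing(int fd)
{
	unsigned int max_rate = 12500000;	/* ~100 Mbit/s, example only */

	return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			  &max_rate, sizeof(max_rate));
}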
sk                 70 net/ipv4/tcp_bic.c static void bictcp_init(struct sock *sk)
sk                 72 net/ipv4/tcp_bic.c 	struct bictcp *ca = inet_csk_ca(sk);
sk                 77 net/ipv4/tcp_bic.c 		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
sk                140 net/ipv4/tcp_bic.c static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                142 net/ipv4/tcp_bic.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                143 net/ipv4/tcp_bic.c 	struct bictcp *ca = inet_csk_ca(sk);
sk                145 net/ipv4/tcp_bic.c 	if (!tcp_is_cwnd_limited(sk))
sk                160 net/ipv4/tcp_bic.c static u32 bictcp_recalc_ssthresh(struct sock *sk)
sk                162 net/ipv4/tcp_bic.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                163 net/ipv4/tcp_bic.c 	struct bictcp *ca = inet_csk_ca(sk);
sk                180 net/ipv4/tcp_bic.c static void bictcp_state(struct sock *sk, u8 new_state)
sk                183 net/ipv4/tcp_bic.c 		bictcp_reset(inet_csk_ca(sk));
sk                189 net/ipv4/tcp_bic.c static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
sk                191 net/ipv4/tcp_bic.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                194 net/ipv4/tcp_bic.c 		struct bictcp *ca = inet_csk_ca(sk);
sk                 13 net/ipv4/tcp_bpf.c static bool tcp_bpf_stream_read(const struct sock *sk)
sk                 19 net/ipv4/tcp_bpf.c 	psock = sk_psock(sk);
sk                 26 net/ipv4/tcp_bpf.c static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
sk                 35 net/ipv4/tcp_bpf.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                 36 net/ipv4/tcp_bpf.c 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                 37 net/ipv4/tcp_bpf.c 	ret = sk_wait_event(sk, &timeo,
sk                 39 net/ipv4/tcp_bpf.c 			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
sk                 40 net/ipv4/tcp_bpf.c 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                 41 net/ipv4/tcp_bpf.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                 45 net/ipv4/tcp_bpf.c int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
sk                 82 net/ipv4/tcp_bpf.c 				sk_mem_uncharge(sk, copy);
sk                118 net/ipv4/tcp_bpf.c int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk                125 net/ipv4/tcp_bpf.c 		return inet_recv_error(sk, msg, len, addr_len);
sk                127 net/ipv4/tcp_bpf.c 	psock = sk_psock_get(sk);
sk                129 net/ipv4/tcp_bpf.c 		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
sk                130 net/ipv4/tcp_bpf.c 	if (!skb_queue_empty(&sk->sk_receive_queue) &&
sk                132 net/ipv4/tcp_bpf.c 		sk_psock_put(sk, psock);
sk                133 net/ipv4/tcp_bpf.c 		return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
sk                135 net/ipv4/tcp_bpf.c 	lock_sock(sk);
sk                137 net/ipv4/tcp_bpf.c 	copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
sk                142 net/ipv4/tcp_bpf.c 		timeo = sock_rcvtimeo(sk, nonblock);
sk                143 net/ipv4/tcp_bpf.c 		data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
sk                147 net/ipv4/tcp_bpf.c 			release_sock(sk);
sk                148 net/ipv4/tcp_bpf.c 			sk_psock_put(sk, psock);
sk                149 net/ipv4/tcp_bpf.c 			return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
sk                159 net/ipv4/tcp_bpf.c 	release_sock(sk);
sk                160 net/ipv4/tcp_bpf.c 	sk_psock_put(sk, psock);
sk                164 net/ipv4/tcp_bpf.c static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
sk                177 net/ipv4/tcp_bpf.c 	lock_sock(sk);
sk                184 net/ipv4/tcp_bpf.c 		if (!sk_wmem_schedule(sk, size)) {
sk                190 net/ipv4/tcp_bpf.c 		sk_mem_charge(sk, size);
sk                207 net/ipv4/tcp_bpf.c 		sk_psock_data_ready(sk, psock);
sk                209 net/ipv4/tcp_bpf.c 		sk_msg_free(sk, tmp);
sk                213 net/ipv4/tcp_bpf.c 	release_sock(sk);
sk                217 net/ipv4/tcp_bpf.c static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
sk                235 net/ipv4/tcp_bpf.c 		tcp_rate_check_app_limited(sk);
sk                237 net/ipv4/tcp_bpf.c 		has_tx_ulp = tls_sw_has_ctx_tx(sk);
sk                240 net/ipv4/tcp_bpf.c 			ret = kernel_sendpage_locked(sk,
sk                243 net/ipv4/tcp_bpf.c 			ret = do_tcp_sendpages(sk, page, off, size, flags);
sk                254 net/ipv4/tcp_bpf.c 			sk_mem_uncharge(sk, ret);
sk                274 net/ipv4/tcp_bpf.c static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
sk                279 net/ipv4/tcp_bpf.c 	lock_sock(sk);
sk                280 net/ipv4/tcp_bpf.c 	ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
sk                281 net/ipv4/tcp_bpf.c 	release_sock(sk);
sk                285 net/ipv4/tcp_bpf.c int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
sk                289 net/ipv4/tcp_bpf.c 	struct sk_psock *psock = sk_psock_get(sk);
sk                293 net/ipv4/tcp_bpf.c 		sk_msg_free(sk, msg);
sk                296 net/ipv4/tcp_bpf.c 	ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
sk                297 net/ipv4/tcp_bpf.c 			tcp_bpf_push_locked(sk, msg, bytes, flags, false);
sk                298 net/ipv4/tcp_bpf.c 	sk_psock_put(sk, psock);
sk                303 net/ipv4/tcp_bpf.c static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
sk                319 net/ipv4/tcp_bpf.c 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
sk                342 net/ipv4/tcp_bpf.c 		ret = tcp_bpf_push(sk, msg, tosend, flags, true);
sk                344 net/ipv4/tcp_bpf.c 			*copied -= sk_msg_free(sk, msg);
sk                356 net/ipv4/tcp_bpf.c 		sk_msg_return(sk, msg, tosend);
sk                357 net/ipv4/tcp_bpf.c 		release_sock(sk);
sk                359 net/ipv4/tcp_bpf.c 		lock_sock(sk);
sk                361 net/ipv4/tcp_bpf.c 			int free = sk_msg_free_nocharge(sk, msg);
sk                367 net/ipv4/tcp_bpf.c 			sk_msg_free(sk, msg);
sk                375 net/ipv4/tcp_bpf.c 		sk_msg_free_partial(sk, msg, tosend);
sk                397 net/ipv4/tcp_bpf.c static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
sk                409 net/ipv4/tcp_bpf.c 	psock = sk_psock_get(sk);
sk                411 net/ipv4/tcp_bpf.c 		return tcp_sendmsg(sk, msg, size);
sk                413 net/ipv4/tcp_bpf.c 	lock_sock(sk);
sk                414 net/ipv4/tcp_bpf.c 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
sk                419 net/ipv4/tcp_bpf.c 		if (sk->sk_err) {
sk                420 net/ipv4/tcp_bpf.c 			err = -sk->sk_err;
sk                425 net/ipv4/tcp_bpf.c 		if (!sk_stream_memory_free(sk))
sk                435 net/ipv4/tcp_bpf.c 		err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
sk                443 net/ipv4/tcp_bpf.c 		err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
sk                446 net/ipv4/tcp_bpf.c 			sk_msg_trim(sk, msg_tx, osize);
sk                463 net/ipv4/tcp_bpf.c 		err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
sk                468 net/ipv4/tcp_bpf.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                470 net/ipv4/tcp_bpf.c 		err = sk_stream_wait_memory(sk, &timeo);
sk                473 net/ipv4/tcp_bpf.c 				sk_msg_free(sk, msg_tx);
sk                479 net/ipv4/tcp_bpf.c 		err = sk_stream_error(sk, msg->msg_flags, err);
sk                480 net/ipv4/tcp_bpf.c 	release_sock(sk);
sk                481 net/ipv4/tcp_bpf.c 	sk_psock_put(sk, psock);
sk                485 net/ipv4/tcp_bpf.c static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
sk                493 net/ipv4/tcp_bpf.c 	psock = sk_psock_get(sk);
sk                495 net/ipv4/tcp_bpf.c 		return tcp_sendpage(sk, page, offset, size, flags);
sk                497 net/ipv4/tcp_bpf.c 	lock_sock(sk);
sk                510 net/ipv4/tcp_bpf.c 	sk_mem_charge(sk, size);
sk                526 net/ipv4/tcp_bpf.c 	err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
sk                528 net/ipv4/tcp_bpf.c 	release_sock(sk);
sk                529 net/ipv4/tcp_bpf.c 	sk_psock_put(sk, psock);
sk                533 net/ipv4/tcp_bpf.c static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock)
sk                538 net/ipv4/tcp_bpf.c 		sk_psock_unlink(sk, link);
sk                543 net/ipv4/tcp_bpf.c static void tcp_bpf_unhash(struct sock *sk)
sk                545 net/ipv4/tcp_bpf.c 	void (*saved_unhash)(struct sock *sk);
sk                549 net/ipv4/tcp_bpf.c 	psock = sk_psock(sk);
sk                552 net/ipv4/tcp_bpf.c 		if (sk->sk_prot->unhash)
sk                553 net/ipv4/tcp_bpf.c 			sk->sk_prot->unhash(sk);
sk                558 net/ipv4/tcp_bpf.c 	tcp_bpf_remove(sk, psock);
sk                560 net/ipv4/tcp_bpf.c 	saved_unhash(sk);
sk                563 net/ipv4/tcp_bpf.c static void tcp_bpf_close(struct sock *sk, long timeout)
sk                565 net/ipv4/tcp_bpf.c 	void (*saved_close)(struct sock *sk, long timeout);
sk                568 net/ipv4/tcp_bpf.c 	lock_sock(sk);
sk                570 net/ipv4/tcp_bpf.c 	psock = sk_psock(sk);
sk                573 net/ipv4/tcp_bpf.c 		release_sock(sk);
sk                574 net/ipv4/tcp_bpf.c 		return sk->sk_prot->close(sk, timeout);
sk                578 net/ipv4/tcp_bpf.c 	tcp_bpf_remove(sk, psock);
sk                580 net/ipv4/tcp_bpf.c 	release_sock(sk);
sk                581 net/ipv4/tcp_bpf.c 	saved_close(sk, timeout);
sk                614 net/ipv4/tcp_bpf.c static void tcp_bpf_check_v6_needs_rebuild(struct sock *sk, struct proto *ops)
sk                616 net/ipv4/tcp_bpf.c 	if (sk->sk_family == AF_INET6 &&
sk                634 net/ipv4/tcp_bpf.c static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock)
sk                636 net/ipv4/tcp_bpf.c 	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
sk                639 net/ipv4/tcp_bpf.c 	sk_psock_update_proto(sk, psock, &tcp_bpf_prots[family][config]);
sk                642 net/ipv4/tcp_bpf.c static void tcp_bpf_reinit_sk_prot(struct sock *sk, struct sk_psock *psock)
sk                644 net/ipv4/tcp_bpf.c 	int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
sk                651 net/ipv4/tcp_bpf.c 	sk->sk_prot = &tcp_bpf_prots[family][config];
sk                665 net/ipv4/tcp_bpf.c void tcp_bpf_reinit(struct sock *sk)
sk                669 net/ipv4/tcp_bpf.c 	sock_owned_by_me(sk);
sk                672 net/ipv4/tcp_bpf.c 	psock = sk_psock(sk);
sk                673 net/ipv4/tcp_bpf.c 	tcp_bpf_reinit_sk_prot(sk, psock);
sk                677 net/ipv4/tcp_bpf.c int tcp_bpf_init(struct sock *sk)
sk                679 net/ipv4/tcp_bpf.c 	struct proto *ops = READ_ONCE(sk->sk_prot);
sk                682 net/ipv4/tcp_bpf.c 	sock_owned_by_me(sk);
sk                685 net/ipv4/tcp_bpf.c 	psock = sk_psock(sk);
sk                691 net/ipv4/tcp_bpf.c 	tcp_bpf_check_v6_needs_rebuild(sk, ops);
sk                692 net/ipv4/tcp_bpf.c 	tcp_bpf_update_sk_prot(sk, psock);
sk                140 net/ipv4/tcp_cdg.c static void tcp_cdg_hystart_update(struct sock *sk)
sk                142 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
sk                143 net/ipv4/tcp_cdg.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                152 net/ipv4/tcp_cdg.c 		if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) {
sk                160 net/ipv4/tcp_cdg.c 				NET_INC_STATS(sock_net(sk),
sk                162 net/ipv4/tcp_cdg.c 				NET_ADD_STATS(sock_net(sk),
sk                179 net/ipv4/tcp_cdg.c 				NET_INC_STATS(sock_net(sk),
sk                181 net/ipv4/tcp_cdg.c 				NET_ADD_STATS(sock_net(sk),
sk                241 net/ipv4/tcp_cdg.c static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
sk                243 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
sk                244 net/ipv4/tcp_cdg.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                257 net/ipv4/tcp_cdg.c 	tcp_enter_cwr(sk);
sk                262 net/ipv4/tcp_cdg.c static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                264 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
sk                265 net/ipv4/tcp_cdg.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                270 net/ipv4/tcp_cdg.c 		tcp_cdg_hystart_update(sk);
sk                283 net/ipv4/tcp_cdg.c 		if (grad > 0 && tcp_cdg_backoff(sk, grad))
sk                287 net/ipv4/tcp_cdg.c 	if (!tcp_is_cwnd_limited(sk)) {
sk                293 net/ipv4/tcp_cdg.c 	tcp_reno_cong_avoid(sk, ack, acked);
sk                299 net/ipv4/tcp_cdg.c static void tcp_cdg_acked(struct sock *sk, const struct ack_sample *sample)
sk                301 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
sk                302 net/ipv4/tcp_cdg.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                328 net/ipv4/tcp_cdg.c static u32 tcp_cdg_ssthresh(struct sock *sk)
sk                330 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
sk                331 net/ipv4/tcp_cdg.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                345 net/ipv4/tcp_cdg.c static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
sk                347 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
sk                348 net/ipv4/tcp_cdg.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                373 net/ipv4/tcp_cdg.c static void tcp_cdg_init(struct sock *sk)
sk                375 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
sk                376 net/ipv4/tcp_cdg.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                386 net/ipv4/tcp_cdg.c static void tcp_cdg_release(struct sock *sk)
sk                388 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
sk                157 net/ipv4/tcp_cong.c void tcp_assign_congestion_control(struct sock *sk)
sk                159 net/ipv4/tcp_cong.c 	struct net *net = sock_net(sk);
sk                160 net/ipv4/tcp_cong.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                172 net/ipv4/tcp_cong.c 		INET_ECN_xmit(sk);
sk                174 net/ipv4/tcp_cong.c 		INET_ECN_dontxmit(sk);
sk                177 net/ipv4/tcp_cong.c void tcp_init_congestion_control(struct sock *sk)
sk                179 net/ipv4/tcp_cong.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                181 net/ipv4/tcp_cong.c 	tcp_sk(sk)->prior_ssthresh = 0;
sk                183 net/ipv4/tcp_cong.c 		icsk->icsk_ca_ops->init(sk);
sk                184 net/ipv4/tcp_cong.c 	if (tcp_ca_needs_ecn(sk))
sk                185 net/ipv4/tcp_cong.c 		INET_ECN_xmit(sk);
sk                187 net/ipv4/tcp_cong.c 		INET_ECN_dontxmit(sk);
sk                190 net/ipv4/tcp_cong.c static void tcp_reinit_congestion_control(struct sock *sk,
sk                193 net/ipv4/tcp_cong.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                195 net/ipv4/tcp_cong.c 	tcp_cleanup_congestion_control(sk);
sk                200 net/ipv4/tcp_cong.c 	if (sk->sk_state != TCP_CLOSE)
sk                201 net/ipv4/tcp_cong.c 		tcp_init_congestion_control(sk);
sk                205 net/ipv4/tcp_cong.c void tcp_cleanup_congestion_control(struct sock *sk)
sk                207 net/ipv4/tcp_cong.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                210 net/ipv4/tcp_cong.c 		icsk->icsk_ca_ops->release(sk);
sk                336 net/ipv4/tcp_cong.c int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
sk                339 net/ipv4/tcp_cong.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                350 net/ipv4/tcp_cong.c 		ca = tcp_ca_find_autoload(sock_net(sk), name);
sk                365 net/ipv4/tcp_cong.c 				tcp_reinit_congestion_control(sk, ca);
sk                378 net/ipv4/tcp_cong.c 		tcp_reinit_congestion_control(sk, ca);
sk                434 net/ipv4/tcp_cong.c void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                436 net/ipv4/tcp_cong.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                438 net/ipv4/tcp_cong.c 	if (!tcp_is_cwnd_limited(sk))
sk                453 net/ipv4/tcp_cong.c u32 tcp_reno_ssthresh(struct sock *sk)
sk                455 net/ipv4/tcp_cong.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                461 net/ipv4/tcp_cong.c u32 tcp_reno_undo_cwnd(struct sock *sk)
sk                463 net/ipv4/tcp_cong.c 	const struct tcp_sock *tp = tcp_sk(sk);
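The tcp_cong.c excerpt is the dispatch layer: tcp_assign_congestion_control() and tcp_init_congestion_control() wire icsk->icsk_ca_ops to a registered module and invoke its hooks, while the tcp_reno_* helpers are the stock implementations modules can fall back on. For orientation, here is a hedged, Reno-equivalent skeleton of such a module ("mini" and the function names are made up); it fills only the mandatory hooks and registers the same way as the bic/cubic/dctcp/bbr modules listed around it. Hook names follow recent kernels, so treat it as illustrative rather than a drop-in module.

/* Hedged sketch of a minimal pluggable congestion-control module,
 * dispatched through the icsk_ca_ops machinery shown in tcp_cong.c. */
#include <linux/module.h>
#include <net/tcp.h>

static u32 mini_ssthresh(struct sock *sk)
{
	/* Halve the window on loss, as tcp_reno_ssthresh() does. */
	return max(tcp_sk(sk)->snd_cwnd >> 1U, 2U);
}

static void mini_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;
	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);	/* exponential growth */
		if (!acked)
			return;
	}
	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);	/* additive increase */
}

static u32 mini_undo_cwnd(struct sock *sk)
{
	/* Mirror tcp_reno_undo_cwnd(): restore at least the prior window. */
	return max(tcp_sk(sk)->snd_cwnd, tcp_sk(sk)->prior_cwnd);
}

static struct tcp_congestion_ops mini_cc __read_mostly = {
	.ssthresh	= mini_ssthresh,
	.cong_avoid	= mini_cong_avoid,
	.undo_cwnd	= mini_undo_cwnd,
	.owner		= THIS_MODULE,
	.name		= "mini",
};

static int __init mini_cc_init(void)
{
	return tcp_register_congestion_control(&mini_cc);
}

static void __exit mini_cc_exit(void)
{
	tcp_unregister_congestion_control(&mini_cc);
}

module_init(mini_cc_init);
module_exit(mini_cc_exit);
MODULE_LICENSE("GPL");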
sk                129 net/ipv4/tcp_cubic.c static inline void bictcp_hystart_reset(struct sock *sk)
sk                131 net/ipv4/tcp_cubic.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                132 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
sk                140 net/ipv4/tcp_cubic.c static void bictcp_init(struct sock *sk)
sk                142 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
sk                147 net/ipv4/tcp_cubic.c 		bictcp_hystart_reset(sk);
sk                150 net/ipv4/tcp_cubic.c 		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
sk                153 net/ipv4/tcp_cubic.c static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
sk                156 net/ipv4/tcp_cubic.c 		struct bictcp *ca = inet_csk_ca(sk);
sk                160 net/ipv4/tcp_cubic.c 		delta = now - tcp_sk(sk)->lsndtime;
sk                335 net/ipv4/tcp_cubic.c static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                337 net/ipv4/tcp_cubic.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                338 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
sk                340 net/ipv4/tcp_cubic.c 	if (!tcp_is_cwnd_limited(sk))
sk                345 net/ipv4/tcp_cubic.c 			bictcp_hystart_reset(sk);
sk                354 net/ipv4/tcp_cubic.c static u32 bictcp_recalc_ssthresh(struct sock *sk)
sk                356 net/ipv4/tcp_cubic.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                357 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
sk                371 net/ipv4/tcp_cubic.c static void bictcp_state(struct sock *sk, u8 new_state)
sk                374 net/ipv4/tcp_cubic.c 		bictcp_reset(inet_csk_ca(sk));
sk                375 net/ipv4/tcp_cubic.c 		bictcp_hystart_reset(sk);
sk                379 net/ipv4/tcp_cubic.c static void hystart_update(struct sock *sk, u32 delay)
sk                381 net/ipv4/tcp_cubic.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                382 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
sk                395 net/ipv4/tcp_cubic.c 				NET_INC_STATS(sock_net(sk),
sk                397 net/ipv4/tcp_cubic.c 				NET_ADD_STATS(sock_net(sk),
sk                416 net/ipv4/tcp_cubic.c 				NET_INC_STATS(sock_net(sk),
sk                418 net/ipv4/tcp_cubic.c 				NET_ADD_STATS(sock_net(sk),
sk                430 net/ipv4/tcp_cubic.c static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
sk                432 net/ipv4/tcp_cubic.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                433 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
sk                455 net/ipv4/tcp_cubic.c 		hystart_update(sk, delay);
sk                 75 net/ipv4/tcp_dctcp.c static void dctcp_init(struct sock *sk)
sk                 77 net/ipv4/tcp_dctcp.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                 80 net/ipv4/tcp_dctcp.c 	    (sk->sk_state == TCP_LISTEN ||
sk                 81 net/ipv4/tcp_dctcp.c 	     sk->sk_state == TCP_CLOSE)) {
sk                 82 net/ipv4/tcp_dctcp.c 		struct dctcp *ca = inet_csk_ca(sk);
sk                 98 net/ipv4/tcp_dctcp.c 	inet_csk(sk)->icsk_ca_ops = &dctcp_reno;
sk                 99 net/ipv4/tcp_dctcp.c 	INET_ECN_dontxmit(sk);
sk                102 net/ipv4/tcp_dctcp.c static u32 dctcp_ssthresh(struct sock *sk)
sk                104 net/ipv4/tcp_dctcp.c 	struct dctcp *ca = inet_csk_ca(sk);
sk                105 net/ipv4/tcp_dctcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                111 net/ipv4/tcp_dctcp.c static void dctcp_update_alpha(struct sock *sk, u32 flags)
sk                113 net/ipv4/tcp_dctcp.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                114 net/ipv4/tcp_dctcp.c 	struct dctcp *ca = inet_csk_ca(sk);
sk                144 net/ipv4/tcp_dctcp.c static void dctcp_react_to_loss(struct sock *sk)
sk                146 net/ipv4/tcp_dctcp.c 	struct dctcp *ca = inet_csk_ca(sk);
sk                147 net/ipv4/tcp_dctcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                153 net/ipv4/tcp_dctcp.c static void dctcp_state(struct sock *sk, u8 new_state)
sk                156 net/ipv4/tcp_dctcp.c 	    new_state != inet_csk(sk)->icsk_ca_state)
sk                157 net/ipv4/tcp_dctcp.c 		dctcp_react_to_loss(sk);
sk                163 net/ipv4/tcp_dctcp.c static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
sk                165 net/ipv4/tcp_dctcp.c 	struct dctcp *ca = inet_csk_ca(sk);
sk                170 net/ipv4/tcp_dctcp.c 		dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
sk                173 net/ipv4/tcp_dctcp.c 		dctcp_react_to_loss(sk);
sk                181 net/ipv4/tcp_dctcp.c static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
sk                184 net/ipv4/tcp_dctcp.c 	const struct dctcp *ca = inet_csk_ca(sk);
sk                185 net/ipv4/tcp_dctcp.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                193 net/ipv4/tcp_dctcp.c 		if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {
sk                209 net/ipv4/tcp_dctcp.c static u32 dctcp_cwnd_undo(struct sock *sk)
sk                211 net/ipv4/tcp_dctcp.c 	const struct dctcp *ca = inet_csk_ca(sk);
sk                213 net/ipv4/tcp_dctcp.c 	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
sk                  4 net/ipv4/tcp_dctcp.h static inline void dctcp_ece_ack_cwr(struct sock *sk, u32 ce_state)
sk                  6 net/ipv4/tcp_dctcp.h 	struct tcp_sock *tp = tcp_sk(sk);
sk                 19 net/ipv4/tcp_dctcp.h static inline void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
sk                 29 net/ipv4/tcp_dctcp.h 		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
sk                 30 net/ipv4/tcp_dctcp.h 			dctcp_ece_ack_cwr(sk, *ce_state);
sk                 31 net/ipv4/tcp_dctcp.h 			__tcp_send_ack(sk, *prior_rcv_nxt);
sk                 33 net/ipv4/tcp_dctcp.h 		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
sk                 35 net/ipv4/tcp_dctcp.h 	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
sk                 37 net/ipv4/tcp_dctcp.h 	dctcp_ece_ack_cwr(sk, new_ce_state);
sk                 18 net/ipv4/tcp_diag.c static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
sk                 23 net/ipv4/tcp_diag.c 	if (inet_sk_state_load(sk) == TCP_LISTEN) {
sk                 24 net/ipv4/tcp_diag.c 		r->idiag_rqueue = sk->sk_ack_backlog;
sk                 25 net/ipv4/tcp_diag.c 		r->idiag_wqueue = sk->sk_max_ack_backlog;
sk                 26 net/ipv4/tcp_diag.c 	} else if (sk->sk_type == SOCK_STREAM) {
sk                 27 net/ipv4/tcp_diag.c 		const struct tcp_sock *tp = tcp_sk(sk);
sk                 34 net/ipv4/tcp_diag.c 		tcp_get_info(sk, info);
sk                 85 net/ipv4/tcp_diag.c static int tcp_diag_put_ulp(struct sk_buff *skb, struct sock *sk,
sk                100 net/ipv4/tcp_diag.c 		err = ulp_ops->get_info(sk, skb);
sk                112 net/ipv4/tcp_diag.c static int tcp_diag_get_aux(struct sock *sk, bool net_admin,
sk                115 net/ipv4/tcp_diag.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                123 net/ipv4/tcp_diag.c 		md5sig = rcu_dereference(tcp_sk(sk)->md5sig_info);
sk                137 net/ipv4/tcp_diag.c 			err = tcp_diag_put_ulp(skb, sk, ulp_ops);
sk                144 net/ipv4/tcp_diag.c static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin)
sk                146 net/ipv4/tcp_diag.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                150 net/ipv4/tcp_diag.c 	if (net_admin && sk_fullsock(sk)) {
sk                156 net/ipv4/tcp_diag.c 		md5sig = rcu_dereference(tcp_sk(sk)->md5sig_info);
sk                167 net/ipv4/tcp_diag.c 	if (net_admin && sk_fullsock(sk)) {
sk                175 net/ipv4/tcp_diag.c 				size += ulp_ops->get_info_size(sk);
sk                197 net/ipv4/tcp_diag.c 	struct net *net = sock_net(in_skb->sk);
sk                198 net/ipv4/tcp_diag.c 	struct sock *sk = inet_diag_find_one_icsk(net, &tcp_hashinfo, req);
sk                201 net/ipv4/tcp_diag.c 	if (IS_ERR(sk))
sk                202 net/ipv4/tcp_diag.c 		return PTR_ERR(sk);
sk                204 net/ipv4/tcp_diag.c 	err = sock_diag_destroy(sk, ECONNABORTED);
sk                206 net/ipv4/tcp_diag.c 	sock_gen_put(sk);
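
tcp_diag_get_info() above fills struct tcp_info for the sock_diag netlink path via tcp_get_info(), with the listen-socket branch reporting the accept backlog instead. For quick inspection of a single connection the same structure is reachable with getsockopt(TCP_INFO); a hedged sketch, where the loopback target is only an example:

/* Sketch: reading the struct tcp_info that tcp_get_info() fills, but through
 * getsockopt(TCP_INFO) on a connected TCP socket.  Error handling trimmed. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <unistd.h>

static void dump_tcp_info(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0) {
		perror("TCP_INFO");
		return;
	}
	printf("state=%u rtt=%uus rttvar=%uus snd_cwnd=%u retrans=%u\n",
	       ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_rttvar,
	       ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
}

int main(void)
{
	struct sockaddr_in sa = {
		.sin_family = AF_INET,
		.sin_port = htons(80),			/* example target only */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd >= 0 && connect(fd, (struct sockaddr *)&sa, sizeof(sa)) == 0)
		dump_tcp_info(fd);
	if (fd >= 0)
		close(fd);
	return 0;
}
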
sk                 44 net/ipv4/tcp_fastopen.c void tcp_fastopen_destroy_cipher(struct sock *sk)
sk                 49 net/ipv4/tcp_fastopen.c 			inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
sk                 69 net/ipv4/tcp_fastopen.c int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
sk                 93 net/ipv4/tcp_fastopen.c 	if (sk) {
sk                 94 net/ipv4/tcp_fastopen.c 		q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
sk                146 net/ipv4/tcp_fastopen.c static void tcp_fastopen_cookie_gen(struct sock *sk,
sk                154 net/ipv4/tcp_fastopen.c 	ctx = tcp_fastopen_get_ctx(sk);
sk                163 net/ipv4/tcp_fastopen.c void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
sk                165 net/ipv4/tcp_fastopen.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                184 net/ipv4/tcp_fastopen.c 	sk_forced_mem_schedule(sk, skb->truesize);
sk                185 net/ipv4/tcp_fastopen.c 	skb_set_owner_r(skb, sk);
sk                191 net/ipv4/tcp_fastopen.c 	__skb_queue_tail(&sk->sk_receive_queue, skb);
sk                200 net/ipv4/tcp_fastopen.c 		tcp_fin(sk);
sk                204 net/ipv4/tcp_fastopen.c static int tcp_fastopen_cookie_gen_check(struct sock *sk,
sk                216 net/ipv4/tcp_fastopen.c 	ctx = tcp_fastopen_get_ctx(sk);
sk                232 net/ipv4/tcp_fastopen.c static struct sock *tcp_fastopen_create_child(struct sock *sk,
sk                237 net/ipv4/tcp_fastopen.c 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
sk                241 net/ipv4/tcp_fastopen.c 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
sk                289 net/ipv4/tcp_fastopen.c static bool tcp_fastopen_queue_check(struct sock *sk)
sk                303 net/ipv4/tcp_fastopen.c 	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
sk                312 net/ipv4/tcp_fastopen.c 			__NET_INC_STATS(sock_net(sk),
sk                325 net/ipv4/tcp_fastopen.c static bool tcp_fastopen_no_cookie(const struct sock *sk,
sk                329 net/ipv4/tcp_fastopen.c 	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
sk                330 net/ipv4/tcp_fastopen.c 	       tcp_sk(sk)->fastopen_no_cookie ||
sk                338 net/ipv4/tcp_fastopen.c struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
sk                344 net/ipv4/tcp_fastopen.c 	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
sk                350 net/ipv4/tcp_fastopen.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
sk                354 net/ipv4/tcp_fastopen.c 	      tcp_fastopen_queue_check(sk))) {
sk                360 net/ipv4/tcp_fastopen.c 	    tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
sk                365 net/ipv4/tcp_fastopen.c 		tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
sk                367 net/ipv4/tcp_fastopen.c 		ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
sk                370 net/ipv4/tcp_fastopen.c 			NET_INC_STATS(sock_net(sk),
sk                383 net/ipv4/tcp_fastopen.c 			child = tcp_fastopen_create_child(sk, skb, req);
sk                388 net/ipv4/tcp_fastopen.c 					NET_INC_STATS(sock_net(sk),
sk                393 net/ipv4/tcp_fastopen.c 				NET_INC_STATS(sock_net(sk),
sk                397 net/ipv4/tcp_fastopen.c 			NET_INC_STATS(sock_net(sk),
sk                406 net/ipv4/tcp_fastopen.c bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
sk                411 net/ipv4/tcp_fastopen.c 	tcp_fastopen_cache_get(sk, mss, cookie);
sk                414 net/ipv4/tcp_fastopen.c 	if (tcp_fastopen_active_should_disable(sk)) {
sk                419 net/ipv4/tcp_fastopen.c 	dst = __sk_dst_get(sk);
sk                421 net/ipv4/tcp_fastopen.c 	if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
sk                435 net/ipv4/tcp_fastopen.c bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
sk                438 net/ipv4/tcp_fastopen.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                442 net/ipv4/tcp_fastopen.c 		if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
sk                443 net/ipv4/tcp_fastopen.c 			inet_sk(sk)->defer_connect = 1;
sk                451 net/ipv4/tcp_fastopen.c 					   sk->sk_allocation);
sk                480 net/ipv4/tcp_fastopen.c void tcp_fastopen_active_disable(struct sock *sk)
sk                482 net/ipv4/tcp_fastopen.c 	struct net *net = sock_net(sk);
sk                493 net/ipv4/tcp_fastopen.c bool tcp_fastopen_active_should_disable(struct sock *sk)
sk                495 net/ipv4/tcp_fastopen.c 	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
sk                496 net/ipv4/tcp_fastopen.c 	int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
sk                506 net/ipv4/tcp_fastopen.c 	if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
sk                512 net/ipv4/tcp_fastopen.c 	tcp_sk(sk)->syn_fastopen_ch = 1;
sk                522 net/ipv4/tcp_fastopen.c void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
sk                524 net/ipv4/tcp_fastopen.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                535 net/ipv4/tcp_fastopen.c 				tcp_fastopen_active_disable(sk);
sk                540 net/ipv4/tcp_fastopen.c 		   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
sk                541 net/ipv4/tcp_fastopen.c 		dst = sk_dst_get(sk);
sk                543 net/ipv4/tcp_fastopen.c 			atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
sk                548 net/ipv4/tcp_fastopen.c void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
sk                550 net/ipv4/tcp_fastopen.c 	u32 timeouts = inet_csk(sk)->icsk_retransmits;
sk                551 net/ipv4/tcp_fastopen.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                560 net/ipv4/tcp_fastopen.c 		tcp_fastopen_active_disable(sk);
sk                561 net/ipv4/tcp_fastopen.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
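
The tcp_fastopen.c entries trace the server-side cookie generation and check (tcp_try_fastopen, tcp_fastopen_cookie_gen_check) and the client-side cache lookup plus blackhole detection. A minimal client-side sketch of how this is exercised from user space follows; it assumes the client bit of net.ipv4.tcp_fastopen is set and uses a purely illustrative loopback address. On the server side, the listener enables TFO with setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)).

/* Client-side TCP Fast Open sketch: data is handed to the kernel together
 * with the connection attempt and rides in the SYN once a cookie is cached;
 * without a cookie it degrades to a normal three-way handshake. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <unistd.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000		/* fallback for older libc headers */
#endif

int main(void)
{
	struct sockaddr_in srv = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),	/* illustrative address/port */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	const char req[] = "hello\n";
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	/* connect() + write() folded into one call */
	if (sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
		   (struct sockaddr *)&srv, sizeof(srv)) < 0)
		perror("sendto(MSG_FASTOPEN)");
	close(fd);
	return 0;
}
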
sk                100 net/ipv4/tcp_highspeed.c static void hstcp_init(struct sock *sk)
sk                102 net/ipv4/tcp_highspeed.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                103 net/ipv4/tcp_highspeed.c 	struct hstcp *ca = inet_csk_ca(sk);
sk                112 net/ipv4/tcp_highspeed.c static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                114 net/ipv4/tcp_highspeed.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                115 net/ipv4/tcp_highspeed.c 	struct hstcp *ca = inet_csk_ca(sk);
sk                117 net/ipv4/tcp_highspeed.c 	if (!tcp_is_cwnd_limited(sk))
sk                151 net/ipv4/tcp_highspeed.c static u32 hstcp_ssthresh(struct sock *sk)
sk                153 net/ipv4/tcp_highspeed.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                154 net/ipv4/tcp_highspeed.c 	struct hstcp *ca = inet_csk_ca(sk);
sk                 68 net/ipv4/tcp_htcp.c static u32 htcp_cwnd_undo(struct sock *sk)
sk                 70 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
sk                 79 net/ipv4/tcp_htcp.c 	return tcp_reno_undo_cwnd(sk);
sk                 82 net/ipv4/tcp_htcp.c static inline void measure_rtt(struct sock *sk, u32 srtt)
sk                 84 net/ipv4/tcp_htcp.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                 85 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
sk                101 net/ipv4/tcp_htcp.c static void measure_achieved_throughput(struct sock *sk,
sk                104 net/ipv4/tcp_htcp.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                105 net/ipv4/tcp_htcp.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                106 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
sk                113 net/ipv4/tcp_htcp.c 		measure_rtt(sk, usecs_to_jiffies(sample->rtt_us));
sk                208 net/ipv4/tcp_htcp.c static void htcp_param_update(struct sock *sk)
sk                210 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
sk                222 net/ipv4/tcp_htcp.c static u32 htcp_recalc_ssthresh(struct sock *sk)
sk                224 net/ipv4/tcp_htcp.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                225 net/ipv4/tcp_htcp.c 	const struct htcp *ca = inet_csk_ca(sk);
sk                227 net/ipv4/tcp_htcp.c 	htcp_param_update(sk);
sk                231 net/ipv4/tcp_htcp.c static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                233 net/ipv4/tcp_htcp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                234 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
sk                236 net/ipv4/tcp_htcp.c 	if (!tcp_is_cwnd_limited(sk))
sk                257 net/ipv4/tcp_htcp.c static void htcp_init(struct sock *sk)
sk                259 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
sk                268 net/ipv4/tcp_htcp.c static void htcp_state(struct sock *sk, u8 new_state)
sk                273 net/ipv4/tcp_htcp.c 			struct htcp *ca = inet_csk_ca(sk);
sk                284 net/ipv4/tcp_htcp.c 		htcp_reset(inet_csk_ca(sk));
sk                 34 net/ipv4/tcp_hybla.c static inline void hybla_recalc_param (struct sock *sk)
sk                 36 net/ipv4/tcp_hybla.c 	struct hybla *ca = inet_csk_ca(sk);
sk                 39 net/ipv4/tcp_hybla.c 			    tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC),
sk                 46 net/ipv4/tcp_hybla.c static void hybla_init(struct sock *sk)
sk                 48 net/ipv4/tcp_hybla.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 49 net/ipv4/tcp_hybla.c 	struct hybla *ca = inet_csk_ca(sk);
sk                 61 net/ipv4/tcp_hybla.c 	hybla_recalc_param(sk);
sk                 68 net/ipv4/tcp_hybla.c static void hybla_state(struct sock *sk, u8 ca_state)
sk                 70 net/ipv4/tcp_hybla.c 	struct hybla *ca = inet_csk_ca(sk);
sk                 90 net/ipv4/tcp_hybla.c static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                 92 net/ipv4/tcp_hybla.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 93 net/ipv4/tcp_hybla.c 	struct hybla *ca = inet_csk_ca(sk);
sk                 99 net/ipv4/tcp_hybla.c 		hybla_recalc_param(sk);
sk                103 net/ipv4/tcp_hybla.c 	if (!tcp_is_cwnd_limited(sk))
sk                107 net/ipv4/tcp_hybla.c 		tcp_reno_cong_avoid(sk, ack, acked);
sk                112 net/ipv4/tcp_hybla.c 		hybla_recalc_param(sk);
sk                 57 net/ipv4/tcp_illinois.c static void rtt_reset(struct sock *sk)
sk                 59 net/ipv4/tcp_illinois.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 60 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
sk                 69 net/ipv4/tcp_illinois.c static void tcp_illinois_init(struct sock *sk)
sk                 71 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
sk                 82 net/ipv4/tcp_illinois.c 	rtt_reset(sk);
sk                 86 net/ipv4/tcp_illinois.c static void tcp_illinois_acked(struct sock *sk, const struct ack_sample *sample)
sk                 88 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
sk                222 net/ipv4/tcp_illinois.c static void update_params(struct sock *sk)
sk                224 net/ipv4/tcp_illinois.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                225 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
sk                238 net/ipv4/tcp_illinois.c 	rtt_reset(sk);
sk                244 net/ipv4/tcp_illinois.c static void tcp_illinois_state(struct sock *sk, u8 new_state)
sk                246 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
sk                253 net/ipv4/tcp_illinois.c 		rtt_reset(sk);
sk                260 net/ipv4/tcp_illinois.c static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                262 net/ipv4/tcp_illinois.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                263 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
sk                266 net/ipv4/tcp_illinois.c 		update_params(sk);
sk                269 net/ipv4/tcp_illinois.c 	if (!tcp_is_cwnd_limited(sk))
sk                295 net/ipv4/tcp_illinois.c static u32 tcp_illinois_ssthresh(struct sock *sk)
sk                297 net/ipv4/tcp_illinois.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                298 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
sk                305 net/ipv4/tcp_illinois.c static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
sk                308 net/ipv4/tcp_illinois.c 	const struct illinois *ca = inet_csk_ca(sk);
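
tcp_highspeed.c, tcp_htcp.c, tcp_hybla.c and tcp_illinois.c all follow the same shape visible in the lines above: per-connection private state carved out of the icsk CA scratch area via inet_csk_ca(sk), and a set of hooks gathered in struct tcp_congestion_ops. Below is a stripped-down, hypothetical module ("toycc" is an invented name) showing just that skeleton under those assumptions, delegating actual window growth to the stock Reno helpers.

/* Minimal sketch of the congestion-control module pattern shared by the files
 * above: private state in inet_csk_ca(sk), hooks in tcp_congestion_ops. */
#include <linux/module.h>
#include <net/tcp.h>

struct toycc {
	u32 loss_cwnd;		/* cwnd remembered at the last loss */
};

static void toycc_init(struct sock *sk)
{
	struct toycc *ca = inet_csk_ca(sk);

	ca->loss_cwnd = 0;
}

static u32 toycc_ssthresh(struct sock *sk)
{
	struct toycc *ca = inet_csk_ca(sk);

	ca->loss_cwnd = tcp_sk(sk)->snd_cwnd;
	return max(tcp_sk(sk)->snd_cwnd >> 1U, 2U);
}

static u32 toycc_undo_cwnd(struct sock *sk)
{
	const struct toycc *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

static void toycc_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	if (!tcp_is_cwnd_limited(sk))
		return;
	tcp_reno_cong_avoid(sk, ack, acked);
}

static struct tcp_congestion_ops toycc_ops __read_mostly = {
	.init		= toycc_init,
	.ssthresh	= toycc_ssthresh,
	.undo_cwnd	= toycc_undo_cwnd,
	.cong_avoid	= toycc_cong_avoid,
	.owner		= THIS_MODULE,
	.name		= "toycc",
};

static int __init toycc_register(void)
{
	BUILD_BUG_ON(sizeof(struct toycc) > ICSK_CA_PRIV_SIZE);
	return tcp_register_congestion_control(&toycc_ops);
}

static void __exit toycc_unregister(void)
{
	tcp_unregister_congestion_control(&toycc_ops);
}

module_init(toycc_register);
module_exit(toycc_unregister);
MODULE_LICENSE("GPL");
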
sk                119 net/ipv4/tcp_input.c 			     void (*cad)(struct sock *sk, u32 ack_seq))
sk                140 net/ipv4/tcp_input.c static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
sk                151 net/ipv4/tcp_input.c 		dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
sk                162 net/ipv4/tcp_input.c static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
sk                164 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                176 net/ipv4/tcp_input.c 					       tcp_sk(sk)->advmss);
sk                180 net/ipv4/tcp_input.c 			tcp_gro_dev_warn(sk, skb, len);
sk                200 net/ipv4/tcp_input.c 			len -= tcp_sk(sk)->tcp_header_len;
sk                213 net/ipv4/tcp_input.c static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
sk                215 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                216 net/ipv4/tcp_input.c 	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
sk                225 net/ipv4/tcp_input.c void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
sk                227 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                229 net/ipv4/tcp_input.c 	tcp_incr_quickack(sk, max_quickacks);
sk                230 net/ipv4/tcp_input.c 	inet_csk_exit_pingpong_mode(sk);
sk                239 net/ipv4/tcp_input.c static bool tcp_in_quickack_mode(struct sock *sk)
sk                241 net/ipv4/tcp_input.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                242 net/ipv4/tcp_input.c 	const struct dst_entry *dst = __sk_dst_get(sk);
sk                245 net/ipv4/tcp_input.c 		(icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
sk                254 net/ipv4/tcp_input.c static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
sk                257 net/ipv4/tcp_input.c 		tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
sk                263 net/ipv4/tcp_input.c 		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
sk                272 net/ipv4/tcp_input.c static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
sk                274 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                283 net/ipv4/tcp_input.c 			tcp_enter_quickack_mode(sk, 2);
sk                286 net/ipv4/tcp_input.c 		if (tcp_ca_needs_ecn(sk))
sk                287 net/ipv4/tcp_input.c 			tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
sk                291 net/ipv4/tcp_input.c 			tcp_enter_quickack_mode(sk, 2);
sk                297 net/ipv4/tcp_input.c 		if (tcp_ca_needs_ecn(sk))
sk                298 net/ipv4/tcp_input.c 			tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
sk                304 net/ipv4/tcp_input.c static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
sk                306 net/ipv4/tcp_input.c 	if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
sk                307 net/ipv4/tcp_input.c 		__tcp_ecn_check_ce(sk, skb);
sk                334 net/ipv4/tcp_input.c static void tcp_sndbuf_expand(struct sock *sk)
sk                336 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                337 net/ipv4/tcp_input.c 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
sk                358 net/ipv4/tcp_input.c 	sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
sk                361 net/ipv4/tcp_input.c 	if (sk->sk_sndbuf < sndmem)
sk                362 net/ipv4/tcp_input.c 		WRITE_ONCE(sk->sk_sndbuf,
sk                363 net/ipv4/tcp_input.c 			   min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
sk                392 net/ipv4/tcp_input.c static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
sk                394 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                396 net/ipv4/tcp_input.c 	int truesize = tcp_win_from_space(sk, skb->truesize) >> 1;
sk                397 net/ipv4/tcp_input.c 	int window = tcp_win_from_space(sk, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
sk                401 net/ipv4/tcp_input.c 			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
sk                409 net/ipv4/tcp_input.c static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
sk                411 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                414 net/ipv4/tcp_input.c 	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
sk                417 net/ipv4/tcp_input.c 	if (room > 0 && !tcp_under_memory_pressure(sk)) {
sk                423 net/ipv4/tcp_input.c 		if (tcp_win_from_space(sk, skb->truesize) <= skb->len)
sk                426 net/ipv4/tcp_input.c 			incr = __tcp_grow_window(sk, skb);
sk                431 net/ipv4/tcp_input.c 			inet_csk(sk)->icsk_ack.quick |= 1;
sk                439 net/ipv4/tcp_input.c void tcp_init_buffer_space(struct sock *sk)
sk                441 net/ipv4/tcp_input.c 	int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
sk                442 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                445 net/ipv4/tcp_input.c 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
sk                446 net/ipv4/tcp_input.c 		tcp_sndbuf_expand(sk);
sk                453 net/ipv4/tcp_input.c 	maxwin = tcp_full_space(sk);
sk                475 net/ipv4/tcp_input.c static void tcp_clamp_window(struct sock *sk)
sk                477 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                478 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                479 net/ipv4/tcp_input.c 	struct net *net = sock_net(sk);
sk                483 net/ipv4/tcp_input.c 	if (sk->sk_rcvbuf < net->ipv4.sysctl_tcp_rmem[2] &&
sk                484 net/ipv4/tcp_input.c 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
sk                485 net/ipv4/tcp_input.c 	    !tcp_under_memory_pressure(sk) &&
sk                486 net/ipv4/tcp_input.c 	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
sk                487 net/ipv4/tcp_input.c 		WRITE_ONCE(sk->sk_rcvbuf,
sk                488 net/ipv4/tcp_input.c 			   min(atomic_read(&sk->sk_rmem_alloc),
sk                491 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
sk                502 net/ipv4/tcp_input.c void tcp_initialize_rcv_mss(struct sock *sk)
sk                504 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                511 net/ipv4/tcp_input.c 	inet_csk(sk)->icsk_ack.rcv_mss = hint;
sk                576 net/ipv4/tcp_input.c static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
sk                579 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                586 net/ipv4/tcp_input.c 	    TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
sk                603 net/ipv4/tcp_input.c void tcp_rcv_space_adjust(struct sock *sk)
sk                605 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                609 net/ipv4/tcp_input.c 	trace_tcp_rcv_space_adjust(sk);
sk                630 net/ipv4/tcp_input.c 	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
sk                631 net/ipv4/tcp_input.c 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
sk                646 net/ipv4/tcp_input.c 		while (tcp_win_from_space(sk, rcvmem) < tp->advmss)
sk                651 net/ipv4/tcp_input.c 			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
sk                652 net/ipv4/tcp_input.c 		if (rcvbuf > sk->sk_rcvbuf) {
sk                653 net/ipv4/tcp_input.c 			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
sk                656 net/ipv4/tcp_input.c 			tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
sk                676 net/ipv4/tcp_input.c static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
sk                678 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                679 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                682 net/ipv4/tcp_input.c 	inet_csk_schedule_ack(sk);
sk                684 net/ipv4/tcp_input.c 	tcp_measure_rcv_mss(sk, skb);
sk                694 net/ipv4/tcp_input.c 		tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
sk                710 net/ipv4/tcp_input.c 			tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
sk                711 net/ipv4/tcp_input.c 			sk_mem_reclaim(sk);
sk                716 net/ipv4/tcp_input.c 	tcp_ecn_check_ce(sk, skb);
sk                719 net/ipv4/tcp_input.c 		tcp_grow_window(sk, skb);
sk                731 net/ipv4/tcp_input.c static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
sk                733 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                782 net/ipv4/tcp_input.c 			tp->mdev_max_us = tcp_rto_min_us(sk);
sk                784 net/ipv4/tcp_input.c 			tcp_bpf_rtt(sk);
sk                790 net/ipv4/tcp_input.c 		tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
sk                794 net/ipv4/tcp_input.c 		tcp_bpf_rtt(sk);
sk                799 net/ipv4/tcp_input.c static void tcp_update_pacing_rate(struct sock *sk)
sk                801 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                816 net/ipv4/tcp_input.c 		rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
sk                818 net/ipv4/tcp_input.c 		rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
sk                829 net/ipv4/tcp_input.c 	WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
sk                830 net/ipv4/tcp_input.c 					     sk->sk_max_pacing_rate));
sk                836 net/ipv4/tcp_input.c static void tcp_set_rto(struct sock *sk)
sk                838 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                849 net/ipv4/tcp_input.c 	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
sk                860 net/ipv4/tcp_input.c 	tcp_bound_rto(sk);
sk                884 net/ipv4/tcp_input.c static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
sk                887 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                899 net/ipv4/tcp_input.c 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
sk                906 net/ipv4/tcp_input.c 				       sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
sk                911 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk),
sk               1093 net/ipv4/tcp_input.c static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
sk               1097 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1105 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
sk               1114 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk),
sk               1149 net/ipv4/tcp_input.c static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
sk               1188 net/ipv4/tcp_input.c 		err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
sk               1198 net/ipv4/tcp_input.c static u8 tcp_sacktag_one(struct sock *sk,
sk               1204 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1282 net/ipv4/tcp_input.c static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
sk               1288 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1300 net/ipv4/tcp_input.c 	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
sk               1303 net/ipv4/tcp_input.c 	tcp_rate_skb_delivered(sk, skb, state->rate);
sk               1332 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
sk               1350 net/ipv4/tcp_input.c 	if (skb == tcp_highest_sack(sk))
sk               1351 net/ipv4/tcp_input.c 		tcp_advance_highest_sack(sk, skb);
sk               1357 net/ipv4/tcp_input.c 	tcp_rtx_queue_unlink_and_free(skb, sk);
sk               1359 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
sk               1396 net/ipv4/tcp_input.c static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
sk               1401 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1500 net/ipv4/tcp_input.c 	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
sk               1518 net/ipv4/tcp_input.c 		tcp_shifted_skb(sk, prev, skb, state, pcount,
sk               1528 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
sk               1532 net/ipv4/tcp_input.c static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
sk               1538 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1551 net/ipv4/tcp_input.c 			in_sack = tcp_match_skb_to_sack(sk, skb,
sk               1563 net/ipv4/tcp_input.c 			tmp = tcp_shift_skb_data(sk, skb, state,
sk               1573 net/ipv4/tcp_input.c 				in_sack = tcp_match_skb_to_sack(sk, skb,
sk               1584 net/ipv4/tcp_input.c 				tcp_sacktag_one(sk,
sk               1592 net/ipv4/tcp_input.c 			tcp_rate_skb_delivered(sk, skb, state->rate);
sk               1598 net/ipv4/tcp_input.c 				tcp_advance_highest_sack(sk, skb);
sk               1604 net/ipv4/tcp_input.c static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk, u32 seq)
sk               1606 net/ipv4/tcp_input.c 	struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
sk               1625 net/ipv4/tcp_input.c static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
sk               1631 net/ipv4/tcp_input.c 	return tcp_sacktag_bsearch(sk, skip_to_seq);
sk               1635 net/ipv4/tcp_input.c 						struct sock *sk,
sk               1644 net/ipv4/tcp_input.c 		skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
sk               1645 net/ipv4/tcp_input.c 		skb = tcp_sacktag_walk(skb, sk, NULL, state,
sk               1659 net/ipv4/tcp_input.c tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
sk               1662 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1679 net/ipv4/tcp_input.c 		tcp_highest_sack_reset(sk);
sk               1681 net/ipv4/tcp_input.c 	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
sk               1724 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk), mib_idx);
sk               1753 net/ipv4/tcp_input.c 	state->mss_now = tcp_current_mss(sk);
sk               1788 net/ipv4/tcp_input.c 				skb = tcp_sacktag_skip(skb, sk, start_seq);
sk               1789 net/ipv4/tcp_input.c 				skb = tcp_sacktag_walk(skb, sk, next_dup,
sk               1800 net/ipv4/tcp_input.c 			skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
sk               1807 net/ipv4/tcp_input.c 				skb = tcp_highest_sack(sk);
sk               1814 net/ipv4/tcp_input.c 			skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
sk               1821 net/ipv4/tcp_input.c 			skb = tcp_highest_sack(sk);
sk               1825 net/ipv4/tcp_input.c 		skb = tcp_sacktag_skip(skb, sk, start_seq);
sk               1828 net/ipv4/tcp_input.c 		skb = tcp_sacktag_walk(skb, sk, next_dup, state,
sk               1843 net/ipv4/tcp_input.c 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)
sk               1844 net/ipv4/tcp_input.c 		tcp_check_sack_reordering(sk, state->reord, 0);
sk               1879 net/ipv4/tcp_input.c static void tcp_check_reno_reordering(struct sock *sk, const int addend)
sk               1881 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1887 net/ipv4/tcp_input.c 			       sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
sk               1889 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
sk               1894 net/ipv4/tcp_input.c static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
sk               1897 net/ipv4/tcp_input.c 		struct tcp_sock *tp = tcp_sk(sk);
sk               1902 net/ipv4/tcp_input.c 		tcp_check_reno_reordering(sk, 0);
sk               1912 net/ipv4/tcp_input.c static void tcp_remove_reno_sacks(struct sock *sk, int acked)
sk               1914 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1924 net/ipv4/tcp_input.c 	tcp_check_reno_reordering(sk, acked);
sk               1949 net/ipv4/tcp_input.c static bool tcp_is_rack(const struct sock *sk)
sk               1951 net/ipv4/tcp_input.c 	return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
sk               1958 net/ipv4/tcp_input.c static void tcp_timeout_mark_lost(struct sock *sk)
sk               1960 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1964 net/ipv4/tcp_input.c 	head = tcp_rtx_queue_head(sk);
sk               1967 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
sk               1979 net/ipv4/tcp_input.c 		else if (tcp_is_rack(sk) && skb != head &&
sk               1982 net/ipv4/tcp_input.c 		tcp_mark_skb_lost(sk, skb);
sk               1989 net/ipv4/tcp_input.c void tcp_enter_loss(struct sock *sk)
sk               1991 net/ipv4/tcp_input.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1992 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1993 net/ipv4/tcp_input.c 	struct net *net = sock_net(sk);
sk               1996 net/ipv4/tcp_input.c 	tcp_timeout_mark_lost(sk);
sk               2002 net/ipv4/tcp_input.c 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
sk               2004 net/ipv4/tcp_input.c 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
sk               2005 net/ipv4/tcp_input.c 		tcp_ca_event(sk, CA_EVENT_LOSS);
sk               2019 net/ipv4/tcp_input.c 	tcp_set_ca_state(sk, TCP_CA_Loss);
sk               2029 net/ipv4/tcp_input.c 		   !inet_csk(sk)->icsk_mtup.probe_size;
sk               2042 net/ipv4/tcp_input.c static bool tcp_check_sack_reneging(struct sock *sk, int flag)
sk               2045 net/ipv4/tcp_input.c 		struct tcp_sock *tp = tcp_sk(sk);
sk               2049 net/ipv4/tcp_input.c 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk               2169 net/ipv4/tcp_input.c static bool tcp_time_to_recover(struct sock *sk, int flag)
sk               2171 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2178 net/ipv4/tcp_input.c 	if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
sk               2190 net/ipv4/tcp_input.c static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
sk               2192 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2207 net/ipv4/tcp_input.c 		skb = tcp_rtx_queue_head(sk);
sk               2235 net/ipv4/tcp_input.c 			    tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
sk               2251 net/ipv4/tcp_input.c static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
sk               2253 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2258 net/ipv4/tcp_input.c 			tcp_mark_head_lost(sk, sacked_upto, 0);
sk               2260 net/ipv4/tcp_input.c 			tcp_mark_head_lost(sk, 1, 1);
sk               2305 net/ipv4/tcp_input.c static bool tcp_any_retrans_done(const struct sock *sk)
sk               2307 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               2313 net/ipv4/tcp_input.c 	skb = tcp_rtx_queue_head(sk);
sk               2320 net/ipv4/tcp_input.c static void DBGUNDO(struct sock *sk, const char *msg)
sk               2323 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2324 net/ipv4/tcp_input.c 	struct inet_sock *inet = inet_sk(sk);
sk               2326 net/ipv4/tcp_input.c 	if (sk->sk_family == AF_INET) {
sk               2335 net/ipv4/tcp_input.c 	else if (sk->sk_family == AF_INET6) {
sk               2338 net/ipv4/tcp_input.c 			 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
sk               2347 net/ipv4/tcp_input.c static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
sk               2349 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2354 net/ipv4/tcp_input.c 		skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
sk               2362 net/ipv4/tcp_input.c 		const struct inet_connection_sock *icsk = inet_csk(sk);
sk               2364 net/ipv4/tcp_input.c 		tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
sk               2382 net/ipv4/tcp_input.c static bool tcp_try_undo_recovery(struct sock *sk)
sk               2384 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2392 net/ipv4/tcp_input.c 		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
sk               2393 net/ipv4/tcp_input.c 		tcp_undo_cwnd_reduction(sk, false);
sk               2394 net/ipv4/tcp_input.c 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
sk               2399 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), mib_idx);
sk               2407 net/ipv4/tcp_input.c 		if (!tcp_any_retrans_done(sk))
sk               2411 net/ipv4/tcp_input.c 	tcp_set_ca_state(sk, TCP_CA_Open);
sk               2417 net/ipv4/tcp_input.c static bool tcp_try_undo_dsack(struct sock *sk)
sk               2419 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2424 net/ipv4/tcp_input.c 		DBGUNDO(sk, "D-SACK");
sk               2425 net/ipv4/tcp_input.c 		tcp_undo_cwnd_reduction(sk, false);
sk               2426 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
sk               2433 net/ipv4/tcp_input.c static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
sk               2435 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2438 net/ipv4/tcp_input.c 		tcp_undo_cwnd_reduction(sk, true);
sk               2440 net/ipv4/tcp_input.c 		DBGUNDO(sk, "partial loss");
sk               2441 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
sk               2443 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk),
sk               2445 net/ipv4/tcp_input.c 		inet_csk(sk)->icsk_retransmits = 0;
sk               2447 net/ipv4/tcp_input.c 			tcp_set_ca_state(sk, TCP_CA_Open);
sk               2464 net/ipv4/tcp_input.c static void tcp_init_cwnd_reduction(struct sock *sk)
sk               2466 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2474 net/ipv4/tcp_input.c 	tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
sk               2478 net/ipv4/tcp_input.c void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
sk               2480 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2505 net/ipv4/tcp_input.c static inline void tcp_end_cwnd_reduction(struct sock *sk)
sk               2507 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2509 net/ipv4/tcp_input.c 	if (inet_csk(sk)->icsk_ca_ops->cong_control)
sk               2514 net/ipv4/tcp_input.c 	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
sk               2518 net/ipv4/tcp_input.c 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
sk               2522 net/ipv4/tcp_input.c void tcp_enter_cwr(struct sock *sk)
sk               2524 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2527 net/ipv4/tcp_input.c 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
sk               2529 net/ipv4/tcp_input.c 		tcp_init_cwnd_reduction(sk);
sk               2530 net/ipv4/tcp_input.c 		tcp_set_ca_state(sk, TCP_CA_CWR);
sk               2535 net/ipv4/tcp_input.c static void tcp_try_keep_open(struct sock *sk)
sk               2537 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2540 net/ipv4/tcp_input.c 	if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
sk               2543 net/ipv4/tcp_input.c 	if (inet_csk(sk)->icsk_ca_state != state) {
sk               2544 net/ipv4/tcp_input.c 		tcp_set_ca_state(sk, state);
sk               2549 net/ipv4/tcp_input.c static void tcp_try_to_open(struct sock *sk, int flag)
sk               2551 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2555 net/ipv4/tcp_input.c 	if (!tcp_any_retrans_done(sk))
sk               2559 net/ipv4/tcp_input.c 		tcp_enter_cwr(sk);
sk               2561 net/ipv4/tcp_input.c 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
sk               2562 net/ipv4/tcp_input.c 		tcp_try_keep_open(sk);
sk               2566 net/ipv4/tcp_input.c static void tcp_mtup_probe_failed(struct sock *sk)
sk               2568 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2572 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
sk               2575 net/ipv4/tcp_input.c static void tcp_mtup_probe_success(struct sock *sk)
sk               2577 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2578 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2581 net/ipv4/tcp_input.c 	tp->prior_ssthresh = tcp_current_ssthresh(sk);
sk               2583 net/ipv4/tcp_input.c 		       tcp_mss_to_mtu(sk, tp->mss_cache) /
sk               2587 net/ipv4/tcp_input.c 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
sk               2591 net/ipv4/tcp_input.c 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
sk               2592 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
sk               2599 net/ipv4/tcp_input.c void tcp_simple_retransmit(struct sock *sk)
sk               2601 net/ipv4/tcp_input.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               2602 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2604 net/ipv4/tcp_input.c 	unsigned int mss = tcp_current_mss(sk);
sk               2606 net/ipv4/tcp_input.c 	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
sk               2634 net/ipv4/tcp_input.c 		tp->snd_ssthresh = tcp_current_ssthresh(sk);
sk               2637 net/ipv4/tcp_input.c 		tcp_set_ca_state(sk, TCP_CA_Loss);
sk               2639 net/ipv4/tcp_input.c 	tcp_xmit_retransmit_queue(sk);
sk               2643 net/ipv4/tcp_input.c void tcp_enter_recovery(struct sock *sk, bool ece_ack)
sk               2645 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2653 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), mib_idx);
sk               2658 net/ipv4/tcp_input.c 	if (!tcp_in_cwnd_reduction(sk)) {
sk               2660 net/ipv4/tcp_input.c 			tp->prior_ssthresh = tcp_current_ssthresh(sk);
sk               2661 net/ipv4/tcp_input.c 		tcp_init_cwnd_reduction(sk);
sk               2663 net/ipv4/tcp_input.c 	tcp_set_ca_state(sk, TCP_CA_Recovery);
sk               2669 net/ipv4/tcp_input.c static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
sk               2672 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2676 net/ipv4/tcp_input.c 	    tcp_try_undo_loss(sk, false))
sk               2684 net/ipv4/tcp_input.c 		    tcp_try_undo_loss(sk, true))
sk               2696 net/ipv4/tcp_input.c 			if (!tcp_write_queue_empty(sk) &&
sk               2707 net/ipv4/tcp_input.c 		tcp_try_undo_recovery(sk);
sk               2715 net/ipv4/tcp_input.c 			tcp_add_reno_sack(sk, num_dupack);
sk               2723 net/ipv4/tcp_input.c static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
sk               2725 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2731 net/ipv4/tcp_input.c 		tcp_check_sack_reordering(sk, prior_snd_una, 1);
sk               2741 net/ipv4/tcp_input.c 		if (!tcp_any_retrans_done(sk))
sk               2744 net/ipv4/tcp_input.c 		DBGUNDO(sk, "partial recovery");
sk               2745 net/ipv4/tcp_input.c 		tcp_undo_cwnd_reduction(sk, true);
sk               2746 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
sk               2747 net/ipv4/tcp_input.c 		tcp_try_keep_open(sk);
sk               2753 net/ipv4/tcp_input.c static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
sk               2755 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2757 net/ipv4/tcp_input.c 	if (tcp_rtx_queue_empty(sk))
sk               2761 net/ipv4/tcp_input.c 		tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
sk               2762 net/ipv4/tcp_input.c 	} else if (tcp_is_rack(sk)) {
sk               2765 net/ipv4/tcp_input.c 		tcp_rack_mark_lost(sk);
sk               2771 net/ipv4/tcp_input.c static bool tcp_force_fast_retransmit(struct sock *sk)
sk               2773 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2791 net/ipv4/tcp_input.c static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
sk               2794 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2795 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2798 net/ipv4/tcp_input.c 				      tcp_force_fast_retransmit(sk));
sk               2809 net/ipv4/tcp_input.c 	if (tcp_check_sack_reneging(sk, flag))
sk               2826 net/ipv4/tcp_input.c 				tcp_end_cwnd_reduction(sk);
sk               2827 net/ipv4/tcp_input.c 				tcp_set_ca_state(sk, TCP_CA_Open);
sk               2834 net/ipv4/tcp_input.c 			if (tcp_try_undo_recovery(sk))
sk               2836 net/ipv4/tcp_input.c 			tcp_end_cwnd_reduction(sk);
sk               2846 net/ipv4/tcp_input.c 				tcp_add_reno_sack(sk, num_dupack);
sk               2848 net/ipv4/tcp_input.c 			if (tcp_try_undo_partial(sk, prior_snd_una))
sk               2852 net/ipv4/tcp_input.c 				  tcp_force_fast_retransmit(sk);
sk               2854 net/ipv4/tcp_input.c 		if (tcp_try_undo_dsack(sk)) {
sk               2855 net/ipv4/tcp_input.c 			tcp_try_keep_open(sk);
sk               2858 net/ipv4/tcp_input.c 		tcp_identify_packet_loss(sk, ack_flag);
sk               2861 net/ipv4/tcp_input.c 		tcp_process_loss(sk, flag, num_dupack, rexmit);
sk               2862 net/ipv4/tcp_input.c 		tcp_identify_packet_loss(sk, ack_flag);
sk               2872 net/ipv4/tcp_input.c 			tcp_add_reno_sack(sk, num_dupack);
sk               2876 net/ipv4/tcp_input.c 			tcp_try_undo_dsack(sk);
sk               2878 net/ipv4/tcp_input.c 		tcp_identify_packet_loss(sk, ack_flag);
sk               2879 net/ipv4/tcp_input.c 		if (!tcp_time_to_recover(sk, flag)) {
sk               2880 net/ipv4/tcp_input.c 			tcp_try_to_open(sk, flag);
sk               2888 net/ipv4/tcp_input.c 			tcp_mtup_probe_failed(sk);
sk               2891 net/ipv4/tcp_input.c 			tcp_simple_retransmit(sk);
sk               2896 net/ipv4/tcp_input.c 		tcp_enter_recovery(sk, (flag & FLAG_ECE));
sk               2900 net/ipv4/tcp_input.c 	if (!tcp_is_rack(sk) && do_lost)
sk               2901 net/ipv4/tcp_input.c 		tcp_update_scoreboard(sk, fast_rexmit);
sk               2905 net/ipv4/tcp_input.c static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
sk               2907 net/ipv4/tcp_input.c 	u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
sk               2908 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2921 net/ipv4/tcp_input.c static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
sk               2925 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               2958 net/ipv4/tcp_input.c 	tcp_update_rtt_min(sk, ca_rtt_us, flag);
sk               2959 net/ipv4/tcp_input.c 	tcp_rtt_estimator(sk, seq_rtt_us);
sk               2960 net/ipv4/tcp_input.c 	tcp_set_rto(sk);
sk               2963 net/ipv4/tcp_input.c 	inet_csk(sk)->icsk_backoff = 0;
sk               2968 net/ipv4/tcp_input.c void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
sk               2976 net/ipv4/tcp_input.c 	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs);
sk               2980 net/ipv4/tcp_input.c static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk               2982 net/ipv4/tcp_input.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               2984 net/ipv4/tcp_input.c 	icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
sk               2985 net/ipv4/tcp_input.c 	tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32;
sk               2991 net/ipv4/tcp_input.c void tcp_rearm_rto(struct sock *sk)
sk               2993 net/ipv4/tcp_input.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               2994 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3003 net/ipv4/tcp_input.c 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
sk               3005 net/ipv4/tcp_input.c 		u32 rto = inet_csk(sk)->icsk_rto;
sk               3009 net/ipv4/tcp_input.c 			s64 delta_us = tcp_rto_delta_us(sk);
sk               3015 net/ipv4/tcp_input.c 		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
sk               3016 net/ipv4/tcp_input.c 				     TCP_RTO_MAX, tcp_rtx_queue_head(sk));
sk               3021 net/ipv4/tcp_input.c static void tcp_set_xmit_timer(struct sock *sk)
sk               3023 net/ipv4/tcp_input.c 	if (!tcp_schedule_loss_probe(sk, true))
sk               3024 net/ipv4/tcp_input.c 		tcp_rearm_rto(sk);
sk               3028 net/ipv4/tcp_input.c static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
sk               3030 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3036 net/ipv4/tcp_input.c 	if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
sk               3048 net/ipv4/tcp_input.c static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
sk               3059 net/ipv4/tcp_input.c 	    before(shinfo->tskey, tcp_sk(sk)->snd_una)) {
sk               3061 net/ipv4/tcp_input.c 			__skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK);
sk               3070 net/ipv4/tcp_input.c static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
sk               3074 net/ipv4/tcp_input.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               3076 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3091 net/ipv4/tcp_input.c 	for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) {
sk               3097 net/ipv4/tcp_input.c 		tcp_ack_tstamp(sk, skb, prior_snd_una);
sk               3105 net/ipv4/tcp_input.c 			acked_pcount = tcp_tso_acked(sk, skb);
sk               3143 net/ipv4/tcp_input.c 		tcp_rate_skb_delivered(sk, skb, sack->rate);
sk               3167 net/ipv4/tcp_input.c 		tcp_highest_sack_replace(sk, skb, next);
sk               3168 net/ipv4/tcp_input.c 		tcp_rtx_queue_unlink_and_free(skb, sk);
sk               3172 net/ipv4/tcp_input.c 		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
sk               3199 net/ipv4/tcp_input.c 	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
sk               3206 net/ipv4/tcp_input.c 			tcp_mtup_probe_success(sk);
sk               3210 net/ipv4/tcp_input.c 			tcp_remove_reno_sacks(sk, pkts_acked);
sk               3225 net/ipv4/tcp_input.c 				tcp_check_sack_reordering(sk, reord, 0);
sk               3245 net/ipv4/tcp_input.c 		icsk->icsk_ca_ops->pkts_acked(sk, &sample);
sk               3253 net/ipv4/tcp_input.c 		icsk = inet_csk(sk);
sk               3274 net/ipv4/tcp_input.c static void tcp_ack_probe(struct sock *sk)
sk               3276 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               3277 net/ipv4/tcp_input.c 	struct sk_buff *head = tcp_send_head(sk);
sk               3278 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               3285 net/ipv4/tcp_input.c 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
sk               3290 net/ipv4/tcp_input.c 		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
sk               3292 net/ipv4/tcp_input.c 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
sk               3297 net/ipv4/tcp_input.c static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
sk               3300 net/ipv4/tcp_input.c 		inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
sk               3304 net/ipv4/tcp_input.c static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
sk               3312 net/ipv4/tcp_input.c 	if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
sk               3323 net/ipv4/tcp_input.c static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
sk               3326 net/ipv4/tcp_input.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               3329 net/ipv4/tcp_input.c 		icsk->icsk_ca_ops->cong_control(sk, rs);
sk               3333 net/ipv4/tcp_input.c 	if (tcp_in_cwnd_reduction(sk)) {
sk               3335 net/ipv4/tcp_input.c 		tcp_cwnd_reduction(sk, acked_sacked, flag);
sk               3336 net/ipv4/tcp_input.c 	} else if (tcp_may_raise_cwnd(sk, flag)) {
sk               3338 net/ipv4/tcp_input.c 		tcp_cong_avoid(sk, ack, acked_sacked);
sk               3340 net/ipv4/tcp_input.c 	tcp_update_pacing_rate(sk);
sk               3380 net/ipv4/tcp_input.c static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
sk               3383 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3401 net/ipv4/tcp_input.c 			tcp_fast_path_check(sk);
sk               3403 net/ipv4/tcp_input.c 			if (!tcp_write_queue_empty(sk))
sk               3404 net/ipv4/tcp_input.c 				tcp_slow_start_after_idle_check(sk);
sk               3408 net/ipv4/tcp_input.c 				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
sk               3454 net/ipv4/tcp_input.c static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
sk               3459 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3460 net/ipv4/tcp_input.c 	struct net *net = sock_net(sk);
sk               3482 net/ipv4/tcp_input.c 		tcp_send_ack(sk);
sk               3512 net/ipv4/tcp_input.c static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
sk               3514 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3526 net/ipv4/tcp_input.c 		tcp_init_cwnd_reduction(sk);
sk               3527 net/ipv4/tcp_input.c 		tcp_set_ca_state(sk, TCP_CA_CWR);
sk               3528 net/ipv4/tcp_input.c 		tcp_end_cwnd_reduction(sk);
sk               3529 net/ipv4/tcp_input.c 		tcp_try_keep_open(sk);
sk               3530 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk),
sk               3539 net/ipv4/tcp_input.c static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
sk               3541 net/ipv4/tcp_input.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               3544 net/ipv4/tcp_input.c 		icsk->icsk_ca_ops->in_ack_event(sk, flags);
sk               3551 net/ipv4/tcp_input.c static void tcp_xmit_recovery(struct sock *sk, int rexmit)
sk               3553 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3555 net/ipv4/tcp_input.c 	if (rexmit == REXMIT_NONE || sk->sk_state == TCP_SYN_SENT)
sk               3559 net/ipv4/tcp_input.c 		__tcp_push_pending_frames(sk, tcp_current_mss(sk),
sk               3565 net/ipv4/tcp_input.c 	tcp_xmit_retransmit_queue(sk);
sk               3569 net/ipv4/tcp_input.c static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
sk               3571 net/ipv4/tcp_input.c 	const struct net *net = sock_net(sk);
sk               3572 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3585 net/ipv4/tcp_input.c static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
sk               3587 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               3588 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3606 net/ipv4/tcp_input.c 	prefetch(sk->tcp_rtx_queue.rb_node);
sk               3615 net/ipv4/tcp_input.c 				tcp_send_challenge_ack(sk, skb);
sk               3634 net/ipv4/tcp_input.c 				icsk->icsk_clean_acked(sk, ack);
sk               3657 net/ipv4/tcp_input.c 		tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
sk               3659 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
sk               3666 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
sk               3668 net/ipv4/tcp_input.c 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
sk               3671 net/ipv4/tcp_input.c 			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
sk               3682 net/ipv4/tcp_input.c 		tcp_in_ack_event(sk, ack_ev_flags);
sk               3688 net/ipv4/tcp_input.c 	sk->sk_err_soft = 0;
sk               3695 net/ipv4/tcp_input.c 	flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state);
sk               3697 net/ipv4/tcp_input.c 	tcp_rack_update_reo_wnd(sk, &rs);
sk               3700 net/ipv4/tcp_input.c 		tcp_process_tlp_ack(sk, ack, flag);
sk               3703 net/ipv4/tcp_input.c 		tcp_set_xmit_timer(sk);
sk               3705 net/ipv4/tcp_input.c 	if (tcp_ack_is_dubious(sk, flag)) {
sk               3712 net/ipv4/tcp_input.c 		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
sk               3717 net/ipv4/tcp_input.c 		sk_dst_confirm(sk);
sk               3719 net/ipv4/tcp_input.c 	delivered = tcp_newly_delivered(sk, delivered, flag);
sk               3722 net/ipv4/tcp_input.c 	tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
sk               3723 net/ipv4/tcp_input.c 	tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
sk               3724 net/ipv4/tcp_input.c 	tcp_xmit_recovery(sk, rexmit);
sk               3730 net/ipv4/tcp_input.c 		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
sk               3732 net/ipv4/tcp_input.c 		tcp_newly_delivered(sk, delivered, flag);
sk               3738 net/ipv4/tcp_input.c 	tcp_ack_probe(sk);
sk               3741 net/ipv4/tcp_input.c 		tcp_process_tlp_ack(sk, ack, flag);
sk               3749 net/ipv4/tcp_input.c 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
sk               3751 net/ipv4/tcp_input.c 		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
sk               3753 net/ipv4/tcp_input.c 		tcp_newly_delivered(sk, delivered, flag);
sk               3754 net/ipv4/tcp_input.c 		tcp_xmit_recovery(sk, rexmit);
sk               4060 net/ipv4/tcp_input.c static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
sk               4062 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               4077 net/ipv4/tcp_input.c 		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
sk               4080 net/ipv4/tcp_input.c static inline bool tcp_paws_discard(const struct sock *sk,
sk               4083 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               4086 net/ipv4/tcp_input.c 	       !tcp_disordered_ack(sk, skb);
sk               4109 net/ipv4/tcp_input.c void tcp_reset(struct sock *sk)
sk               4111 net/ipv4/tcp_input.c 	trace_tcp_receive_reset(sk);
sk               4114 net/ipv4/tcp_input.c 	switch (sk->sk_state) {
sk               4116 net/ipv4/tcp_input.c 		sk->sk_err = ECONNREFUSED;
sk               4119 net/ipv4/tcp_input.c 		sk->sk_err = EPIPE;
sk               4124 net/ipv4/tcp_input.c 		sk->sk_err = ECONNRESET;
sk               4129 net/ipv4/tcp_input.c 	tcp_write_queue_purge(sk);
sk               4130 net/ipv4/tcp_input.c 	tcp_done(sk);
sk               4132 net/ipv4/tcp_input.c 	if (!sock_flag(sk, SOCK_DEAD))
sk               4133 net/ipv4/tcp_input.c 		sk->sk_error_report(sk);
sk               4150 net/ipv4/tcp_input.c void tcp_fin(struct sock *sk)
sk               4152 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               4154 net/ipv4/tcp_input.c 	inet_csk_schedule_ack(sk);
sk               4156 net/ipv4/tcp_input.c 	sk->sk_shutdown |= RCV_SHUTDOWN;
sk               4157 net/ipv4/tcp_input.c 	sock_set_flag(sk, SOCK_DONE);
sk               4159 net/ipv4/tcp_input.c 	switch (sk->sk_state) {
sk               4163 net/ipv4/tcp_input.c 		tcp_set_state(sk, TCP_CLOSE_WAIT);
sk               4164 net/ipv4/tcp_input.c 		inet_csk_enter_pingpong_mode(sk);
sk               4182 net/ipv4/tcp_input.c 		tcp_send_ack(sk);
sk               4183 net/ipv4/tcp_input.c 		tcp_set_state(sk, TCP_CLOSING);
sk               4187 net/ipv4/tcp_input.c 		tcp_send_ack(sk);
sk               4188 net/ipv4/tcp_input.c 		tcp_time_wait(sk, TCP_TIME_WAIT, 0);
sk               4195 net/ipv4/tcp_input.c 		       __func__, sk->sk_state);
sk               4205 net/ipv4/tcp_input.c 	sk_mem_reclaim(sk);
sk               4207 net/ipv4/tcp_input.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk               4208 net/ipv4/tcp_input.c 		sk->sk_state_change(sk);
sk               4211 net/ipv4/tcp_input.c 		if (sk->sk_shutdown == SHUTDOWN_MASK ||
sk               4212 net/ipv4/tcp_input.c 		    sk->sk_state == TCP_CLOSE)
sk               4213 net/ipv4/tcp_input.c 			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
sk               4215 net/ipv4/tcp_input.c 			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
sk               4232 net/ipv4/tcp_input.c static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
sk               4234 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               4236 net/ipv4/tcp_input.c 	if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
sk               4244 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), mib_idx);
sk               4252 net/ipv4/tcp_input.c static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
sk               4254 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               4257 net/ipv4/tcp_input.c 		tcp_dsack_set(sk, seq, end_seq);
sk               4262 net/ipv4/tcp_input.c static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
sk               4269 net/ipv4/tcp_input.c 	if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq)
sk               4270 net/ipv4/tcp_input.c 		sk_rethink_txhash(sk);
sk               4273 net/ipv4/tcp_input.c static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
sk               4275 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               4279 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
sk               4280 net/ipv4/tcp_input.c 		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
sk               4282 net/ipv4/tcp_input.c 		if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
sk               4285 net/ipv4/tcp_input.c 			tcp_rcv_spurious_retrans(sk, skb);
sk               4288 net/ipv4/tcp_input.c 			tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
sk               4292 net/ipv4/tcp_input.c 	tcp_send_ack(sk);
sk               4323 net/ipv4/tcp_input.c static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
sk               4325 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               4352 net/ipv4/tcp_input.c 			tcp_send_ack(sk);
sk               4415 net/ipv4/tcp_input.c static bool tcp_try_coalesce(struct sock *sk,
sk               4436 net/ipv4/tcp_input.c 	atomic_add(delta, &sk->sk_rmem_alloc);
sk               4437 net/ipv4/tcp_input.c 	sk_mem_charge(sk, delta);
sk               4438 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
sk               4452 net/ipv4/tcp_input.c static bool tcp_ooo_try_coalesce(struct sock *sk,
sk               4457 net/ipv4/tcp_input.c 	bool res = tcp_try_coalesce(sk, to, from, fragstolen);
sk               4469 net/ipv4/tcp_input.c static void tcp_drop(struct sock *sk, struct sk_buff *skb)
sk               4471 net/ipv4/tcp_input.c 	sk_drops_add(sk, skb);
sk               4478 net/ipv4/tcp_input.c static void tcp_ofo_queue(struct sock *sk)
sk               4480 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               4496 net/ipv4/tcp_input.c 			tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
sk               4502 net/ipv4/tcp_input.c 			tcp_drop(sk, skb);
sk               4506 net/ipv4/tcp_input.c 		tail = skb_peek_tail(&sk->sk_receive_queue);
sk               4507 net/ipv4/tcp_input.c 		eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
sk               4511 net/ipv4/tcp_input.c 			__skb_queue_tail(&sk->sk_receive_queue, skb);
sk               4516 net/ipv4/tcp_input.c 			tcp_fin(sk);
sk               4525 net/ipv4/tcp_input.c static bool tcp_prune_ofo_queue(struct sock *sk);
sk               4526 net/ipv4/tcp_input.c static int tcp_prune_queue(struct sock *sk);
sk               4528 net/ipv4/tcp_input.c static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
sk               4531 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
sk               4532 net/ipv4/tcp_input.c 	    !sk_rmem_schedule(sk, skb, size)) {
sk               4534 net/ipv4/tcp_input.c 		if (tcp_prune_queue(sk) < 0)
sk               4537 net/ipv4/tcp_input.c 		while (!sk_rmem_schedule(sk, skb, size)) {
sk               4538 net/ipv4/tcp_input.c 			if (!tcp_prune_ofo_queue(sk))
sk               4545 net/ipv4/tcp_input.c static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
sk               4547 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               4553 net/ipv4/tcp_input.c 	tcp_ecn_check_ce(sk, skb);
sk               4555 net/ipv4/tcp_input.c 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
sk               4556 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
sk               4557 net/ipv4/tcp_input.c 		tcp_drop(sk, skb);
sk               4563 net/ipv4/tcp_input.c 	inet_csk_schedule_ack(sk);
sk               4566 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
sk               4587 net/ipv4/tcp_input.c 	if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
sk               4590 net/ipv4/tcp_input.c 		tcp_grow_window(sk, skb);
sk               4614 net/ipv4/tcp_input.c 				NET_INC_STATS(sock_net(sk),
sk               4616 net/ipv4/tcp_input.c 				tcp_drop(sk, skb);
sk               4618 net/ipv4/tcp_input.c 				tcp_dsack_set(sk, seq, end_seq);
sk               4623 net/ipv4/tcp_input.c 				tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
sk               4630 net/ipv4/tcp_input.c 				tcp_dsack_extend(sk,
sk               4633 net/ipv4/tcp_input.c 				NET_INC_STATS(sock_net(sk),
sk               4635 net/ipv4/tcp_input.c 				tcp_drop(sk, skb1);
sk               4638 net/ipv4/tcp_input.c 		} else if (tcp_ooo_try_coalesce(sk, skb1,
sk               4655 net/ipv4/tcp_input.c 			tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
sk               4660 net/ipv4/tcp_input.c 		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
sk               4662 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
sk               4663 net/ipv4/tcp_input.c 		tcp_drop(sk, skb1);
sk               4671 net/ipv4/tcp_input.c 		tcp_sack_new_ofo_skb(sk, seq, end_seq);
sk               4674 net/ipv4/tcp_input.c 		tcp_grow_window(sk, skb);
sk               4676 net/ipv4/tcp_input.c 		skb_set_owner_r(skb, sk);
sk               4680 net/ipv4/tcp_input.c static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
sk               4684 net/ipv4/tcp_input.c 	struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
sk               4687 net/ipv4/tcp_input.c 		 tcp_try_coalesce(sk, tail,
sk               4689 net/ipv4/tcp_input.c 	tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
sk               4691 net/ipv4/tcp_input.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
sk               4692 net/ipv4/tcp_input.c 		skb_set_owner_r(skb, sk);
sk               4697 net/ipv4/tcp_input.c int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
sk               4715 net/ipv4/tcp_input.c 				   &err, sk->sk_allocation);
sk               4723 net/ipv4/tcp_input.c 	if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
sk               4724 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
sk               4732 net/ipv4/tcp_input.c 	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
sk               4734 net/ipv4/tcp_input.c 	TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
sk               4736 net/ipv4/tcp_input.c 	if (tcp_queue_rcv(sk, skb, &fragstolen)) {
sk               4749 net/ipv4/tcp_input.c void tcp_data_ready(struct sock *sk)
sk               4751 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               4754 net/ipv4/tcp_input.c 	if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
sk               4755 net/ipv4/tcp_input.c 	    !sock_flag(sk, SOCK_DONE))
sk               4758 net/ipv4/tcp_input.c 	sk->sk_data_ready(sk);
sk               4761 net/ipv4/tcp_input.c static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
sk               4763 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               4774 net/ipv4/tcp_input.c 	tcp_ecn_accept_cwr(sk, skb);
sk               4784 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
sk               4790 net/ipv4/tcp_input.c 		if (skb_queue_len(&sk->sk_receive_queue) == 0)
sk               4791 net/ipv4/tcp_input.c 			sk_forced_mem_schedule(sk, skb->truesize);
sk               4792 net/ipv4/tcp_input.c 		else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
sk               4793 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
sk               4797 net/ipv4/tcp_input.c 		eaten = tcp_queue_rcv(sk, skb, &fragstolen);
sk               4799 net/ipv4/tcp_input.c 			tcp_event_data_recv(sk, skb);
sk               4801 net/ipv4/tcp_input.c 			tcp_fin(sk);
sk               4804 net/ipv4/tcp_input.c 			tcp_ofo_queue(sk);
sk               4810 net/ipv4/tcp_input.c 				inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
sk               4816 net/ipv4/tcp_input.c 		tcp_fast_path_check(sk);
sk               4820 net/ipv4/tcp_input.c 		if (!sock_flag(sk, SOCK_DEAD))
sk               4821 net/ipv4/tcp_input.c 			tcp_data_ready(sk);
sk               4826 net/ipv4/tcp_input.c 		tcp_rcv_spurious_retrans(sk, skb);
sk               4828 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
sk               4829 net/ipv4/tcp_input.c 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
sk               4832 net/ipv4/tcp_input.c 		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
sk               4833 net/ipv4/tcp_input.c 		inet_csk_schedule_ack(sk);
sk               4835 net/ipv4/tcp_input.c 		tcp_drop(sk, skb);
sk               4845 net/ipv4/tcp_input.c 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
sk               4851 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
sk               4857 net/ipv4/tcp_input.c 	tcp_data_queue_ofo(sk, skb);
sk               4868 net/ipv4/tcp_input.c static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
sk               4880 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
sk               4913 net/ipv4/tcp_input.c tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
sk               4929 net/ipv4/tcp_input.c 			skb = tcp_collapse_one(sk, skb, list, root);
sk               4941 net/ipv4/tcp_input.c 		    (tcp_win_from_space(sk, skb->truesize) > skb->len ||
sk               4979 net/ipv4/tcp_input.c 		skb_set_owner_r(nskb, sk);
sk               4996 net/ipv4/tcp_input.c 				skb = tcp_collapse_one(sk, skb, list, root);
sk               5016 net/ipv4/tcp_input.c static void tcp_collapse_ofo_queue(struct sock *sk)
sk               5018 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5045 net/ipv4/tcp_input.c 				tcp_collapse(sk, NULL, &tp->out_of_order_queue,
sk               5049 net/ipv4/tcp_input.c 				if (sum_tiny > sk->sk_rcvbuf >> 3)
sk               5074 net/ipv4/tcp_input.c static bool tcp_prune_ofo_queue(struct sock *sk)
sk               5076 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5083 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
sk               5084 net/ipv4/tcp_input.c 	goal = sk->sk_rcvbuf >> 3;
sk               5090 net/ipv4/tcp_input.c 		tcp_drop(sk, rb_to_skb(node));
sk               5092 net/ipv4/tcp_input.c 			sk_mem_reclaim(sk);
sk               5093 net/ipv4/tcp_input.c 			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
sk               5094 net/ipv4/tcp_input.c 			    !tcp_under_memory_pressure(sk))
sk               5096 net/ipv4/tcp_input.c 			goal = sk->sk_rcvbuf >> 3;
sk               5119 net/ipv4/tcp_input.c static int tcp_prune_queue(struct sock *sk)
sk               5121 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5123 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
sk               5125 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk               5126 net/ipv4/tcp_input.c 		tcp_clamp_window(sk);
sk               5127 net/ipv4/tcp_input.c 	else if (tcp_under_memory_pressure(sk))
sk               5130 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
sk               5133 net/ipv4/tcp_input.c 	tcp_collapse_ofo_queue(sk);
sk               5134 net/ipv4/tcp_input.c 	if (!skb_queue_empty(&sk->sk_receive_queue))
sk               5135 net/ipv4/tcp_input.c 		tcp_collapse(sk, &sk->sk_receive_queue, NULL,
sk               5136 net/ipv4/tcp_input.c 			     skb_peek(&sk->sk_receive_queue),
sk               5139 net/ipv4/tcp_input.c 	sk_mem_reclaim(sk);
sk               5141 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
sk               5147 net/ipv4/tcp_input.c 	tcp_prune_ofo_queue(sk);
sk               5149 net/ipv4/tcp_input.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
sk               5156 net/ipv4/tcp_input.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
sk               5163 net/ipv4/tcp_input.c static bool tcp_should_expand_sndbuf(const struct sock *sk)
sk               5165 net/ipv4/tcp_input.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               5170 net/ipv4/tcp_input.c 	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
sk               5174 net/ipv4/tcp_input.c 	if (tcp_under_memory_pressure(sk))
sk               5178 net/ipv4/tcp_input.c 	if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
sk               5194 net/ipv4/tcp_input.c static void tcp_new_space(struct sock *sk)
sk               5196 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5198 net/ipv4/tcp_input.c 	if (tcp_should_expand_sndbuf(sk)) {
sk               5199 net/ipv4/tcp_input.c 		tcp_sndbuf_expand(sk);
sk               5203 net/ipv4/tcp_input.c 	sk->sk_write_space(sk);
sk               5206 net/ipv4/tcp_input.c static void tcp_check_space(struct sock *sk)
sk               5208 net/ipv4/tcp_input.c 	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
sk               5209 net/ipv4/tcp_input.c 		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
sk               5212 net/ipv4/tcp_input.c 		if (sk->sk_socket &&
sk               5213 net/ipv4/tcp_input.c 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
sk               5214 net/ipv4/tcp_input.c 			tcp_new_space(sk);
sk               5215 net/ipv4/tcp_input.c 			if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
sk               5216 net/ipv4/tcp_input.c 				tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
sk               5221 net/ipv4/tcp_input.c static inline void tcp_data_snd_check(struct sock *sk)
sk               5223 net/ipv4/tcp_input.c 	tcp_push_pending_frames(sk);
sk               5224 net/ipv4/tcp_input.c 	tcp_check_space(sk);
sk               5230 net/ipv4/tcp_input.c static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
sk               5232 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5236 net/ipv4/tcp_input.c 	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
sk               5242 net/ipv4/tcp_input.c 	    (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
sk               5243 net/ipv4/tcp_input.c 	     __tcp_select_window(sk) >= tp->rcv_wnd)) ||
sk               5245 net/ipv4/tcp_input.c 	    tcp_in_quickack_mode(sk) ||
sk               5247 net/ipv4/tcp_input.c 	    inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) {
sk               5249 net/ipv4/tcp_input.c 		tcp_send_ack(sk);
sk               5254 net/ipv4/tcp_input.c 		tcp_send_delayed_ack(sk);
sk               5259 net/ipv4/tcp_input.c 	    tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
sk               5265 net/ipv4/tcp_input.c 			NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
sk               5282 net/ipv4/tcp_input.c 	delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
sk               5284 net/ipv4/tcp_input.c 	sock_hold(sk);
sk               5289 net/ipv4/tcp_input.c static inline void tcp_ack_snd_check(struct sock *sk)
sk               5291 net/ipv4/tcp_input.c 	if (!inet_csk_ack_scheduled(sk)) {
sk               5295 net/ipv4/tcp_input.c 	__tcp_ack_snd_check(sk, 1);
sk               5308 net/ipv4/tcp_input.c static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
sk               5310 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5313 net/ipv4/tcp_input.c 	if (ptr && !sock_net(sk)->ipv4.sysctl_tcp_stdurg)
sk               5339 net/ipv4/tcp_input.c 	sk_send_sigurg(sk);
sk               5357 net/ipv4/tcp_input.c 	    !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
sk               5358 net/ipv4/tcp_input.c 		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
sk               5361 net/ipv4/tcp_input.c 			__skb_unlink(skb, &sk->sk_receive_queue);
sk               5374 net/ipv4/tcp_input.c static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
sk               5376 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5380 net/ipv4/tcp_input.c 		tcp_check_urg(sk, th);
sk               5393 net/ipv4/tcp_input.c 			if (!sock_flag(sk, SOCK_DEAD))
sk               5394 net/ipv4/tcp_input.c 				sk->sk_data_ready(sk);
sk               5407 net/ipv4/tcp_input.c static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
sk               5409 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5412 net/ipv4/tcp_input.c 			(1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
sk               5419 net/ipv4/tcp_input.c static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
sk               5422 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5426 net/ipv4/tcp_input.c 	if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) &&
sk               5428 net/ipv4/tcp_input.c 	    tcp_paws_discard(sk, skb)) {
sk               5430 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
sk               5431 net/ipv4/tcp_input.c 			if (!tcp_oow_rate_limited(sock_net(sk), skb,
sk               5434 net/ipv4/tcp_input.c 				tcp_send_dupack(sk, skb);
sk               5451 net/ipv4/tcp_input.c 			if (!tcp_oow_rate_limited(sock_net(sk), skb,
sk               5454 net/ipv4/tcp_input.c 				tcp_send_dupack(sk, skb);
sk               5455 net/ipv4/tcp_input.c 		} else if (tcp_reset_check(sk, skb)) {
sk               5456 net/ipv4/tcp_input.c 			tcp_reset(sk);
sk               5473 net/ipv4/tcp_input.c 		    tcp_reset_check(sk, skb)) {
sk               5492 net/ipv4/tcp_input.c 			tcp_reset(sk);
sk               5499 net/ipv4/tcp_input.c 			    sk->sk_state == TCP_ESTABLISHED)
sk               5500 net/ipv4/tcp_input.c 				tcp_fastopen_active_disable(sk);
sk               5501 net/ipv4/tcp_input.c 			tcp_send_challenge_ack(sk, skb);
sk               5514 net/ipv4/tcp_input.c 			TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
sk               5515 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
sk               5516 net/ipv4/tcp_input.c 		tcp_send_challenge_ack(sk, skb);
sk               5523 net/ipv4/tcp_input.c 	tcp_drop(sk, skb);
sk               5550 net/ipv4/tcp_input.c void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
sk               5553 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5557 net/ipv4/tcp_input.c 	trace_tcp_probe(sk, skb);
sk               5560 net/ipv4/tcp_input.c 	if (unlikely(!sk->sk_rx_dst))
sk               5561 net/ipv4/tcp_input.c 		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
sk               5630 net/ipv4/tcp_input.c 				tcp_ack(sk, skb, 0);
sk               5632 net/ipv4/tcp_input.c 				tcp_data_snd_check(sk);
sk               5640 net/ipv4/tcp_input.c 				TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
sk               5650 net/ipv4/tcp_input.c 			if ((int)skb->truesize > sk->sk_forward_alloc)
sk               5662 net/ipv4/tcp_input.c 			tcp_rcv_rtt_measure_ts(sk, skb);
sk               5664 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
sk               5668 net/ipv4/tcp_input.c 			eaten = tcp_queue_rcv(sk, skb, &fragstolen);
sk               5670 net/ipv4/tcp_input.c 			tcp_event_data_recv(sk, skb);
sk               5674 net/ipv4/tcp_input.c 				tcp_ack(sk, skb, FLAG_DATA);
sk               5675 net/ipv4/tcp_input.c 				tcp_data_snd_check(sk);
sk               5676 net/ipv4/tcp_input.c 				if (!inet_csk_ack_scheduled(sk))
sk               5680 net/ipv4/tcp_input.c 			__tcp_ack_snd_check(sk, 0);
sk               5684 net/ipv4/tcp_input.c 			tcp_data_ready(sk);
sk               5700 net/ipv4/tcp_input.c 	if (!tcp_validate_incoming(sk, skb, th, 1))
sk               5704 net/ipv4/tcp_input.c 	if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
sk               5707 net/ipv4/tcp_input.c 	tcp_rcv_rtt_measure_ts(sk, skb);
sk               5710 net/ipv4/tcp_input.c 	tcp_urg(sk, skb, th);
sk               5713 net/ipv4/tcp_input.c 	tcp_data_queue(sk, skb);
sk               5715 net/ipv4/tcp_input.c 	tcp_data_snd_check(sk);
sk               5716 net/ipv4/tcp_input.c 	tcp_ack_snd_check(sk);
sk               5720 net/ipv4/tcp_input.c 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
sk               5721 net/ipv4/tcp_input.c 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
sk               5724 net/ipv4/tcp_input.c 	tcp_drop(sk, skb);
sk               5728 net/ipv4/tcp_input.c void tcp_init_transfer(struct sock *sk, int bpf_op)
sk               5730 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               5731 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5733 net/ipv4/tcp_input.c 	tcp_mtup_init(sk);
sk               5734 net/ipv4/tcp_input.c 	icsk->icsk_af_ops->rebuild_header(sk);
sk               5735 net/ipv4/tcp_input.c 	tcp_init_metrics(sk);
sk               5746 net/ipv4/tcp_input.c 		tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
sk               5749 net/ipv4/tcp_input.c 	tcp_call_bpf(sk, bpf_op, 0, NULL);
sk               5750 net/ipv4/tcp_input.c 	tcp_init_congestion_control(sk);
sk               5751 net/ipv4/tcp_input.c 	tcp_init_buffer_space(sk);
sk               5754 net/ipv4/tcp_input.c void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
sk               5756 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5757 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               5759 net/ipv4/tcp_input.c 	tcp_set_state(sk, TCP_ESTABLISHED);
sk               5763 net/ipv4/tcp_input.c 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
sk               5764 net/ipv4/tcp_input.c 		security_inet_conn_established(sk, skb);
sk               5765 net/ipv4/tcp_input.c 		sk_mark_napi_id(sk, skb);
sk               5768 net/ipv4/tcp_input.c 	tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB);
sk               5775 net/ipv4/tcp_input.c 	if (sock_flag(sk, SOCK_KEEPOPEN))
sk               5776 net/ipv4/tcp_input.c 		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
sk               5784 net/ipv4/tcp_input.c static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
sk               5787 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5788 net/ipv4/tcp_input.c 	struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL;
sk               5798 net/ipv4/tcp_input.c 		tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
sk               5820 net/ipv4/tcp_input.c 	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
sk               5824 net/ipv4/tcp_input.c 			if (__tcp_retransmit_skb(sk, data, 1))
sk               5827 net/ipv4/tcp_input.c 		tcp_rearm_rto(sk);
sk               5828 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk),
sk               5834 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
sk               5840 net/ipv4/tcp_input.c 	tcp_fastopen_add_skb(sk, synack);
sk               5855 net/ipv4/tcp_input.c static void tcp_try_undo_spurious_syn(struct sock *sk)
sk               5857 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5870 net/ipv4/tcp_input.c static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
sk               5873 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               5874 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               5879 net/ipv4/tcp_input.c 	tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
sk               5899 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk),
sk               5913 net/ipv4/tcp_input.c 			tcp_reset(sk);
sk               5937 net/ipv4/tcp_input.c 		tcp_try_undo_spurious_syn(sk);
sk               5938 net/ipv4/tcp_input.c 		tcp_ack(sk, skb, FLAG_SLOWPATH);
sk               5966 net/ipv4/tcp_input.c 		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
sk               5967 net/ipv4/tcp_input.c 		tcp_initialize_rcv_mss(sk);
sk               5978 net/ipv4/tcp_input.c 		tcp_finish_connect(sk, skb);
sk               5981 net/ipv4/tcp_input.c 				tcp_rcv_fastopen_synack(sk, skb, &foc);
sk               5983 net/ipv4/tcp_input.c 		if (!sock_flag(sk, SOCK_DEAD)) {
sk               5984 net/ipv4/tcp_input.c 			sk->sk_state_change(sk);
sk               5985 net/ipv4/tcp_input.c 			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
sk               5989 net/ipv4/tcp_input.c 		if (sk->sk_write_pending ||
sk               5991 net/ipv4/tcp_input.c 		    inet_csk_in_pingpong_mode(sk)) {
sk               5999 net/ipv4/tcp_input.c 			inet_csk_schedule_ack(sk);
sk               6000 net/ipv4/tcp_input.c 			tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
sk               6001 net/ipv4/tcp_input.c 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
sk               6005 net/ipv4/tcp_input.c 			tcp_drop(sk, skb);
sk               6008 net/ipv4/tcp_input.c 			tcp_send_ack(sk);
sk               6035 net/ipv4/tcp_input.c 		tcp_set_state(sk, TCP_SYN_RECV);
sk               6059 net/ipv4/tcp_input.c 		tcp_mtup_init(sk);
sk               6060 net/ipv4/tcp_input.c 		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
sk               6061 net/ipv4/tcp_input.c 		tcp_initialize_rcv_mss(sk);
sk               6063 net/ipv4/tcp_input.c 		tcp_send_synack(sk);
sk               6096 net/ipv4/tcp_input.c static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
sk               6103 net/ipv4/tcp_input.c 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
sk               6104 net/ipv4/tcp_input.c 		tcp_try_undo_loss(sk, false);
sk               6107 net/ipv4/tcp_input.c 	tcp_sk(sk)->retrans_stamp = 0;
sk               6108 net/ipv4/tcp_input.c 	inet_csk(sk)->icsk_retransmits = 0;
sk               6113 net/ipv4/tcp_input.c 	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
sk               6114 net/ipv4/tcp_input.c 					lockdep_sock_is_held(sk));
sk               6115 net/ipv4/tcp_input.c 	reqsk_fastopen_remove(sk, req, false);
sk               6125 net/ipv4/tcp_input.c 	tcp_rearm_rto(sk);
sk               6135 net/ipv4/tcp_input.c int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
sk               6137 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               6138 net/ipv4/tcp_input.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               6144 net/ipv4/tcp_input.c 	switch (sk->sk_state) {
sk               6163 net/ipv4/tcp_input.c 			acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
sk               6177 net/ipv4/tcp_input.c 		queued = tcp_rcv_synsent_state_process(sk, skb, th);
sk               6182 net/ipv4/tcp_input.c 		tcp_urg(sk, skb, th);
sk               6184 net/ipv4/tcp_input.c 		tcp_data_snd_check(sk);
sk               6191 net/ipv4/tcp_input.c 					lockdep_sock_is_held(sk));
sk               6195 net/ipv4/tcp_input.c 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk               6196 net/ipv4/tcp_input.c 		    sk->sk_state != TCP_FIN_WAIT1);
sk               6198 net/ipv4/tcp_input.c 		if (!tcp_check_req(sk, skb, req, true, &req_stolen))
sk               6205 net/ipv4/tcp_input.c 	if (!tcp_validate_incoming(sk, skb, th, 0))
sk               6209 net/ipv4/tcp_input.c 	acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH |
sk               6214 net/ipv4/tcp_input.c 		if (sk->sk_state == TCP_SYN_RECV)
sk               6216 net/ipv4/tcp_input.c 		tcp_send_challenge_ack(sk, skb);
sk               6219 net/ipv4/tcp_input.c 	switch (sk->sk_state) {
sk               6223 net/ipv4/tcp_input.c 			tcp_synack_rtt_meas(sk, req);
sk               6226 net/ipv4/tcp_input.c 			tcp_rcv_synrecv_state_fastopen(sk);
sk               6228 net/ipv4/tcp_input.c 			tcp_try_undo_spurious_syn(sk);
sk               6230 net/ipv4/tcp_input.c 			tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
sk               6234 net/ipv4/tcp_input.c 		tcp_set_state(sk, TCP_ESTABLISHED);
sk               6235 net/ipv4/tcp_input.c 		sk->sk_state_change(sk);
sk               6241 net/ipv4/tcp_input.c 		if (sk->sk_socket)
sk               6242 net/ipv4/tcp_input.c 			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
sk               6251 net/ipv4/tcp_input.c 		if (!inet_csk(sk)->icsk_ca_ops->cong_control)
sk               6252 net/ipv4/tcp_input.c 			tcp_update_pacing_rate(sk);
sk               6257 net/ipv4/tcp_input.c 		tcp_initialize_rcv_mss(sk);
sk               6265 net/ipv4/tcp_input.c 			tcp_rcv_synrecv_state_fastopen(sk);
sk               6270 net/ipv4/tcp_input.c 		tcp_set_state(sk, TCP_FIN_WAIT2);
sk               6271 net/ipv4/tcp_input.c 		sk->sk_shutdown |= SEND_SHUTDOWN;
sk               6273 net/ipv4/tcp_input.c 		sk_dst_confirm(sk);
sk               6275 net/ipv4/tcp_input.c 		if (!sock_flag(sk, SOCK_DEAD)) {
sk               6277 net/ipv4/tcp_input.c 			sk->sk_state_change(sk);
sk               6282 net/ipv4/tcp_input.c 			tcp_done(sk);
sk               6283 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
sk               6290 net/ipv4/tcp_input.c 				tcp_fastopen_active_disable(sk);
sk               6291 net/ipv4/tcp_input.c 			tcp_done(sk);
sk               6292 net/ipv4/tcp_input.c 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
sk               6296 net/ipv4/tcp_input.c 		tmo = tcp_fin_time(sk);
sk               6298 net/ipv4/tcp_input.c 			inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
sk               6299 net/ipv4/tcp_input.c 		} else if (th->fin || sock_owned_by_user(sk)) {
sk               6306 net/ipv4/tcp_input.c 			inet_csk_reset_keepalive_timer(sk, tmo);
sk               6308 net/ipv4/tcp_input.c 			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
sk               6316 net/ipv4/tcp_input.c 			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
sk               6323 net/ipv4/tcp_input.c 			tcp_update_metrics(sk);
sk               6324 net/ipv4/tcp_input.c 			tcp_done(sk);
sk               6331 net/ipv4/tcp_input.c 	tcp_urg(sk, skb, th);
sk               6334 net/ipv4/tcp_input.c 	switch (sk->sk_state) {
sk               6347 net/ipv4/tcp_input.c 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
sk               6350 net/ipv4/tcp_input.c 				NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
sk               6351 net/ipv4/tcp_input.c 				tcp_reset(sk);
sk               6357 net/ipv4/tcp_input.c 		tcp_data_queue(sk, skb);
sk               6363 net/ipv4/tcp_input.c 	if (sk->sk_state != TCP_CLOSE) {
sk               6364 net/ipv4/tcp_input.c 		tcp_data_snd_check(sk);
sk               6365 net/ipv4/tcp_input.c 		tcp_ack_snd_check(sk);
sk               6370 net/ipv4/tcp_input.c 		tcp_drop(sk, skb);
sk               6433 net/ipv4/tcp_input.c 			     struct sk_buff *skb, const struct sock *sk)
sk               6453 net/ipv4/tcp_input.c 	ireq->ir_mark = inet_request_mark(sk, skb);
sk               6486 net/ipv4/tcp_input.c static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
sk               6488 net/ipv4/tcp_input.c 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
sk               6491 net/ipv4/tcp_input.c 	struct net *net = sock_net(sk);
sk               6497 net/ipv4/tcp_input.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
sk               6500 net/ipv4/tcp_input.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
sk               6506 net/ipv4/tcp_input.c 				     proto, sk->sk_num, msg);
sk               6511 net/ipv4/tcp_input.c static void tcp_reqsk_record_syn(const struct sock *sk,
sk               6515 net/ipv4/tcp_input.c 	if (tcp_sk(sk)->save_syn) {
sk               6533 net/ipv4/tcp_input.c 			  struct sock *sk, struct tcphdr *th)
sk               6535 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               6538 net/ipv4/tcp_input.c 	if (sock_net(sk)->ipv4.sysctl_tcp_syncookies != 2 &&
sk               6539 net/ipv4/tcp_input.c 	    !inet_csk_reqsk_queue_is_full(sk))
sk               6542 net/ipv4/tcp_input.c 	if (!tcp_syn_flood_action(sk, rsk_ops->slab_name))
sk               6545 net/ipv4/tcp_input.c 	if (sk_acceptq_is_full(sk)) {
sk               6546 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
sk               6560 net/ipv4/tcp_input.c 		     struct sock *sk, struct sk_buff *skb)
sk               6565 net/ipv4/tcp_input.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               6566 net/ipv4/tcp_input.c 	struct net *net = sock_net(sk);
sk               6578 net/ipv4/tcp_input.c 	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
sk               6579 net/ipv4/tcp_input.c 		want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name);
sk               6584 net/ipv4/tcp_input.c 	if (sk_acceptq_is_full(sk)) {
sk               6585 net/ipv4/tcp_input.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
sk               6589 net/ipv4/tcp_input.c 	req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
sk               6599 net/ipv4/tcp_input.c 	tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
sk               6609 net/ipv4/tcp_input.c 	tcp_openreq_init(req, &tmp_opt, skb, sk);
sk               6610 net/ipv4/tcp_input.c 	inet_rsk(req)->no_srccheck = inet_sk(sk)->transparent;
sk               6613 net/ipv4/tcp_input.c 	inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
sk               6615 net/ipv4/tcp_input.c 	af_ops->init_req(req, sk, skb);
sk               6617 net/ipv4/tcp_input.c 	if (security_inet_conn_request(sk, skb, req))
sk               6623 net/ipv4/tcp_input.c 	dst = af_ops->route_req(sk, &fl, req);
sk               6630 net/ipv4/tcp_input.c 		    (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
sk               6648 net/ipv4/tcp_input.c 	tcp_ecn_create_request(req, skb, sk, dst);
sk               6651 net/ipv4/tcp_input.c 		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
sk               6659 net/ipv4/tcp_input.c 	tcp_openreq_init_rwin(req, sk, dst);
sk               6662 net/ipv4/tcp_input.c 		tcp_reqsk_record_syn(sk, req, skb);
sk               6663 net/ipv4/tcp_input.c 		fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
sk               6669 net/ipv4/tcp_input.c 		if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
sk               6675 net/ipv4/tcp_input.c 		sk->sk_data_ready(sk);
sk               6681 net/ipv4/tcp_input.c 			inet_csk_reqsk_queue_hash_add(sk, req,
sk               6683 net/ipv4/tcp_input.c 		af_ops->send_synack(sk, dst, &fl, req, &foc,
sk               6699 net/ipv4/tcp_input.c 	tcp_listendrop(sk);
sk                106 net/ipv4/tcp_ipv4.c int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
sk                110 net/ipv4/tcp_ipv4.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                111 net/ipv4/tcp_ipv4.c 	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;
sk                183 net/ipv4/tcp_ipv4.c static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
sk                193 net/ipv4/tcp_ipv4.c 	sock_owned_by_me(sk);
sk                195 net/ipv4/tcp_ipv4.c 	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
sk                199 net/ipv4/tcp_ipv4.c int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                202 net/ipv4/tcp_ipv4.c 	struct inet_sock *inet = inet_sk(sk);
sk                203 net/ipv4/tcp_ipv4.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                210 net/ipv4/tcp_ipv4.c 	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
sk                220 net/ipv4/tcp_ipv4.c 					     lockdep_sock_is_held(sk));
sk                231 net/ipv4/tcp_ipv4.c 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
sk                233 net/ipv4/tcp_ipv4.c 			      orig_sport, orig_dport, sk);
sk                237 net/ipv4/tcp_ipv4.c 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
sk                251 net/ipv4/tcp_ipv4.c 	sk_rcv_saddr_set(sk, inet->inet_saddr);
sk                262 net/ipv4/tcp_ipv4.c 	sk_daddr_set(sk, daddr);
sk                264 net/ipv4/tcp_ipv4.c 	inet_csk(sk)->icsk_ext_hdr_len = 0;
sk                266 net/ipv4/tcp_ipv4.c 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
sk                275 net/ipv4/tcp_ipv4.c 	tcp_set_state(sk, TCP_SYN_SENT);
sk                276 net/ipv4/tcp_ipv4.c 	err = inet_hash_connect(tcp_death_row, sk);
sk                280 net/ipv4/tcp_ipv4.c 	sk_set_txhash(sk);
sk                283 net/ipv4/tcp_ipv4.c 			       inet->inet_sport, inet->inet_dport, sk);
sk                290 net/ipv4/tcp_ipv4.c 	sk->sk_gso_type = SKB_GSO_TCPV4;
sk                291 net/ipv4/tcp_ipv4.c 	sk_setup_caps(sk, &rt->dst);
sk                301 net/ipv4/tcp_ipv4.c 		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
sk                308 net/ipv4/tcp_ipv4.c 	if (tcp_fastopen_defer_connect(sk, &err))
sk                313 net/ipv4/tcp_ipv4.c 	err = tcp_connect(sk);
sk                325 net/ipv4/tcp_ipv4.c 	tcp_set_state(sk, TCP_CLOSE);
sk                327 net/ipv4/tcp_ipv4.c 	sk->sk_route_caps = 0;
sk                338 net/ipv4/tcp_ipv4.c void tcp_v4_mtu_reduced(struct sock *sk)
sk                340 net/ipv4/tcp_ipv4.c 	struct inet_sock *inet = inet_sk(sk);
sk                344 net/ipv4/tcp_ipv4.c 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
sk                346 net/ipv4/tcp_ipv4.c 	mtu = tcp_sk(sk)->mtu_info;
sk                347 net/ipv4/tcp_ipv4.c 	dst = inet_csk_update_pmtu(sk, mtu);
sk                354 net/ipv4/tcp_ipv4.c 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
sk                355 net/ipv4/tcp_ipv4.c 		sk->sk_err_soft = EMSGSIZE;
sk                360 net/ipv4/tcp_ipv4.c 	    ip_sk_accept_pmtu(sk) &&
sk                361 net/ipv4/tcp_ipv4.c 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
sk                362 net/ipv4/tcp_ipv4.c 		tcp_sync_mss(sk, mtu);
sk                369 net/ipv4/tcp_ipv4.c 		tcp_simple_retransmit(sk);
sk                374 net/ipv4/tcp_ipv4.c static void do_redirect(struct sk_buff *skb, struct sock *sk)
sk                376 net/ipv4/tcp_ipv4.c 	struct dst_entry *dst = __sk_dst_check(sk, 0);
sk                379 net/ipv4/tcp_ipv4.c 		dst->ops->redirect(dst, sk, skb);
sk                384 net/ipv4/tcp_ipv4.c void tcp_req_err(struct sock *sk, u32 seq, bool abort)
sk                386 net/ipv4/tcp_ipv4.c 	struct request_sock *req = inet_reqsk(sk);
sk                387 net/ipv4/tcp_ipv4.c 	struct net *net = sock_net(sk);
sk                433 net/ipv4/tcp_ipv4.c 	struct sock *sk;
sk                442 net/ipv4/tcp_ipv4.c 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
sk                445 net/ipv4/tcp_ipv4.c 	if (!sk) {
sk                449 net/ipv4/tcp_ipv4.c 	if (sk->sk_state == TCP_TIME_WAIT) {
sk                450 net/ipv4/tcp_ipv4.c 		inet_twsk_put(inet_twsk(sk));
sk                454 net/ipv4/tcp_ipv4.c 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
sk                455 net/ipv4/tcp_ipv4.c 		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
sk                463 net/ipv4/tcp_ipv4.c 	bh_lock_sock(sk);
sk                469 net/ipv4/tcp_ipv4.c 	if (sock_owned_by_user(sk)) {
sk                473 net/ipv4/tcp_ipv4.c 	if (sk->sk_state == TCP_CLOSE)
sk                476 net/ipv4/tcp_ipv4.c 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
sk                481 net/ipv4/tcp_ipv4.c 	icsk = inet_csk(sk);
sk                482 net/ipv4/tcp_ipv4.c 	tp = tcp_sk(sk);
sk                486 net/ipv4/tcp_ipv4.c 	if (sk->sk_state != TCP_LISTEN &&
sk                494 net/ipv4/tcp_ipv4.c 		if (!sock_owned_by_user(sk))
sk                495 net/ipv4/tcp_ipv4.c 			do_redirect(icmp_skb, sk);
sk                512 net/ipv4/tcp_ipv4.c 			if (sk->sk_state == TCP_LISTEN)
sk                516 net/ipv4/tcp_ipv4.c 			if (!sock_owned_by_user(sk)) {
sk                517 net/ipv4/tcp_ipv4.c 				tcp_v4_mtu_reduced(sk);
sk                519 net/ipv4/tcp_ipv4.c 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
sk                520 net/ipv4/tcp_ipv4.c 					sock_hold(sk);
sk                534 net/ipv4/tcp_ipv4.c 		if (sock_owned_by_user(sk))
sk                537 net/ipv4/tcp_ipv4.c 		skb = tcp_rtx_queue_head(sk);
sk                553 net/ipv4/tcp_ipv4.c 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk                558 net/ipv4/tcp_ipv4.c 			tcp_retransmit_timer(sk);
sk                569 net/ipv4/tcp_ipv4.c 	switch (sk->sk_state) {
sk                575 net/ipv4/tcp_ipv4.c 		if (fastopen && !fastopen->sk)
sk                578 net/ipv4/tcp_ipv4.c 		if (!sock_owned_by_user(sk)) {
sk                579 net/ipv4/tcp_ipv4.c 			sk->sk_err = err;
sk                581 net/ipv4/tcp_ipv4.c 			sk->sk_error_report(sk);
sk                583 net/ipv4/tcp_ipv4.c 			tcp_done(sk);
sk                585 net/ipv4/tcp_ipv4.c 			sk->sk_err_soft = err;
sk                606 net/ipv4/tcp_ipv4.c 	inet = inet_sk(sk);
sk                607 net/ipv4/tcp_ipv4.c 	if (!sock_owned_by_user(sk) && inet->recverr) {
sk                608 net/ipv4/tcp_ipv4.c 		sk->sk_err = err;
sk                609 net/ipv4/tcp_ipv4.c 		sk->sk_error_report(sk);
sk                611 net/ipv4/tcp_ipv4.c 		sk->sk_err_soft = err;
sk                615 net/ipv4/tcp_ipv4.c 	bh_unlock_sock(sk);
sk                616 net/ipv4/tcp_ipv4.c 	sock_put(sk);
sk                630 net/ipv4/tcp_ipv4.c void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
sk                632 net/ipv4/tcp_ipv4.c 	const struct inet_sock *inet = inet_sk(sk);
sk                651 net/ipv4/tcp_ipv4.c static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
sk                679 net/ipv4/tcp_ipv4.c 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
sk                701 net/ipv4/tcp_ipv4.c 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
sk                705 net/ipv4/tcp_ipv4.c 	if (sk && sk_fullsock(sk)) {
sk                706 net/ipv4/tcp_ipv4.c 		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
sk                755 net/ipv4/tcp_ipv4.c 	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
sk                761 net/ipv4/tcp_ipv4.c 	if (sk) {
sk                762 net/ipv4/tcp_ipv4.c 		arg.bound_dev_if = sk->sk_bound_dev_if;
sk                763 net/ipv4/tcp_ipv4.c 		if (sk_fullsock(sk))
sk                764 net/ipv4/tcp_ipv4.c 			trace_tcp_send_reset(sk, skb);
sk                771 net/ipv4/tcp_ipv4.c 	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
sk                774 net/ipv4/tcp_ipv4.c 	if (sk) {
sk                775 net/ipv4/tcp_ipv4.c 		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
sk                776 net/ipv4/tcp_ipv4.c 				   inet_twsk(sk)->tw_mark : sk->sk_mark;
sk                777 net/ipv4/tcp_ipv4.c 		ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
sk                778 net/ipv4/tcp_ipv4.c 				   inet_twsk(sk)->tw_priority : sk->sk_priority;
sk                779 net/ipv4/tcp_ipv4.c 		transmit_time = tcp_transmit_time(sk);
sk                802 net/ipv4/tcp_ipv4.c static void tcp_v4_send_ack(const struct sock *sk,
sk                817 net/ipv4/tcp_ipv4.c 	struct net *net = sock_net(sk);
sk                869 net/ipv4/tcp_ipv4.c 	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
sk                872 net/ipv4/tcp_ipv4.c 	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
sk                873 net/ipv4/tcp_ipv4.c 			   inet_twsk(sk)->tw_mark : sk->sk_mark;
sk                874 net/ipv4/tcp_ipv4.c 	ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
sk                875 net/ipv4/tcp_ipv4.c 			   inet_twsk(sk)->tw_priority : sk->sk_priority;
sk                876 net/ipv4/tcp_ipv4.c 	transmit_time = tcp_transmit_time(sk);
sk                888 net/ipv4/tcp_ipv4.c static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
sk                890 net/ipv4/tcp_ipv4.c 	struct inet_timewait_sock *tw = inet_twsk(sk);
sk                891 net/ipv4/tcp_ipv4.c 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
sk                893 net/ipv4/tcp_ipv4.c 	tcp_v4_send_ack(sk, skb,
sk                907 net/ipv4/tcp_ipv4.c static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
sk                913 net/ipv4/tcp_ipv4.c 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
sk                914 net/ipv4/tcp_ipv4.c 					     tcp_sk(sk)->snd_nxt;
sk                921 net/ipv4/tcp_ipv4.c 	tcp_v4_send_ack(sk, skb, seq,
sk                927 net/ipv4/tcp_ipv4.c 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
sk                938 net/ipv4/tcp_ipv4.c static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
sk                950 net/ipv4/tcp_ipv4.c 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
sk                953 net/ipv4/tcp_ipv4.c 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
sk                959 net/ipv4/tcp_ipv4.c 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
sk                988 net/ipv4/tcp_ipv4.c struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
sk                992 net/ipv4/tcp_ipv4.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1001 net/ipv4/tcp_ipv4.c 				       lockdep_sock_is_held(sk));
sk               1030 net/ipv4/tcp_ipv4.c static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
sk               1034 net/ipv4/tcp_ipv4.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1041 net/ipv4/tcp_ipv4.c 				       lockdep_sock_is_held(sk));
sk               1058 net/ipv4/tcp_ipv4.c struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
sk               1064 net/ipv4/tcp_ipv4.c 	return tcp_md5_do_lookup(sk, addr, AF_INET);
sk               1069 net/ipv4/tcp_ipv4.c int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
sk               1075 net/ipv4/tcp_ipv4.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1078 net/ipv4/tcp_ipv4.c 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
sk               1087 net/ipv4/tcp_ipv4.c 					   lockdep_sock_is_held(sk));
sk               1093 net/ipv4/tcp_ipv4.c 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
sk               1098 net/ipv4/tcp_ipv4.c 	key = sock_kmalloc(sk, sizeof(*key), gfp);
sk               1102 net/ipv4/tcp_ipv4.c 		sock_kfree_s(sk, key, sizeof(*key));
sk               1118 net/ipv4/tcp_ipv4.c int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
sk               1123 net/ipv4/tcp_ipv4.c 	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
sk               1127 net/ipv4/tcp_ipv4.c 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
sk               1133 net/ipv4/tcp_ipv4.c static void tcp_clear_md5_list(struct sock *sk)
sk               1135 net/ipv4/tcp_ipv4.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1144 net/ipv4/tcp_ipv4.c 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
sk               1149 net/ipv4/tcp_ipv4.c static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
sk               1173 net/ipv4/tcp_ipv4.c 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
sk               1179 net/ipv4/tcp_ipv4.c 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
sk               1241 net/ipv4/tcp_ipv4.c 			const struct sock *sk,
sk               1249 net/ipv4/tcp_ipv4.c 	if (sk) { /* valid for establish/request sockets */
sk               1250 net/ipv4/tcp_ipv4.c 		saddr = sk->sk_rcv_saddr;
sk               1251 net/ipv4/tcp_ipv4.c 		daddr = sk->sk_daddr;
sk               1290 net/ipv4/tcp_ipv4.c static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
sk               1309 net/ipv4/tcp_ipv4.c 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
sk               1318 net/ipv4/tcp_ipv4.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
sk               1323 net/ipv4/tcp_ipv4.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
sk               1335 net/ipv4/tcp_ipv4.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
sk               1360 net/ipv4/tcp_ipv4.c static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
sk               1364 net/ipv4/tcp_ipv4.c 	return inet_csk_route_req(sk, &fl->u.ip4, req);
sk               1393 net/ipv4/tcp_ipv4.c int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
sk               1400 net/ipv4/tcp_ipv4.c 				&tcp_request_sock_ipv4_ops, sk, skb);
sk               1403 net/ipv4/tcp_ipv4.c 	tcp_listendrop(sk);
sk               1413 net/ipv4/tcp_ipv4.c struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
sk               1428 net/ipv4/tcp_ipv4.c 	if (sk_acceptq_is_full(sk))
sk               1431 net/ipv4/tcp_ipv4.c 	newsk = tcp_create_openreq_child(sk, req, skb);
sk               1456 net/ipv4/tcp_ipv4.c 		dst = inet_csk_route_child_sock(sk, newsk, req);
sk               1467 net/ipv4/tcp_ipv4.c 	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
sk               1473 net/ipv4/tcp_ipv4.c 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
sk               1488 net/ipv4/tcp_ipv4.c 	if (__inet_inherit_port(sk, newsk) < 0)
sk               1500 net/ipv4/tcp_ipv4.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
sk               1504 net/ipv4/tcp_ipv4.c 	tcp_listendrop(sk);
sk               1514 net/ipv4/tcp_ipv4.c static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
sk               1520 net/ipv4/tcp_ipv4.c 		sk = cookie_v4_check(sk, skb);
sk               1522 net/ipv4/tcp_ipv4.c 	return sk;
sk               1525 net/ipv4/tcp_ipv4.c u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
sk               1531 net/ipv4/tcp_ipv4.c 				    &tcp_request_sock_ipv4_ops, sk, th);
sk               1534 net/ipv4/tcp_ipv4.c 		tcp_synq_overflow(sk);
sk               1548 net/ipv4/tcp_ipv4.c int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
sk               1552 net/ipv4/tcp_ipv4.c 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
sk               1553 net/ipv4/tcp_ipv4.c 		struct dst_entry *dst = sk->sk_rx_dst;
sk               1555 net/ipv4/tcp_ipv4.c 		sock_rps_save_rxhash(sk, skb);
sk               1556 net/ipv4/tcp_ipv4.c 		sk_mark_napi_id(sk, skb);
sk               1558 net/ipv4/tcp_ipv4.c 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
sk               1561 net/ipv4/tcp_ipv4.c 				sk->sk_rx_dst = NULL;
sk               1564 net/ipv4/tcp_ipv4.c 		tcp_rcv_established(sk, skb);
sk               1571 net/ipv4/tcp_ipv4.c 	if (sk->sk_state == TCP_LISTEN) {
sk               1572 net/ipv4/tcp_ipv4.c 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
sk               1576 net/ipv4/tcp_ipv4.c 		if (nsk != sk) {
sk               1577 net/ipv4/tcp_ipv4.c 			if (tcp_child_process(sk, nsk, skb)) {
sk               1584 net/ipv4/tcp_ipv4.c 		sock_rps_save_rxhash(sk, skb);
sk               1586 net/ipv4/tcp_ipv4.c 	if (tcp_rcv_state_process(sk, skb)) {
sk               1587 net/ipv4/tcp_ipv4.c 		rsk = sk;
sk               1604 net/ipv4/tcp_ipv4.c 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
sk               1605 net/ipv4/tcp_ipv4.c 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
sk               1614 net/ipv4/tcp_ipv4.c 	struct sock *sk;
sk               1628 net/ipv4/tcp_ipv4.c 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
sk               1632 net/ipv4/tcp_ipv4.c 	if (sk) {
sk               1633 net/ipv4/tcp_ipv4.c 		skb->sk = sk;
sk               1635 net/ipv4/tcp_ipv4.c 		if (sk_fullsock(sk)) {
sk               1636 net/ipv4/tcp_ipv4.c 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
sk               1641 net/ipv4/tcp_ipv4.c 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
sk               1648 net/ipv4/tcp_ipv4.c bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
sk               1650 net/ipv4/tcp_ipv4.c 	u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
sk               1671 net/ipv4/tcp_ipv4.c 		bh_unlock_sock(sk);
sk               1672 net/ipv4/tcp_ipv4.c 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
sk               1673 net/ipv4/tcp_ipv4.c 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
sk               1691 net/ipv4/tcp_ipv4.c 	tail = sk->sk_backlog.tail;
sk               1744 net/ipv4/tcp_ipv4.c 		sk->sk_backlog.len += delta;
sk               1745 net/ipv4/tcp_ipv4.c 		__NET_INC_STATS(sock_net(sk),
sk               1759 net/ipv4/tcp_ipv4.c 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
sk               1760 net/ipv4/tcp_ipv4.c 		bh_unlock_sock(sk);
sk               1761 net/ipv4/tcp_ipv4.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
sk               1768 net/ipv4/tcp_ipv4.c int tcp_filter(struct sock *sk, struct sk_buff *skb)
sk               1772 net/ipv4/tcp_ipv4.c 	return sk_filter_trim_cap(sk, skb, th->doff * 4);
sk               1816 net/ipv4/tcp_ipv4.c 	struct sock *sk;
sk               1846 net/ipv4/tcp_ipv4.c 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
sk               1848 net/ipv4/tcp_ipv4.c 	if (!sk)
sk               1852 net/ipv4/tcp_ipv4.c 	if (sk->sk_state == TCP_TIME_WAIT)
sk               1855 net/ipv4/tcp_ipv4.c 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
sk               1856 net/ipv4/tcp_ipv4.c 		struct request_sock *req = inet_reqsk(sk);
sk               1860 net/ipv4/tcp_ipv4.c 		sk = req->rsk_listener;
sk               1861 net/ipv4/tcp_ipv4.c 		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
sk               1862 net/ipv4/tcp_ipv4.c 			sk_drops_add(sk, skb);
sk               1870 net/ipv4/tcp_ipv4.c 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
sk               1871 net/ipv4/tcp_ipv4.c 			inet_csk_reqsk_queue_drop_and_put(sk, req);
sk               1877 net/ipv4/tcp_ipv4.c 		sock_hold(sk);
sk               1880 net/ipv4/tcp_ipv4.c 		if (!tcp_filter(sk, skb)) {
sk               1884 net/ipv4/tcp_ipv4.c 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
sk               1895 net/ipv4/tcp_ipv4.c 				sock_put(sk);
sk               1900 net/ipv4/tcp_ipv4.c 		if (nsk == sk) {
sk               1903 net/ipv4/tcp_ipv4.c 		} else if (tcp_child_process(sk, nsk, skb)) {
sk               1907 net/ipv4/tcp_ipv4.c 			sock_put(sk);
sk               1911 net/ipv4/tcp_ipv4.c 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
sk               1916 net/ipv4/tcp_ipv4.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
sk               1919 net/ipv4/tcp_ipv4.c 	if (tcp_v4_inbound_md5_hash(sk, skb))
sk               1924 net/ipv4/tcp_ipv4.c 	if (tcp_filter(sk, skb))
sk               1932 net/ipv4/tcp_ipv4.c 	if (sk->sk_state == TCP_LISTEN) {
sk               1933 net/ipv4/tcp_ipv4.c 		ret = tcp_v4_do_rcv(sk, skb);
sk               1937 net/ipv4/tcp_ipv4.c 	sk_incoming_cpu_update(sk);
sk               1939 net/ipv4/tcp_ipv4.c 	bh_lock_sock_nested(sk);
sk               1940 net/ipv4/tcp_ipv4.c 	tcp_segs_in(tcp_sk(sk), skb);
sk               1942 net/ipv4/tcp_ipv4.c 	if (!sock_owned_by_user(sk)) {
sk               1943 net/ipv4/tcp_ipv4.c 		skb_to_free = sk->sk_rx_skb_cache;
sk               1944 net/ipv4/tcp_ipv4.c 		sk->sk_rx_skb_cache = NULL;
sk               1945 net/ipv4/tcp_ipv4.c 		ret = tcp_v4_do_rcv(sk, skb);
sk               1947 net/ipv4/tcp_ipv4.c 		if (tcp_add_backlog(sk, skb))
sk               1951 net/ipv4/tcp_ipv4.c 	bh_unlock_sock(sk);
sk               1957 net/ipv4/tcp_ipv4.c 		sock_put(sk);
sk               1982 net/ipv4/tcp_ipv4.c 	sk_drops_add(sk, skb);
sk               1984 net/ipv4/tcp_ipv4.c 		sock_put(sk);
sk               1989 net/ipv4/tcp_ipv4.c 		inet_twsk_put(inet_twsk(sk));
sk               1996 net/ipv4/tcp_ipv4.c 		inet_twsk_put(inet_twsk(sk));
sk               1999 net/ipv4/tcp_ipv4.c 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
sk               2009 net/ipv4/tcp_ipv4.c 			inet_twsk_deschedule_put(inet_twsk(sk));
sk               2010 net/ipv4/tcp_ipv4.c 			sk = sk2;
sk               2019 net/ipv4/tcp_ipv4.c 		tcp_v4_timewait_ack(sk, skb);
sk               2022 net/ipv4/tcp_ipv4.c 		tcp_v4_send_reset(sk, skb);
sk               2023 net/ipv4/tcp_ipv4.c 		inet_twsk_deschedule_put(inet_twsk(sk));
sk               2036 net/ipv4/tcp_ipv4.c void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
sk               2041 net/ipv4/tcp_ipv4.c 		sk->sk_rx_dst = dst;
sk               2042 net/ipv4/tcp_ipv4.c 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
sk               2078 net/ipv4/tcp_ipv4.c static int tcp_v4_init_sock(struct sock *sk)
sk               2080 net/ipv4/tcp_ipv4.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2082 net/ipv4/tcp_ipv4.c 	tcp_init_sock(sk);
sk               2087 net/ipv4/tcp_ipv4.c 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
sk               2093 net/ipv4/tcp_ipv4.c void tcp_v4_destroy_sock(struct sock *sk)
sk               2095 net/ipv4/tcp_ipv4.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2097 net/ipv4/tcp_ipv4.c 	trace_tcp_destroy_sock(sk);
sk               2099 net/ipv4/tcp_ipv4.c 	tcp_clear_xmit_timers(sk);
sk               2101 net/ipv4/tcp_ipv4.c 	tcp_cleanup_congestion_control(sk);
sk               2103 net/ipv4/tcp_ipv4.c 	tcp_cleanup_ulp(sk);
sk               2106 net/ipv4/tcp_ipv4.c 	tcp_write_queue_purge(sk);
sk               2109 net/ipv4/tcp_ipv4.c 	tcp_fastopen_active_disable_ofo_check(sk);
sk               2117 net/ipv4/tcp_ipv4.c 		tcp_clear_md5_list(sk);
sk               2124 net/ipv4/tcp_ipv4.c 	if (inet_csk(sk)->icsk_bind_hash)
sk               2125 net/ipv4/tcp_ipv4.c 		inet_put_port(sk);
sk               2131 net/ipv4/tcp_ipv4.c 	tcp_fastopen_destroy_cipher(sk);
sk               2134 net/ipv4/tcp_ipv4.c 	sk_sockets_allocated_dec(sk);
sk               2153 net/ipv4/tcp_ipv4.c 	struct sock *sk = cur;
sk               2155 net/ipv4/tcp_ipv4.c 	if (!sk) {
sk               2159 net/ipv4/tcp_ipv4.c 		sk = sk_nulls_head(&ilb->nulls_head);
sk               2167 net/ipv4/tcp_ipv4.c 	sk = sk_nulls_next(sk);
sk               2169 net/ipv4/tcp_ipv4.c 	sk_nulls_for_each_from(sk, node) {
sk               2170 net/ipv4/tcp_ipv4.c 		if (!net_eq(sock_net(sk), net))
sk               2172 net/ipv4/tcp_ipv4.c 		if (sk->sk_family == afinfo->family)
sk               2173 net/ipv4/tcp_ipv4.c 			return sk;
sk               2216 net/ipv4/tcp_ipv4.c 		struct sock *sk;
sk               2225 net/ipv4/tcp_ipv4.c 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
sk               2226 net/ipv4/tcp_ipv4.c 			if (sk->sk_family != afinfo->family ||
sk               2227 net/ipv4/tcp_ipv4.c 			    !net_eq(sock_net(sk), net)) {
sk               2230 net/ipv4/tcp_ipv4.c 			rc = sk;
sk               2242 net/ipv4/tcp_ipv4.c 	struct sock *sk = cur;
sk               2250 net/ipv4/tcp_ipv4.c 	sk = sk_nulls_next(sk);
sk               2252 net/ipv4/tcp_ipv4.c 	sk_nulls_for_each_from(sk, node) {
sk               2253 net/ipv4/tcp_ipv4.c 		if (sk->sk_family == afinfo->family &&
sk               2254 net/ipv4/tcp_ipv4.c 		    net_eq(sock_net(sk), net))
sk               2255 net/ipv4/tcp_ipv4.c 			return sk;
sk               2424 net/ipv4/tcp_ipv4.c static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
sk               2428 net/ipv4/tcp_ipv4.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               2429 net/ipv4/tcp_ipv4.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               2430 net/ipv4/tcp_ipv4.c 	const struct inet_sock *inet = inet_sk(sk);
sk               2447 net/ipv4/tcp_ipv4.c 	} else if (timer_pending(&sk->sk_timer)) {
sk               2449 net/ipv4/tcp_ipv4.c 		timer_expires	= sk->sk_timer.expires;
sk               2455 net/ipv4/tcp_ipv4.c 	state = inet_sk_state_load(sk);
sk               2457 net/ipv4/tcp_ipv4.c 		rx_queue = sk->sk_ack_backlog;
sk               2473 net/ipv4/tcp_ipv4.c 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
sk               2475 net/ipv4/tcp_ipv4.c 		sock_i_ino(sk),
sk               2476 net/ipv4/tcp_ipv4.c 		refcount_read(&sk->sk_refcnt), sk,
sk               2479 net/ipv4/tcp_ipv4.c 		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
sk               2510 net/ipv4/tcp_ipv4.c 	struct sock *sk = v;
sk               2521 net/ipv4/tcp_ipv4.c 	if (sk->sk_state == TCP_TIME_WAIT)
sk               2523 net/ipv4/tcp_ipv4.c 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
sk               2641 net/ipv4/tcp_ipv4.c 		struct sock *sk;
sk               2643 net/ipv4/tcp_ipv4.c 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
sk               2647 net/ipv4/tcp_ipv4.c 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
sk               2652 net/ipv4/tcp_ipv4.c 		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
sk               2654 net/ipv4/tcp_ipv4.c 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
sk                 96 net/ipv4/tcp_lp.c static void tcp_lp_init(struct sock *sk)
sk                 98 net/ipv4/tcp_lp.c 	struct lp *lp = inet_csk_ca(sk);
sk                119 net/ipv4/tcp_lp.c static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                121 net/ipv4/tcp_lp.c 	struct lp *lp = inet_csk_ca(sk);
sk                124 net/ipv4/tcp_lp.c 		tcp_reno_cong_avoid(sk, ack, acked);
sk                134 net/ipv4/tcp_lp.c static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
sk                136 net/ipv4/tcp_lp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                137 net/ipv4/tcp_lp.c 	struct lp *lp = inet_csk_ca(sk);
sk                187 net/ipv4/tcp_lp.c static u32 tcp_lp_owd_calculator(struct sock *sk)
sk                189 net/ipv4/tcp_lp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                190 net/ipv4/tcp_lp.c 	struct lp *lp = inet_csk_ca(sk);
sk                193 net/ipv4/tcp_lp.c 	lp->remote_hz = tcp_lp_remote_hz_estimator(sk);
sk                221 net/ipv4/tcp_lp.c static void tcp_lp_rtt_sample(struct sock *sk, u32 rtt)
sk                223 net/ipv4/tcp_lp.c 	struct lp *lp = inet_csk_ca(sk);
sk                224 net/ipv4/tcp_lp.c 	s64 mowd = tcp_lp_owd_calculator(sk);
sk                264 net/ipv4/tcp_lp.c static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
sk                266 net/ipv4/tcp_lp.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                267 net/ipv4/tcp_lp.c 	struct lp *lp = inet_csk_ca(sk);
sk                272 net/ipv4/tcp_lp.c 		tcp_lp_rtt_sample(sk, sample->rtt_us);
sk                271 net/ipv4/tcp_metrics.c static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
sk                280 net/ipv4/tcp_metrics.c 	if (sk->sk_family == AF_INET) {
sk                281 net/ipv4/tcp_metrics.c 		inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
sk                282 net/ipv4/tcp_metrics.c 		inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
sk                283 net/ipv4/tcp_metrics.c 		hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
sk                286 net/ipv4/tcp_metrics.c 	else if (sk->sk_family == AF_INET6) {
sk                287 net/ipv4/tcp_metrics.c 		if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
sk                288 net/ipv4/tcp_metrics.c 			inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr);
sk                289 net/ipv4/tcp_metrics.c 			inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr);
sk                290 net/ipv4/tcp_metrics.c 			hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr);
sk                292 net/ipv4/tcp_metrics.c 			inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr);
sk                293 net/ipv4/tcp_metrics.c 			inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr);
sk                294 net/ipv4/tcp_metrics.c 			hash = ipv6_addr_hash(&sk->sk_v6_daddr);
sk                320 net/ipv4/tcp_metrics.c void tcp_update_metrics(struct sock *sk)
sk                322 net/ipv4/tcp_metrics.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                323 net/ipv4/tcp_metrics.c 	struct dst_entry *dst = __sk_dst_get(sk);
sk                324 net/ipv4/tcp_metrics.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                325 net/ipv4/tcp_metrics.c 	struct net *net = sock_net(sk);
sk                331 net/ipv4/tcp_metrics.c 	sk_dst_confirm(sk);
sk                341 net/ipv4/tcp_metrics.c 		tm = tcp_get_metrics(sk, dst, false);
sk                346 net/ipv4/tcp_metrics.c 		tm = tcp_get_metrics(sk, dst, true);
sk                440 net/ipv4/tcp_metrics.c void tcp_init_metrics(struct sock *sk)
sk                442 net/ipv4/tcp_metrics.c 	struct dst_entry *dst = __sk_dst_get(sk);
sk                443 net/ipv4/tcp_metrics.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                447 net/ipv4/tcp_metrics.c 	sk_dst_confirm(sk);
sk                452 net/ipv4/tcp_metrics.c 	tm = tcp_get_metrics(sk, dst, true);
sk                502 net/ipv4/tcp_metrics.c 		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
sk                513 net/ipv4/tcp_metrics.c 		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
sk                538 net/ipv4/tcp_metrics.c void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
sk                544 net/ipv4/tcp_metrics.c 	tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
sk                561 net/ipv4/tcp_metrics.c void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
sk                565 net/ipv4/tcp_metrics.c 	struct dst_entry *dst = __sk_dst_get(sk);
sk                571 net/ipv4/tcp_metrics.c 	tm = tcp_get_metrics(sk, dst, true);
sk                742 net/ipv4/tcp_metrics.c 	struct net *net = sock_net(skb->sk);
sk                253 net/ipv4/tcp_minisocks.c void tcp_time_wait(struct sock *sk, int state, int timeo)
sk                255 net/ipv4/tcp_minisocks.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                256 net/ipv4/tcp_minisocks.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                258 net/ipv4/tcp_minisocks.c 	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
sk                260 net/ipv4/tcp_minisocks.c 	tw = inet_twsk_alloc(sk, tcp_death_row, state);
sk                265 net/ipv4/tcp_minisocks.c 		struct inet_sock *inet = inet_sk(sk);
sk                268 net/ipv4/tcp_minisocks.c 		tw->tw_mark		= sk->sk_mark;
sk                269 net/ipv4/tcp_minisocks.c 		tw->tw_priority		= sk->sk_priority;
sk                281 net/ipv4/tcp_minisocks.c 			struct ipv6_pinfo *np = inet6_sk(sk);
sk                283 net/ipv4/tcp_minisocks.c 			tw->tw_v6_daddr = sk->sk_v6_daddr;
sk                284 net/ipv4/tcp_minisocks.c 			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
sk                287 net/ipv4/tcp_minisocks.c 			tw->tw_txhash = sk->sk_txhash;
sk                288 net/ipv4/tcp_minisocks.c 			tw->tw_ipv6only = sk->sk_ipv6only;
sk                304 net/ipv4/tcp_minisocks.c 				key = tp->af_specific->md5_lookup(sk, sk);
sk                329 net/ipv4/tcp_minisocks.c 		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
sk                336 net/ipv4/tcp_minisocks.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
sk                339 net/ipv4/tcp_minisocks.c 	tcp_update_metrics(sk);
sk                340 net/ipv4/tcp_minisocks.c 	tcp_done(sk);
sk                344 net/ipv4/tcp_minisocks.c void tcp_twsk_destructor(struct sock *sk)
sk                348 net/ipv4/tcp_minisocks.c 		struct tcp_timewait_sock *twsk = tcp_twsk(sk);
sk                406 net/ipv4/tcp_minisocks.c void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
sk                408 net/ipv4/tcp_minisocks.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                429 net/ipv4/tcp_minisocks.c 		tcp_assign_congestion_control(sk);
sk                431 net/ipv4/tcp_minisocks.c 	tcp_set_ca_state(sk, TCP_CA_Open);
sk                456 net/ipv4/tcp_minisocks.c struct sock *tcp_create_openreq_child(const struct sock *sk,
sk                460 net/ipv4/tcp_minisocks.c 	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
sk                472 net/ipv4/tcp_minisocks.c 	oldtp = tcp_sk(sk);
sk                541 net/ipv4/tcp_minisocks.c 	if (newtp->af_specific->md5_lookup(sk, newsk))
sk                551 net/ipv4/tcp_minisocks.c 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
sk                568 net/ipv4/tcp_minisocks.c struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
sk                581 net/ipv4/tcp_minisocks.c 		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);
sk                623 net/ipv4/tcp_minisocks.c 		if (!tcp_oow_rate_limited(sock_net(sk), skb,
sk                627 net/ipv4/tcp_minisocks.c 		    !inet_rtx_syn_ack(sk, req)) {
sk                700 net/ipv4/tcp_minisocks.c 		return sk;
sk                713 net/ipv4/tcp_minisocks.c 		    !tcp_oow_rate_limited(sock_net(sk), skb,
sk                716 net/ipv4/tcp_minisocks.c 			req->rsk_ops->send_ack(sk, skb, req);
sk                718 net/ipv4/tcp_minisocks.c 			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
sk                737 net/ipv4/tcp_minisocks.c 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
sk                754 net/ipv4/tcp_minisocks.c 		return sk;
sk                757 net/ipv4/tcp_minisocks.c 	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
sk                760 net/ipv4/tcp_minisocks.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
sk                770 net/ipv4/tcp_minisocks.c 	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
sk                778 net/ipv4/tcp_minisocks.c 	return inet_csk_complete_hashdance(sk, child, req, own_req);
sk                781 net/ipv4/tcp_minisocks.c 	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
sk                793 net/ipv4/tcp_minisocks.c 		req->rsk_ops->send_reset(sk, skb);
sk                795 net/ipv4/tcp_minisocks.c 		reqsk_fastopen_remove(sk, req, true);
sk                796 net/ipv4/tcp_minisocks.c 		tcp_reset(sk);
sk                799 net/ipv4/tcp_minisocks.c 		inet_csk_reqsk_queue_drop(sk, req);
sk                800 net/ipv4/tcp_minisocks.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
sk                124 net/ipv4/tcp_nv.c static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)
sk                126 net/ipv4/tcp_nv.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                138 net/ipv4/tcp_nv.c static void tcpnv_init(struct sock *sk)
sk                140 net/ipv4/tcp_nv.c 	struct tcpnv *ca = inet_csk_ca(sk);
sk                143 net/ipv4/tcp_nv.c 	tcpnv_reset(ca, sk);
sk                150 net/ipv4/tcp_nv.c 	base_rtt = tcp_call_bpf(sk, BPF_SOCK_OPS_BASE_RTT, 0, NULL);
sk                181 net/ipv4/tcp_nv.c static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                183 net/ipv4/tcp_nv.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                184 net/ipv4/tcp_nv.c 	struct tcpnv *ca = inet_csk_ca(sk);
sk                187 net/ipv4/tcp_nv.c 	if (!tcp_is_cwnd_limited(sk))
sk                209 net/ipv4/tcp_nv.c static u32 tcpnv_recalc_ssthresh(struct sock *sk)
sk                211 net/ipv4/tcp_nv.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                216 net/ipv4/tcp_nv.c static void tcpnv_state(struct sock *sk, u8 new_state)
sk                218 net/ipv4/tcp_nv.c 	struct tcpnv *ca = inet_csk_ca(sk);
sk                221 net/ipv4/tcp_nv.c 		tcpnv_reset(ca, sk);
sk                240 net/ipv4/tcp_nv.c static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
sk                242 net/ipv4/tcp_nv.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                243 net/ipv4/tcp_nv.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                244 net/ipv4/tcp_nv.c 	struct tcpnv *ca = inet_csk_ca(sk);
sk                454 net/ipv4/tcp_nv.c static size_t tcpnv_get_info(struct sock *sk, u32 ext, int *attr,
sk                457 net/ipv4/tcp_nv.c 	const struct tcpnv *ca = inet_csk_ca(sk);
sk                136 net/ipv4/tcp_offload.c 			skb->sk = gso_skb->sk;
sk                154 net/ipv4/tcp_offload.c 		swap(gso_skb->sk, skb->sk);
sk                162 net/ipv4/tcp_offload.c 			refcount_add(delta, &skb->sk->sk_wmem_alloc);
sk                164 net/ipv4/tcp_offload.c 			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
sk                 60 net/ipv4/tcp_output.c static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
sk                 64 net/ipv4/tcp_output.c static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
sk                 66 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                 67 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 72 net/ipv4/tcp_output.c 	__skb_unlink(skb, &sk->sk_write_queue);
sk                 73 net/ipv4/tcp_output.c 	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
sk                 80 net/ipv4/tcp_output.c 		tcp_rearm_rto(sk);
sk                 82 net/ipv4/tcp_output.c 	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
sk                 93 net/ipv4/tcp_output.c static inline __u32 tcp_acceptable_seq(const struct sock *sk)
sk                 95 net/ipv4/tcp_output.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                119 net/ipv4/tcp_output.c static __u16 tcp_advertise_mss(struct sock *sk)
sk                121 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                122 net/ipv4/tcp_output.c 	const struct dst_entry *dst = __sk_dst_get(sk);
sk                140 net/ipv4/tcp_output.c void tcp_cwnd_restart(struct sock *sk, s32 delta)
sk                142 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                143 net/ipv4/tcp_output.c 	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
sk                146 net/ipv4/tcp_output.c 	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
sk                148 net/ipv4/tcp_output.c 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
sk                151 net/ipv4/tcp_output.c 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
sk                160 net/ipv4/tcp_output.c 				struct sock *sk)
sk                162 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                166 net/ipv4/tcp_output.c 		tcp_ca_event(sk, CA_EVENT_TX_START);
sk                175 net/ipv4/tcp_output.c 		inet_csk_inc_pingpong_cnt(sk);
sk                181 net/ipv4/tcp_output.c static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
sk                184 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                187 net/ipv4/tcp_output.c 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
sk                191 net/ipv4/tcp_output.c 			__sock_put(sk);
sk                196 net/ipv4/tcp_output.c 	tcp_dec_quickack_mode(sk, pkts);
sk                197 net/ipv4/tcp_output.c 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
sk                207 net/ipv4/tcp_output.c void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
sk                231 net/ipv4/tcp_output.c 	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
sk                242 net/ipv4/tcp_output.c 		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
sk                258 net/ipv4/tcp_output.c static u16 tcp_select_window(struct sock *sk)
sk                260 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                263 net/ipv4/tcp_output.c 	u32 new_win = __tcp_select_window(sk);
sk                275 net/ipv4/tcp_output.c 			NET_INC_STATS(sock_net(sk),
sk                286 net/ipv4/tcp_output.c 	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
sk                298 net/ipv4/tcp_output.c 			NET_INC_STATS(sock_net(sk),
sk                301 net/ipv4/tcp_output.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
sk                308 net/ipv4/tcp_output.c static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
sk                310 net/ipv4/tcp_output.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                315 net/ipv4/tcp_output.c 	else if (tcp_ca_needs_ecn(sk) ||
sk                316 net/ipv4/tcp_output.c 		 tcp_bpf_ca_needs_ecn(sk))
sk                317 net/ipv4/tcp_output.c 		INET_ECN_xmit(sk);
sk                321 net/ipv4/tcp_output.c static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
sk                323 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                324 net/ipv4/tcp_output.c 	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
sk                325 net/ipv4/tcp_output.c 	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
sk                326 net/ipv4/tcp_output.c 		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
sk                329 net/ipv4/tcp_output.c 		const struct dst_entry *dst = __sk_dst_get(sk);
sk                340 net/ipv4/tcp_output.c 		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
sk                341 net/ipv4/tcp_output.c 			INET_ECN_xmit(sk);
sk                345 net/ipv4/tcp_output.c static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
sk                347 net/ipv4/tcp_output.c 	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
sk                364 net/ipv4/tcp_output.c static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
sk                367 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                373 net/ipv4/tcp_output.c 			INET_ECN_xmit(sk);
sk                379 net/ipv4/tcp_output.c 		} else if (!tcp_ca_needs_ecn(sk)) {
sk                381 net/ipv4/tcp_output.c 			INET_ECN_dontxmit(sk);
sk                590 net/ipv4/tcp_output.c static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
sk                594 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                602 net/ipv4/tcp_output.c 		*md5 = tp->af_specific->md5_lookup(sk, sk);
sk                619 net/ipv4/tcp_output.c 	opts->mss = tcp_advertise_mss(sk);
sk                622 net/ipv4/tcp_output.c 	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
sk                628 net/ipv4/tcp_output.c 	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
sk                633 net/ipv4/tcp_output.c 	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
sk                660 net/ipv4/tcp_output.c static unsigned int tcp_synack_options(const struct sock *sk,
sk                717 net/ipv4/tcp_output.c 	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
sk                725 net/ipv4/tcp_output.c static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
sk                729 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                739 net/ipv4/tcp_output.c 		*md5 = tp->af_specific->md5_lookup(sk, sk);
sk                790 net/ipv4/tcp_output.c static void tcp_tsq_write(struct sock *sk)
sk                792 net/ipv4/tcp_output.c 	if ((1 << sk->sk_state) &
sk                795 net/ipv4/tcp_output.c 		struct tcp_sock *tp = tcp_sk(sk);
sk                800 net/ipv4/tcp_output.c 			tcp_xmit_retransmit_queue(sk);
sk                803 net/ipv4/tcp_output.c 		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
sk                808 net/ipv4/tcp_output.c static void tcp_tsq_handler(struct sock *sk)
sk                810 net/ipv4/tcp_output.c 	bh_lock_sock(sk);
sk                811 net/ipv4/tcp_output.c 	if (!sock_owned_by_user(sk))
sk                812 net/ipv4/tcp_output.c 		tcp_tsq_write(sk);
sk                813 net/ipv4/tcp_output.c 	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
sk                814 net/ipv4/tcp_output.c 		sock_hold(sk);
sk                815 net/ipv4/tcp_output.c 	bh_unlock_sock(sk);
sk                830 net/ipv4/tcp_output.c 	struct sock *sk;
sk                840 net/ipv4/tcp_output.c 		sk = (struct sock *)tp;
sk                842 net/ipv4/tcp_output.c 		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
sk                844 net/ipv4/tcp_output.c 		tcp_tsq_handler(sk);
sk                845 net/ipv4/tcp_output.c 		sk_free(sk);
sk                860 net/ipv4/tcp_output.c void tcp_release_cb(struct sock *sk)
sk                866 net/ipv4/tcp_output.c 		flags = sk->sk_tsq_flags;
sk                870 net/ipv4/tcp_output.c 	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
sk                873 net/ipv4/tcp_output.c 		tcp_tsq_write(sk);
sk                874 net/ipv4/tcp_output.c 		__sock_put(sk);
sk                885 net/ipv4/tcp_output.c 	sock_release_ownership(sk);
sk                888 net/ipv4/tcp_output.c 		tcp_write_timer_handler(sk);
sk                889 net/ipv4/tcp_output.c 		__sock_put(sk);
sk                892 net/ipv4/tcp_output.c 		tcp_delack_timer_handler(sk);
sk                893 net/ipv4/tcp_output.c 		__sock_put(sk);
sk                896 net/ipv4/tcp_output.c 		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
sk                897 net/ipv4/tcp_output.c 		__sock_put(sk);
sk                923 net/ipv4/tcp_output.c 	struct sock *sk = skb->sk;
sk                924 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                930 net/ipv4/tcp_output.c 	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
sk                939 net/ipv4/tcp_output.c 	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
sk                942 net/ipv4/tcp_output.c 	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
sk                950 net/ipv4/tcp_output.c 		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
sk                965 net/ipv4/tcp_output.c 	sk_free(sk);
sk                974 net/ipv4/tcp_output.c 	struct sock *sk = (struct sock *)tp;
sk                976 net/ipv4/tcp_output.c 	tcp_tsq_handler(sk);
sk                977 net/ipv4/tcp_output.c 	sock_put(sk);
sk                982 net/ipv4/tcp_output.c static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
sk                985 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                987 net/ipv4/tcp_output.c 	if (sk->sk_pacing_status != SK_PACING_NONE) {
sk                988 net/ipv4/tcp_output.c 		unsigned long rate = sk->sk_pacing_rate;
sk               1017 net/ipv4/tcp_output.c static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
sk               1020 net/ipv4/tcp_output.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1033 net/ipv4/tcp_output.c 	tp = tcp_sk(sk);
sk               1057 net/ipv4/tcp_output.c 	inet = inet_sk(sk);
sk               1062 net/ipv4/tcp_output.c 		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
sk               1064 net/ipv4/tcp_output.c 		tcp_options_size = tcp_established_options(sk, skb, &opts,
sk               1086 net/ipv4/tcp_output.c 	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
sk               1099 net/ipv4/tcp_output.c 	skb->sk = sk;
sk               1101 net/ipv4/tcp_output.c 	skb_set_hash_from_sk(skb, sk);
sk               1102 net/ipv4/tcp_output.c 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
sk               1104 net/ipv4/tcp_output.c 	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
sk               1130 net/ipv4/tcp_output.c 	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
sk               1132 net/ipv4/tcp_output.c 		th->window      = htons(tcp_select_window(sk));
sk               1133 net/ipv4/tcp_output.c 		tcp_ecn_send(sk, skb, th, tcp_header_size);
sk               1143 net/ipv4/tcp_output.c 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
sk               1145 net/ipv4/tcp_output.c 					       md5, sk, skb);
sk               1149 net/ipv4/tcp_output.c 	icsk->icsk_af_ops->send_check(sk, skb);
sk               1152 net/ipv4/tcp_output.c 		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
sk               1155 net/ipv4/tcp_output.c 		tcp_event_data_sent(tp, sk);
sk               1161 net/ipv4/tcp_output.c 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
sk               1177 net/ipv4/tcp_output.c 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
sk               1180 net/ipv4/tcp_output.c 		tcp_enter_cwr(sk);
sk               1184 net/ipv4/tcp_output.c 		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
sk               1185 net/ipv4/tcp_output.c 		tcp_rate_skb_sent(sk, oskb);
sk               1190 net/ipv4/tcp_output.c static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
sk               1193 net/ipv4/tcp_output.c 	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
sk               1194 net/ipv4/tcp_output.c 				  tcp_sk(sk)->rcv_nxt);
sk               1202 net/ipv4/tcp_output.c static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
sk               1204 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1209 net/ipv4/tcp_output.c 	tcp_add_write_queue_tail(sk, skb);
sk               1210 net/ipv4/tcp_output.c 	sk_wmem_queued_add(sk, skb->truesize);
sk               1211 net/ipv4/tcp_output.c 	sk_mem_charge(sk, skb->truesize);
sk               1232 net/ipv4/tcp_output.c static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
sk               1234 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1289 net/ipv4/tcp_output.c 					 struct sock *sk,
sk               1293 net/ipv4/tcp_output.c 		__skb_queue_after(&sk->sk_write_queue, skb, buff);
sk               1295 net/ipv4/tcp_output.c 		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
sk               1303 net/ipv4/tcp_output.c int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
sk               1307 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1326 net/ipv4/tcp_output.c 	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
sk               1327 net/ipv4/tcp_output.c 	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
sk               1329 net/ipv4/tcp_output.c 		     skb != tcp_rtx_queue_head(sk) &&
sk               1330 net/ipv4/tcp_output.c 		     skb != tcp_rtx_queue_tail(sk))) {
sk               1331 net/ipv4/tcp_output.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
sk               1339 net/ipv4/tcp_output.c 	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
sk               1344 net/ipv4/tcp_output.c 	sk_wmem_queued_add(sk, buff->truesize);
sk               1345 net/ipv4/tcp_output.c 	sk_mem_charge(sk, buff->truesize);
sk               1386 net/ipv4/tcp_output.c 			tcp_adjust_pcount(sk, skb, diff);
sk               1391 net/ipv4/tcp_output.c 	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
sk               1440 net/ipv4/tcp_output.c int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
sk               1454 net/ipv4/tcp_output.c 		sk_wmem_queued_add(sk, -delta_truesize);
sk               1455 net/ipv4/tcp_output.c 		sk_mem_uncharge(sk, delta_truesize);
sk               1456 net/ipv4/tcp_output.c 		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
sk               1467 net/ipv4/tcp_output.c static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
sk               1469 net/ipv4/tcp_output.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1470 net/ipv4/tcp_output.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1480 net/ipv4/tcp_output.c 		const struct dst_entry *dst = __sk_dst_get(sk);
sk               1494 net/ipv4/tcp_output.c 	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
sk               1499 net/ipv4/tcp_output.c int tcp_mtu_to_mss(struct sock *sk, int pmtu)
sk               1502 net/ipv4/tcp_output.c 	return __tcp_mtu_to_mss(sk, pmtu) -
sk               1503 net/ipv4/tcp_output.c 	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
sk               1507 net/ipv4/tcp_output.c int tcp_mss_to_mtu(struct sock *sk, int mss)
sk               1509 net/ipv4/tcp_output.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1510 net/ipv4/tcp_output.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1520 net/ipv4/tcp_output.c 		const struct dst_entry *dst = __sk_dst_get(sk);
sk               1530 net/ipv4/tcp_output.c void tcp_mtup_init(struct sock *sk)
sk               1532 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1533 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               1534 net/ipv4/tcp_output.c 	struct net *net = sock_net(sk);
sk               1539 net/ipv4/tcp_output.c 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
sk               1568 net/ipv4/tcp_output.c unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
sk               1570 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1571 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               1577 net/ipv4/tcp_output.c 	mss_now = tcp_mtu_to_mss(sk, pmtu);
sk               1583 net/ipv4/tcp_output.c 		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
sk               1593 net/ipv4/tcp_output.c unsigned int tcp_current_mss(struct sock *sk)
sk               1595 net/ipv4/tcp_output.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1596 net/ipv4/tcp_output.c 	const struct dst_entry *dst = __sk_dst_get(sk);
sk               1606 net/ipv4/tcp_output.c 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
sk               1607 net/ipv4/tcp_output.c 			mss_now = tcp_sync_mss(sk, mtu);
sk               1610 net/ipv4/tcp_output.c 	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
sk               1628 net/ipv4/tcp_output.c static void tcp_cwnd_application_limited(struct sock *sk)
sk               1630 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1632 net/ipv4/tcp_output.c 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
sk               1633 net/ipv4/tcp_output.c 	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
sk               1635 net/ipv4/tcp_output.c 		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
sk               1638 net/ipv4/tcp_output.c 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
sk               1646 net/ipv4/tcp_output.c static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
sk               1648 net/ipv4/tcp_output.c 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
sk               1649 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1661 net/ipv4/tcp_output.c 	if (tcp_is_cwnd_limited(sk)) {
sk               1670 net/ipv4/tcp_output.c 		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
sk               1671 net/ipv4/tcp_output.c 		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
sk               1673 net/ipv4/tcp_output.c 			tcp_cwnd_application_limited(sk);
sk               1682 net/ipv4/tcp_output.c 		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
sk               1683 net/ipv4/tcp_output.c 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
sk               1684 net/ipv4/tcp_output.c 		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
sk               1685 net/ipv4/tcp_output.c 			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
sk               1729 net/ipv4/tcp_output.c static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
sk               1735 net/ipv4/tcp_output.c 		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
sk               1736 net/ipv4/tcp_output.c 		      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
sk               1751 net/ipv4/tcp_output.c static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
sk               1753 net/ipv4/tcp_output.c 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
sk               1757 net/ipv4/tcp_output.c 			ca_ops->min_tso_segs(sk) :
sk               1758 net/ipv4/tcp_output.c 			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
sk               1760 net/ipv4/tcp_output.c 	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
sk               1761 net/ipv4/tcp_output.c 	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
sk               1765 net/ipv4/tcp_output.c static unsigned int tcp_mss_split_point(const struct sock *sk,
sk               1771 net/ipv4/tcp_output.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               1777 net/ipv4/tcp_output.c 	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
sk               1882 net/ipv4/tcp_output.c static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
sk               1891 net/ipv4/tcp_output.c 		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
sk               1894 net/ipv4/tcp_output.c 	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
sk               1899 net/ipv4/tcp_output.c 	sk_wmem_queued_add(sk, buff->truesize);
sk               1900 net/ipv4/tcp_output.c 	sk_mem_charge(sk, buff->truesize);
sk               1929 net/ipv4/tcp_output.c 	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
sk               1939 net/ipv4/tcp_output.c static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
sk               1944 net/ipv4/tcp_output.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               1946 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               1980 net/ipv4/tcp_output.c 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
sk               1983 net/ipv4/tcp_output.c 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
sk               2004 net/ipv4/tcp_output.c 	head = tcp_rtx_queue_head(sk);
sk               2041 net/ipv4/tcp_output.c static inline void tcp_mtu_check_reprobe(struct sock *sk)
sk               2043 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2044 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2045 net/ipv4/tcp_output.c 	struct net *net = sock_net(sk);
sk               2052 net/ipv4/tcp_output.c 		int mss = tcp_current_mss(sk);
sk               2059 net/ipv4/tcp_output.c 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
sk               2066 net/ipv4/tcp_output.c static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
sk               2070 net/ipv4/tcp_output.c 	skb = tcp_send_head(sk);
sk               2071 net/ipv4/tcp_output.c 	tcp_for_write_queue_from_safe(skb, next, sk) {
sk               2093 net/ipv4/tcp_output.c static int tcp_mtu_probe(struct sock *sk)
sk               2095 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2096 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2098 net/ipv4/tcp_output.c 	struct net *net = sock_net(sk);
sk               2112 net/ipv4/tcp_output.c 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
sk               2121 net/ipv4/tcp_output.c 	mss_now = tcp_current_mss(sk);
sk               2122 net/ipv4/tcp_output.c 	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
sk               2130 net/ipv4/tcp_output.c 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
sk               2135 net/ipv4/tcp_output.c 		tcp_mtu_check_reprobe(sk);
sk               2156 net/ipv4/tcp_output.c 	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
sk               2160 net/ipv4/tcp_output.c 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
sk               2163 net/ipv4/tcp_output.c 	sk_wmem_queued_add(sk, nskb->truesize);
sk               2164 net/ipv4/tcp_output.c 	sk_mem_charge(sk, nskb->truesize);
sk               2166 net/ipv4/tcp_output.c 	skb = tcp_send_head(sk);
sk               2176 net/ipv4/tcp_output.c 	tcp_insert_write_queue_before(nskb, skb, sk);
sk               2177 net/ipv4/tcp_output.c 	tcp_highest_sack_replace(sk, skb, nskb);
sk               2180 net/ipv4/tcp_output.c 	tcp_for_write_queue_from_safe(skb, next, sk) {
sk               2193 net/ipv4/tcp_output.c 			tcp_unlink_write_queue(skb, sk);
sk               2194 net/ipv4/tcp_output.c 			sk_wmem_free_skb(sk, skb);
sk               2217 net/ipv4/tcp_output.c 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
sk               2221 net/ipv4/tcp_output.c 		tcp_event_new_data_sent(sk, nskb);
sk               2223 net/ipv4/tcp_output.c 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
sk               2233 net/ipv4/tcp_output.c static bool tcp_pacing_check(struct sock *sk)
sk               2235 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2237 net/ipv4/tcp_output.c 	if (!tcp_needs_internal_pacing(sk))
sk               2247 net/ipv4/tcp_output.c 		sock_hold(sk);
sk               2263 net/ipv4/tcp_output.c static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
sk               2270 net/ipv4/tcp_output.c 		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
sk               2271 net/ipv4/tcp_output.c 	if (sk->sk_pacing_status == SK_PACING_NONE)
sk               2273 net/ipv4/tcp_output.c 			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
sk               2277 net/ipv4/tcp_output.c 	    tcp_sk(sk)->tcp_tx_delay) {
sk               2278 net/ipv4/tcp_output.c 		u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
sk               2288 net/ipv4/tcp_output.c 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
sk               2294 net/ipv4/tcp_output.c 		if (tcp_rtx_queue_empty(sk))
sk               2297 net/ipv4/tcp_output.c 		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
sk               2303 net/ipv4/tcp_output.c 		if (refcount_read(&sk->sk_wmem_alloc) > limit)
sk               2320 net/ipv4/tcp_output.c void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
sk               2322 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2333 net/ipv4/tcp_output.c void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
sk               2335 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2345 net/ipv4/tcp_output.c 	if (tcp_rtx_and_write_queues_empty(sk))
sk               2365 net/ipv4/tcp_output.c static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
sk               2368 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2381 net/ipv4/tcp_output.c 		result = tcp_mtu_probe(sk);
sk               2389 net/ipv4/tcp_output.c 	max_segs = tcp_tso_segs(sk, mss_now);
sk               2390 net/ipv4/tcp_output.c 	while ((skb = tcp_send_head(sk))) {
sk               2401 net/ipv4/tcp_output.c 		if (tcp_pacing_check(sk))
sk               2423 net/ipv4/tcp_output.c 						     (tcp_skb_is_last(sk, skb) ?
sk               2428 net/ipv4/tcp_output.c 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
sk               2435 net/ipv4/tcp_output.c 			limit = tcp_mss_split_point(sk, skb, mss_now,
sk               2442 net/ipv4/tcp_output.c 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
sk               2445 net/ipv4/tcp_output.c 		if (tcp_small_queue_check(sk, skb, 0))
sk               2456 net/ipv4/tcp_output.c 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
sk               2463 net/ipv4/tcp_output.c 		tcp_event_new_data_sent(sk, skb);
sk               2473 net/ipv4/tcp_output.c 		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
sk               2475 net/ipv4/tcp_output.c 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
sk               2478 net/ipv4/tcp_output.c 		if (tcp_in_cwnd_reduction(sk))
sk               2483 net/ipv4/tcp_output.c 			tcp_schedule_loss_probe(sk, false);
sk               2485 net/ipv4/tcp_output.c 		tcp_cwnd_validate(sk, is_cwnd_limited);
sk               2488 net/ipv4/tcp_output.c 	return !tp->packets_out && !tcp_write_queue_empty(sk);
sk               2491 net/ipv4/tcp_output.c bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
sk               2493 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2494 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2504 net/ipv4/tcp_output.c 	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
sk               2530 net/ipv4/tcp_output.c 			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
sk               2531 net/ipv4/tcp_output.c 			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
sk               2535 net/ipv4/tcp_output.c 	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
sk               2544 net/ipv4/tcp_output.c static bool skb_still_in_host_queue(const struct sock *sk,
sk               2547 net/ipv4/tcp_output.c 	if (unlikely(skb_fclone_busy(sk, skb))) {
sk               2548 net/ipv4/tcp_output.c 		NET_INC_STATS(sock_net(sk),
sk               2558 net/ipv4/tcp_output.c void tcp_send_loss_probe(struct sock *sk)
sk               2560 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2563 net/ipv4/tcp_output.c 	int mss = tcp_current_mss(sk);
sk               2565 net/ipv4/tcp_output.c 	skb = tcp_send_head(sk);
sk               2568 net/ipv4/tcp_output.c 		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
sk               2573 net/ipv4/tcp_output.c 	skb = skb_rb_last(&sk->tcp_rtx_queue);
sk               2577 net/ipv4/tcp_output.c 			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
sk               2578 net/ipv4/tcp_output.c 		inet_csk(sk)->icsk_pending = 0;
sk               2586 net/ipv4/tcp_output.c 	if (skb_still_in_host_queue(sk, skb))
sk               2594 net/ipv4/tcp_output.c 		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
sk               2604 net/ipv4/tcp_output.c 	if (__tcp_retransmit_skb(sk, skb, 1))
sk               2611 net/ipv4/tcp_output.c 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
sk               2613 net/ipv4/tcp_output.c 	inet_csk(sk)->icsk_pending = 0;
sk               2615 net/ipv4/tcp_output.c 	tcp_rearm_rto(sk);
sk               2622 net/ipv4/tcp_output.c void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
sk               2629 net/ipv4/tcp_output.c 	if (unlikely(sk->sk_state == TCP_CLOSE))
sk               2632 net/ipv4/tcp_output.c 	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
sk               2633 net/ipv4/tcp_output.c 			   sk_gfp_mask(sk, GFP_ATOMIC)))
sk               2634 net/ipv4/tcp_output.c 		tcp_check_probe_timer(sk);
sk               2640 net/ipv4/tcp_output.c void tcp_push_one(struct sock *sk, unsigned int mss_now)
sk               2642 net/ipv4/tcp_output.c 	struct sk_buff *skb = tcp_send_head(sk);
sk               2646 net/ipv4/tcp_output.c 	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
sk               2701 net/ipv4/tcp_output.c u32 __tcp_select_window(struct sock *sk)
sk               2703 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2704 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2712 net/ipv4/tcp_output.c 	int free_space = tcp_space(sk);
sk               2713 net/ipv4/tcp_output.c 	int allowed_space = tcp_full_space(sk);
sk               2725 net/ipv4/tcp_output.c 		if (tcp_under_memory_pressure(sk))
sk               2795 net/ipv4/tcp_output.c static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
sk               2797 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2812 net/ipv4/tcp_output.c 	tcp_highest_sack_replace(sk, next_skb, skb);
sk               2831 net/ipv4/tcp_output.c 	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
sk               2835 net/ipv4/tcp_output.c 	tcp_rtx_queue_unlink_and_free(next_skb, sk);
sk               2840 net/ipv4/tcp_output.c static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
sk               2856 net/ipv4/tcp_output.c static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
sk               2859 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2863 net/ipv4/tcp_output.c 	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
sk               2869 net/ipv4/tcp_output.c 		if (!tcp_can_collapse(sk, skb))
sk               2888 net/ipv4/tcp_output.c 		if (!tcp_collapse_retrans(sk, to))
sk               2897 net/ipv4/tcp_output.c int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
sk               2899 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               2900 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               2912 net/ipv4/tcp_output.c 	if (refcount_read(&sk->sk_wmem_alloc) >
sk               2913 net/ipv4/tcp_output.c 	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
sk               2914 net/ipv4/tcp_output.c 		  sk->sk_sndbuf))
sk               2917 net/ipv4/tcp_output.c 	if (skb_still_in_host_queue(sk, skb))
sk               2925 net/ipv4/tcp_output.c 		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
sk               2929 net/ipv4/tcp_output.c 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
sk               2932 net/ipv4/tcp_output.c 	cur_mss = tcp_current_mss(sk);
sk               2945 net/ipv4/tcp_output.c 		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
sk               2956 net/ipv4/tcp_output.c 			tcp_adjust_pcount(sk, skb, diff);
sk               2958 net/ipv4/tcp_output.c 			tcp_retrans_try_collapse(sk, skb, cur_mss);
sk               2963 net/ipv4/tcp_output.c 		tcp_ecn_clear_syn(sk, skb);
sk               2967 net/ipv4/tcp_output.c 	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
sk               2969 net/ipv4/tcp_output.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
sk               2985 net/ipv4/tcp_output.c 				err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
sk               2992 net/ipv4/tcp_output.c 			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
sk               2993 net/ipv4/tcp_output.c 			tcp_rate_skb_sent(sk, skb);
sk               2996 net/ipv4/tcp_output.c 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
sk               3005 net/ipv4/tcp_output.c 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
sk               3009 net/ipv4/tcp_output.c 		trace_tcp_retransmit_skb(sk, skb);
sk               3011 net/ipv4/tcp_output.c 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
sk               3016 net/ipv4/tcp_output.c int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
sk               3018 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3019 net/ipv4/tcp_output.c 	int err = __tcp_retransmit_skb(sk, skb, segs);
sk               3046 net/ipv4/tcp_output.c void tcp_xmit_retransmit_queue(struct sock *sk)
sk               3048 net/ipv4/tcp_output.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk               3050 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3057 net/ipv4/tcp_output.c 	rtx_head = tcp_rtx_queue_head(sk);
sk               3059 net/ipv4/tcp_output.c 	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
sk               3064 net/ipv4/tcp_output.c 		if (tcp_pacing_check(sk))
sk               3097 net/ipv4/tcp_output.c 		if (tcp_small_queue_check(sk, skb, 1))
sk               3100 net/ipv4/tcp_output.c 		if (tcp_retransmit_skb(sk, skb, segs))
sk               3103 net/ipv4/tcp_output.c 		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
sk               3105 net/ipv4/tcp_output.c 		if (tcp_in_cwnd_reduction(sk))
sk               3110 net/ipv4/tcp_output.c 			tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk               3111 net/ipv4/tcp_output.c 					     inet_csk(sk)->icsk_rto,
sk               3124 net/ipv4/tcp_output.c void sk_forced_mem_schedule(struct sock *sk, int size)
sk               3128 net/ipv4/tcp_output.c 	if (size <= sk->sk_forward_alloc)
sk               3131 net/ipv4/tcp_output.c 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
sk               3132 net/ipv4/tcp_output.c 	sk_memory_allocated_add(sk, amt);
sk               3134 net/ipv4/tcp_output.c 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
sk               3135 net/ipv4/tcp_output.c 		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
sk               3141 net/ipv4/tcp_output.c void tcp_send_fin(struct sock *sk)
sk               3143 net/ipv4/tcp_output.c 	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
sk               3144 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3151 net/ipv4/tcp_output.c 	if (!tskb && tcp_under_memory_pressure(sk))
sk               3152 net/ipv4/tcp_output.c 		tskb = skb_rb_last(&sk->tcp_rtx_queue);
sk               3158 net/ipv4/tcp_output.c 		if (tcp_write_queue_empty(sk)) {
sk               3169 net/ipv4/tcp_output.c 		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
sk               3175 net/ipv4/tcp_output.c 		sk_forced_mem_schedule(sk, skb->truesize);
sk               3179 net/ipv4/tcp_output.c 		tcp_queue_skb(sk, skb);
sk               3181 net/ipv4/tcp_output.c 	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
sk               3189 net/ipv4/tcp_output.c void tcp_send_active_reset(struct sock *sk, gfp_t priority)
sk               3193 net/ipv4/tcp_output.c 	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
sk               3198 net/ipv4/tcp_output.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
sk               3204 net/ipv4/tcp_output.c 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
sk               3206 net/ipv4/tcp_output.c 	tcp_mstamp_refresh(tcp_sk(sk));
sk               3208 net/ipv4/tcp_output.c 	if (tcp_transmit_skb(sk, skb, 0, priority))
sk               3209 net/ipv4/tcp_output.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
sk               3214 net/ipv4/tcp_output.c 	trace_tcp_send_reset(sk, NULL);
sk               3223 net/ipv4/tcp_output.c int tcp_send_synack(struct sock *sk)
sk               3227 net/ipv4/tcp_output.c 	skb = tcp_rtx_queue_head(sk);
sk               3242 net/ipv4/tcp_output.c 			tcp_highest_sack_replace(sk, skb, nskb);
sk               3243 net/ipv4/tcp_output.c 			tcp_rtx_queue_unlink_and_free(skb, sk);
sk               3245 net/ipv4/tcp_output.c 			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
sk               3246 net/ipv4/tcp_output.c 			sk_wmem_queued_add(sk, nskb->truesize);
sk               3247 net/ipv4/tcp_output.c 			sk_mem_charge(sk, nskb->truesize);
sk               3252 net/ipv4/tcp_output.c 		tcp_ecn_send_synack(sk, skb);
sk               3254 net/ipv4/tcp_output.c 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
sk               3266 net/ipv4/tcp_output.c struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
sk               3272 net/ipv4/tcp_output.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk               3303 net/ipv4/tcp_output.c 		skb_set_owner_w(skb, (struct sock *)sk);
sk               3325 net/ipv4/tcp_output.c 	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
sk               3328 net/ipv4/tcp_output.c 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
sk               3351 net/ipv4/tcp_output.c 	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
sk               3368 net/ipv4/tcp_output.c static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
sk               3370 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               3388 net/ipv4/tcp_output.c static void tcp_connect_init(struct sock *sk)
sk               3390 net/ipv4/tcp_output.c 	const struct dst_entry *dst = __sk_dst_get(sk);
sk               3391 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3399 net/ipv4/tcp_output.c 	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
sk               3403 net/ipv4/tcp_output.c 	if (tp->af_specific->md5_lookup(sk, sk))
sk               3411 net/ipv4/tcp_output.c 	tcp_mtup_init(sk);
sk               3412 net/ipv4/tcp_output.c 	tcp_sync_mss(sk, dst_mtu(dst));
sk               3414 net/ipv4/tcp_output.c 	tcp_ca_dst_init(sk, dst);
sk               3420 net/ipv4/tcp_output.c 	tcp_initialize_rcv_mss(sk);
sk               3423 net/ipv4/tcp_output.c 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
sk               3424 net/ipv4/tcp_output.c 	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
sk               3425 net/ipv4/tcp_output.c 		tp->window_clamp = tcp_full_space(sk);
sk               3427 net/ipv4/tcp_output.c 	rcv_wnd = tcp_rwnd_init_bpf(sk);
sk               3431 net/ipv4/tcp_output.c 	tcp_select_initial_window(sk, tcp_full_space(sk),
sk               3435 net/ipv4/tcp_output.c 				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
sk               3442 net/ipv4/tcp_output.c 	sk->sk_err = 0;
sk               3443 net/ipv4/tcp_output.c 	sock_reset_flag(sk, SOCK_DONE);
sk               3446 net/ipv4/tcp_output.c 	tcp_write_queue_purge(sk);
sk               3459 net/ipv4/tcp_output.c 	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
sk               3460 net/ipv4/tcp_output.c 	inet_csk(sk)->icsk_retransmits = 0;
sk               3464 net/ipv4/tcp_output.c static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
sk               3466 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3471 net/ipv4/tcp_output.c 	sk_wmem_queued_add(sk, skb->truesize);
sk               3472 net/ipv4/tcp_output.c 	sk_mem_charge(sk, skb->truesize);
sk               3484 net/ipv4/tcp_output.c static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
sk               3486 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3492 net/ipv4/tcp_output.c 	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
sk               3501 net/ipv4/tcp_output.c 	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
sk               3509 net/ipv4/tcp_output.c 	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
sk               3533 net/ipv4/tcp_output.c 	tcp_connect_queue_skb(sk, syn_data);
sk               3535 net/ipv4/tcp_output.c 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
sk               3537 net/ipv4/tcp_output.c 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
sk               3550 net/ipv4/tcp_output.c 		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
sk               3551 net/ipv4/tcp_output.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
sk               3556 net/ipv4/tcp_output.c 	__skb_queue_tail(&sk->sk_write_queue, syn_data);
sk               3563 net/ipv4/tcp_output.c 	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
sk               3572 net/ipv4/tcp_output.c int tcp_connect(struct sock *sk)
sk               3574 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3578 net/ipv4/tcp_output.c 	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
sk               3580 net/ipv4/tcp_output.c 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
sk               3583 net/ipv4/tcp_output.c 	tcp_connect_init(sk);
sk               3586 net/ipv4/tcp_output.c 		tcp_finish_connect(sk, NULL);
sk               3590 net/ipv4/tcp_output.c 	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
sk               3597 net/ipv4/tcp_output.c 	tcp_connect_queue_skb(sk, buff);
sk               3598 net/ipv4/tcp_output.c 	tcp_ecn_send_syn(sk, buff);
sk               3599 net/ipv4/tcp_output.c 	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
sk               3602 net/ipv4/tcp_output.c 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
sk               3603 net/ipv4/tcp_output.c 	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
sk               3612 net/ipv4/tcp_output.c 	buff = tcp_send_head(sk);
sk               3617 net/ipv4/tcp_output.c 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
sk               3620 net/ipv4/tcp_output.c 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk               3621 net/ipv4/tcp_output.c 				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
sk               3630 net/ipv4/tcp_output.c void tcp_send_delayed_ack(struct sock *sk)
sk               3632 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               3637 net/ipv4/tcp_output.c 		const struct tcp_sock *tp = tcp_sk(sk);
sk               3640 net/ipv4/tcp_output.c 		if (inet_csk_in_pingpong_mode(sk) ||
sk               3671 net/ipv4/tcp_output.c 			tcp_send_ack(sk);
sk               3680 net/ipv4/tcp_output.c 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
sk               3684 net/ipv4/tcp_output.c void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
sk               3689 net/ipv4/tcp_output.c 	if (sk->sk_state == TCP_CLOSE)
sk               3697 net/ipv4/tcp_output.c 			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
sk               3699 net/ipv4/tcp_output.c 		inet_csk_schedule_ack(sk);
sk               3700 net/ipv4/tcp_output.c 		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
sk               3701 net/ipv4/tcp_output.c 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
sk               3708 net/ipv4/tcp_output.c 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
sk               3717 net/ipv4/tcp_output.c 	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
sk               3721 net/ipv4/tcp_output.c void tcp_send_ack(struct sock *sk)
sk               3723 net/ipv4/tcp_output.c 	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
sk               3737 net/ipv4/tcp_output.c static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
sk               3739 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3744 net/ipv4/tcp_output.c 			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
sk               3755 net/ipv4/tcp_output.c 	NET_INC_STATS(sock_net(sk), mib);
sk               3756 net/ipv4/tcp_output.c 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
sk               3760 net/ipv4/tcp_output.c void tcp_send_window_probe(struct sock *sk)
sk               3762 net/ipv4/tcp_output.c 	if (sk->sk_state == TCP_ESTABLISHED) {
sk               3763 net/ipv4/tcp_output.c 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
sk               3764 net/ipv4/tcp_output.c 		tcp_mstamp_refresh(tcp_sk(sk));
sk               3765 net/ipv4/tcp_output.c 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
sk               3770 net/ipv4/tcp_output.c int tcp_write_wakeup(struct sock *sk, int mib)
sk               3772 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3775 net/ipv4/tcp_output.c 	if (sk->sk_state == TCP_CLOSE)
sk               3778 net/ipv4/tcp_output.c 	skb = tcp_send_head(sk);
sk               3781 net/ipv4/tcp_output.c 		unsigned int mss = tcp_current_mss(sk);
sk               3795 net/ipv4/tcp_output.c 			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
sk               3802 net/ipv4/tcp_output.c 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
sk               3804 net/ipv4/tcp_output.c 			tcp_event_new_data_sent(sk, skb);
sk               3808 net/ipv4/tcp_output.c 			tcp_xmit_probe_skb(sk, 1, mib);
sk               3809 net/ipv4/tcp_output.c 		return tcp_xmit_probe_skb(sk, 0, mib);
sk               3816 net/ipv4/tcp_output.c void tcp_send_probe0(struct sock *sk)
sk               3818 net/ipv4/tcp_output.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               3819 net/ipv4/tcp_output.c 	struct tcp_sock *tp = tcp_sk(sk);
sk               3820 net/ipv4/tcp_output.c 	struct net *net = sock_net(sk);
sk               3824 net/ipv4/tcp_output.c 	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
sk               3826 net/ipv4/tcp_output.c 	if (tp->packets_out || tcp_write_queue_empty(sk)) {
sk               3837 net/ipv4/tcp_output.c 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
sk               3844 net/ipv4/tcp_output.c 	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
sk               3847 net/ipv4/tcp_output.c int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
sk               3854 net/ipv4/tcp_output.c 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
sk               3856 net/ipv4/tcp_output.c 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
sk               3857 net/ipv4/tcp_output.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
sk               3858 net/ipv4/tcp_output.c 		if (unlikely(tcp_passive_fastopen(sk)))
sk               3859 net/ipv4/tcp_output.c 			tcp_sk(sk)->total_retrans++;
sk               3860 net/ipv4/tcp_output.c 		trace_tcp_retransmit_synack(sk, req);
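The tcp_output.c entries above end with the SYN-side helpers (tcp_connect(), tcp_send_syn_data()), which carry payload on the SYN when a Fast Open request is pending. As an aside, a minimal user-space sketch of the client side that exercises that path; the address 127.0.0.1:8080 and the payload are illustrative assumptions, and MSG_FASTOPEN is defined as a fallback with its kernel UAPI value for older libcs.

	/* Minimal TCP Fast Open client sketch (assumes a listener on
	 * 127.0.0.1:8080 and that net.ipv4.tcp_fastopen permits client use).
	 * sendto() with MSG_FASTOPEN performs the implicit connect and, once
	 * a Fast Open cookie is cached, lets the kernel send data on the SYN. */
	#include <arpa/inet.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#ifndef MSG_FASTOPEN
	#define MSG_FASTOPEN 0x20000000	/* kernel UAPI value, for older libcs */
	#endif

	int main(void)
	{
		struct sockaddr_in dst = {
			.sin_family = AF_INET,
			.sin_port = htons(8080),
		};
		const char req[] = "hello";
		int fd;

		inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
		fd = socket(AF_INET, SOCK_STREAM, 0);
		if (fd < 0)
			return 1;

		/* No explicit connect(): MSG_FASTOPEN asks the kernel to open
		 * the connection and queue the payload with the SYN when possible. */
		if (sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
			   (struct sockaddr *)&dst, sizeof(dst)) < 0)
			perror("sendto");

		close(fd);
		return 0;
	}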
sk                 40 net/ipv4/tcp_rate.c void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
sk                 42 net/ipv4/tcp_rate.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 78 net/ipv4/tcp_rate.c void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
sk                 81 net/ipv4/tcp_rate.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                110 net/ipv4/tcp_rate.c void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
sk                113 net/ipv4/tcp_rate.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                166 net/ipv4/tcp_rate.c 				 inet_csk(sk)->icsk_ca_state,
sk                183 net/ipv4/tcp_rate.c void tcp_rate_check_app_limited(struct sock *sk)
sk                185 net/ipv4/tcp_rate.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                190 net/ipv4/tcp_rate.c 	    sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
sk                  5 net/ipv4/tcp_recovery.c void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
sk                  7 net/ipv4/tcp_recovery.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 14 net/ipv4/tcp_recovery.c 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
sk                 24 net/ipv4/tcp_recovery.c static u32 tcp_rack_reo_wnd(const struct sock *sk)
sk                 26 net/ipv4/tcp_recovery.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 32 net/ipv4/tcp_recovery.c 		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
sk                 36 net/ipv4/tcp_recovery.c 		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
sk                 76 net/ipv4/tcp_recovery.c static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
sk                 78 net/ipv4/tcp_recovery.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 83 net/ipv4/tcp_recovery.c 	reo_wnd = tcp_rack_reo_wnd(sk);
sk                104 net/ipv4/tcp_recovery.c 			tcp_mark_skb_lost(sk, skb);
sk                113 net/ipv4/tcp_recovery.c void tcp_rack_mark_lost(struct sock *sk)
sk                115 net/ipv4/tcp_recovery.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                123 net/ipv4/tcp_recovery.c 	tcp_rack_detect_loss(sk, &timeout);
sk                126 net/ipv4/tcp_recovery.c 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
sk                127 net/ipv4/tcp_recovery.c 					  timeout, inet_csk(sk)->icsk_rto);
sk                166 net/ipv4/tcp_recovery.c void tcp_rack_reo_timeout(struct sock *sk)
sk                168 net/ipv4/tcp_recovery.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                172 net/ipv4/tcp_recovery.c 	tcp_rack_detect_loss(sk, &timeout);
sk                174 net/ipv4/tcp_recovery.c 		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
sk                175 net/ipv4/tcp_recovery.c 			tcp_enter_recovery(sk, false);
sk                176 net/ipv4/tcp_recovery.c 			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
sk                177 net/ipv4/tcp_recovery.c 				tcp_cwnd_reduction(sk, 1, 0);
sk                179 net/ipv4/tcp_recovery.c 		tcp_xmit_retransmit_queue(sk);
sk                181 net/ipv4/tcp_recovery.c 	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
sk                182 net/ipv4/tcp_recovery.c 		tcp_rearm_rto(sk);
sk                202 net/ipv4/tcp_recovery.c void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
sk                204 net/ipv4/tcp_recovery.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                206 net/ipv4/tcp_recovery.c 	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
sk                231 net/ipv4/tcp_recovery.c void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
sk                233 net/ipv4/tcp_recovery.c 	const u8 state = inet_csk(sk)->icsk_ca_state;
sk                234 net/ipv4/tcp_recovery.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                238 net/ipv4/tcp_recovery.c 		struct sk_buff *skb = tcp_rtx_queue_head(sk);
sk                246 net/ipv4/tcp_recovery.c 			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
sk                 19 net/ipv4/tcp_scalable.c static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                 21 net/ipv4/tcp_scalable.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 23 net/ipv4/tcp_scalable.c 	if (!tcp_is_cwnd_limited(sk))
sk                 33 net/ipv4/tcp_scalable.c static u32 tcp_scalable_ssthresh(struct sock *sk)
sk                 35 net/ipv4/tcp_scalable.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                 26 net/ipv4/tcp_timer.c static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
sk                 28 net/ipv4/tcp_timer.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                 32 net/ipv4/tcp_timer.c 	start_ts = tcp_sk(sk)->retrans_stamp;
sk                 35 net/ipv4/tcp_timer.c 	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
sk                 50 net/ipv4/tcp_timer.c static void tcp_write_err(struct sock *sk)
sk                 52 net/ipv4/tcp_timer.c 	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
sk                 53 net/ipv4/tcp_timer.c 	sk->sk_error_report(sk);
sk                 55 net/ipv4/tcp_timer.c 	tcp_write_queue_purge(sk);
sk                 56 net/ipv4/tcp_timer.c 	tcp_done(sk);
sk                 57 net/ipv4/tcp_timer.c 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
sk                 84 net/ipv4/tcp_timer.c static int tcp_out_of_resources(struct sock *sk, bool do_reset)
sk                 86 net/ipv4/tcp_timer.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 95 net/ipv4/tcp_timer.c 	if (sk->sk_err_soft)
sk                 98 net/ipv4/tcp_timer.c 	if (tcp_check_oom(sk, shift)) {
sk                106 net/ipv4/tcp_timer.c 			tcp_send_active_reset(sk, GFP_ATOMIC);
sk                107 net/ipv4/tcp_timer.c 		tcp_done(sk);
sk                108 net/ipv4/tcp_timer.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
sk                112 net/ipv4/tcp_timer.c 	if (!check_net(sock_net(sk))) {
sk                114 net/ipv4/tcp_timer.c 		tcp_done(sk);
sk                126 net/ipv4/tcp_timer.c static int tcp_orphan_retries(struct sock *sk, bool alive)
sk                128 net/ipv4/tcp_timer.c 	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
sk                131 net/ipv4/tcp_timer.c 	if (sk->sk_err_soft && !alive)
sk                142 net/ipv4/tcp_timer.c static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
sk                144 net/ipv4/tcp_timer.c 	const struct net *net = sock_net(sk);
sk                155 net/ipv4/tcp_timer.c 		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
sk                159 net/ipv4/tcp_timer.c 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
sk                161 net/ipv4/tcp_timer.c 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
sk                164 net/ipv4/tcp_timer.c static unsigned int tcp_model_timeout(struct sock *sk,
sk                191 net/ipv4/tcp_timer.c static bool retransmits_timed_out(struct sock *sk,
sk                197 net/ipv4/tcp_timer.c 	if (!inet_csk(sk)->icsk_retransmits)
sk                200 net/ipv4/tcp_timer.c 	start_ts = tcp_sk(sk)->retrans_stamp;
sk                204 net/ipv4/tcp_timer.c 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
sk                205 net/ipv4/tcp_timer.c 			rto_base = tcp_timeout_init(sk);
sk                206 net/ipv4/tcp_timer.c 		timeout = tcp_model_timeout(sk, boundary, rto_base);
sk                209 net/ipv4/tcp_timer.c 	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
sk                213 net/ipv4/tcp_timer.c static int tcp_write_timeout(struct sock *sk)
sk                215 net/ipv4/tcp_timer.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                216 net/ipv4/tcp_timer.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                217 net/ipv4/tcp_timer.c 	struct net *net = sock_net(sk);
sk                221 net/ipv4/tcp_timer.c 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
sk                223 net/ipv4/tcp_timer.c 			dst_negative_advice(sk);
sk                225 net/ipv4/tcp_timer.c 			sk_rethink_txhash(sk);
sk                230 net/ipv4/tcp_timer.c 		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
sk                232 net/ipv4/tcp_timer.c 			tcp_mtu_probing(icsk, sk);
sk                234 net/ipv4/tcp_timer.c 			dst_negative_advice(sk);
sk                236 net/ipv4/tcp_timer.c 			sk_rethink_txhash(sk);
sk                240 net/ipv4/tcp_timer.c 		if (sock_flag(sk, SOCK_DEAD)) {
sk                243 net/ipv4/tcp_timer.c 			retry_until = tcp_orphan_retries(sk, alive);
sk                245 net/ipv4/tcp_timer.c 				!retransmits_timed_out(sk, retry_until, 0);
sk                247 net/ipv4/tcp_timer.c 			if (tcp_out_of_resources(sk, do_reset))
sk                252 net/ipv4/tcp_timer.c 		expired = retransmits_timed_out(sk, retry_until,
sk                254 net/ipv4/tcp_timer.c 	tcp_fastopen_active_detect_blackhole(sk, expired);
sk                257 net/ipv4/tcp_timer.c 		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
sk                263 net/ipv4/tcp_timer.c 		tcp_write_err(sk);
sk                271 net/ipv4/tcp_timer.c void tcp_delack_timer_handler(struct sock *sk)
sk                273 net/ipv4/tcp_timer.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                275 net/ipv4/tcp_timer.c 	sk_mem_reclaim_partial(sk);
sk                277 net/ipv4/tcp_timer.c 	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
sk                282 net/ipv4/tcp_timer.c 		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
sk                287 net/ipv4/tcp_timer.c 	if (inet_csk_ack_scheduled(sk)) {
sk                288 net/ipv4/tcp_timer.c 		if (!inet_csk_in_pingpong_mode(sk)) {
sk                295 net/ipv4/tcp_timer.c 			inet_csk_exit_pingpong_mode(sk);
sk                298 net/ipv4/tcp_timer.c 		tcp_mstamp_refresh(tcp_sk(sk));
sk                299 net/ipv4/tcp_timer.c 		tcp_send_ack(sk);
sk                300 net/ipv4/tcp_timer.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
sk                304 net/ipv4/tcp_timer.c 	if (tcp_under_memory_pressure(sk))
sk                305 net/ipv4/tcp_timer.c 		sk_mem_reclaim(sk);
sk                322 net/ipv4/tcp_timer.c 	struct sock *sk = &icsk->icsk_inet.sk;
sk                324 net/ipv4/tcp_timer.c 	bh_lock_sock(sk);
sk                325 net/ipv4/tcp_timer.c 	if (!sock_owned_by_user(sk)) {
sk                326 net/ipv4/tcp_timer.c 		tcp_delack_timer_handler(sk);
sk                329 net/ipv4/tcp_timer.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
sk                331 net/ipv4/tcp_timer.c 		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
sk                332 net/ipv4/tcp_timer.c 			sock_hold(sk);
sk                334 net/ipv4/tcp_timer.c 	bh_unlock_sock(sk);
sk                335 net/ipv4/tcp_timer.c 	sock_put(sk);
sk                338 net/ipv4/tcp_timer.c static void tcp_probe_timer(struct sock *sk)
sk                340 net/ipv4/tcp_timer.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                341 net/ipv4/tcp_timer.c 	struct sk_buff *skb = tcp_send_head(sk);
sk                342 net/ipv4/tcp_timer.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                359 net/ipv4/tcp_timer.c 		u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
sk                360 net/ipv4/tcp_timer.c 						tcp_probe0_base(sk));
sk                366 net/ipv4/tcp_timer.c 	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
sk                367 net/ipv4/tcp_timer.c 	if (sock_flag(sk, SOCK_DEAD)) {
sk                370 net/ipv4/tcp_timer.c 		max_probes = tcp_orphan_retries(sk, alive);
sk                373 net/ipv4/tcp_timer.c 		if (tcp_out_of_resources(sk, true))
sk                378 net/ipv4/tcp_timer.c abort:		tcp_write_err(sk);
sk                381 net/ipv4/tcp_timer.c 		tcp_send_probe0(sk);
sk                389 net/ipv4/tcp_timer.c static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
sk                391 net/ipv4/tcp_timer.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                393 net/ipv4/tcp_timer.c 	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
sk                394 net/ipv4/tcp_timer.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                399 net/ipv4/tcp_timer.c 		tcp_write_err(sk);
sk                404 net/ipv4/tcp_timer.c 		tcp_enter_loss(sk);
sk                410 net/ipv4/tcp_timer.c 	inet_rtx_syn_ack(sk, req);
sk                415 net/ipv4/tcp_timer.c 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk                431 net/ipv4/tcp_timer.c void tcp_retransmit_timer(struct sock *sk)
sk                433 net/ipv4/tcp_timer.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                434 net/ipv4/tcp_timer.c 	struct net *net = sock_net(sk);
sk                435 net/ipv4/tcp_timer.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                439 net/ipv4/tcp_timer.c 					lockdep_sock_is_held(sk));
sk                441 net/ipv4/tcp_timer.c 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
sk                442 net/ipv4/tcp_timer.c 			     sk->sk_state != TCP_FIN_WAIT1);
sk                443 net/ipv4/tcp_timer.c 		tcp_fastopen_synack_timer(sk, req);
sk                449 net/ipv4/tcp_timer.c 	if (!tp->packets_out || WARN_ON_ONCE(tcp_rtx_queue_empty(sk)))
sk                454 net/ipv4/tcp_timer.c 	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
sk                455 net/ipv4/tcp_timer.c 	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
sk                461 net/ipv4/tcp_timer.c 		struct inet_sock *inet = inet_sk(sk);
sk                462 net/ipv4/tcp_timer.c 		if (sk->sk_family == AF_INET) {
sk                470 net/ipv4/tcp_timer.c 		else if (sk->sk_family == AF_INET6) {
sk                472 net/ipv4/tcp_timer.c 					    &sk->sk_v6_daddr,
sk                479 net/ipv4/tcp_timer.c 			tcp_write_err(sk);
sk                482 net/ipv4/tcp_timer.c 		tcp_enter_loss(sk);
sk                483 net/ipv4/tcp_timer.c 		tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1);
sk                484 net/ipv4/tcp_timer.c 		__sk_dst_reset(sk);
sk                488 net/ipv4/tcp_timer.c 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
sk                489 net/ipv4/tcp_timer.c 	if (tcp_write_timeout(sk))
sk                510 net/ipv4/tcp_timer.c 			__NET_INC_STATS(sock_net(sk), mib_idx);
sk                513 net/ipv4/tcp_timer.c 	tcp_enter_loss(sk);
sk                516 net/ipv4/tcp_timer.c 	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
sk                520 net/ipv4/tcp_timer.c 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk                553 net/ipv4/tcp_timer.c 	if (sk->sk_state == TCP_ESTABLISHED &&
sk                563 net/ipv4/tcp_timer.c 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
sk                564 net/ipv4/tcp_timer.c 				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
sk                565 net/ipv4/tcp_timer.c 	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
sk                566 net/ipv4/tcp_timer.c 		__sk_dst_reset(sk);
sk                573 net/ipv4/tcp_timer.c void tcp_write_timer_handler(struct sock *sk)
sk                575 net/ipv4/tcp_timer.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                578 net/ipv4/tcp_timer.c 	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
sk                583 net/ipv4/tcp_timer.c 		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
sk                587 net/ipv4/tcp_timer.c 	tcp_mstamp_refresh(tcp_sk(sk));
sk                592 net/ipv4/tcp_timer.c 		tcp_rack_reo_timeout(sk);
sk                595 net/ipv4/tcp_timer.c 		tcp_send_loss_probe(sk);
sk                599 net/ipv4/tcp_timer.c 		tcp_retransmit_timer(sk);
sk                603 net/ipv4/tcp_timer.c 		tcp_probe_timer(sk);
sk                608 net/ipv4/tcp_timer.c 	sk_mem_reclaim(sk);
sk                615 net/ipv4/tcp_timer.c 	struct sock *sk = &icsk->icsk_inet.sk;
sk                617 net/ipv4/tcp_timer.c 	bh_lock_sock(sk);
sk                618 net/ipv4/tcp_timer.c 	if (!sock_owned_by_user(sk)) {
sk                619 net/ipv4/tcp_timer.c 		tcp_write_timer_handler(sk);
sk                622 net/ipv4/tcp_timer.c 		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
sk                623 net/ipv4/tcp_timer.c 			sock_hold(sk);
sk                625 net/ipv4/tcp_timer.c 	bh_unlock_sock(sk);
sk                626 net/ipv4/tcp_timer.c 	sock_put(sk);
sk                637 net/ipv4/tcp_timer.c void tcp_set_keepalive(struct sock *sk, int val)
sk                639 net/ipv4/tcp_timer.c 	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
sk                642 net/ipv4/tcp_timer.c 	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
sk                643 net/ipv4/tcp_timer.c 		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
sk                645 net/ipv4/tcp_timer.c 		inet_csk_delete_keepalive_timer(sk);
sk                652 net/ipv4/tcp_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                653 net/ipv4/tcp_timer.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                654 net/ipv4/tcp_timer.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                658 net/ipv4/tcp_timer.c 	bh_lock_sock(sk);
sk                659 net/ipv4/tcp_timer.c 	if (sock_owned_by_user(sk)) {
sk                661 net/ipv4/tcp_timer.c 		inet_csk_reset_keepalive_timer (sk, HZ/20);
sk                665 net/ipv4/tcp_timer.c 	if (sk->sk_state == TCP_LISTEN) {
sk                671 net/ipv4/tcp_timer.c 	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
sk                673 net/ipv4/tcp_timer.c 			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;
sk                676 net/ipv4/tcp_timer.c 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
sk                680 net/ipv4/tcp_timer.c 		tcp_send_active_reset(sk, GFP_ATOMIC);
sk                684 net/ipv4/tcp_timer.c 	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
sk                685 net/ipv4/tcp_timer.c 	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
sk                691 net/ipv4/tcp_timer.c 	if (tp->packets_out || !tcp_write_queue_empty(sk))
sk                705 net/ipv4/tcp_timer.c 			tcp_send_active_reset(sk, GFP_ATOMIC);
sk                706 net/ipv4/tcp_timer.c 			tcp_write_err(sk);
sk                709 net/ipv4/tcp_timer.c 		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
sk                723 net/ipv4/tcp_timer.c 	sk_mem_reclaim(sk);
sk                726 net/ipv4/tcp_timer.c 	inet_csk_reset_keepalive_timer (sk, elapsed);
sk                730 net/ipv4/tcp_timer.c 	tcp_done(sk);
sk                733 net/ipv4/tcp_timer.c 	bh_unlock_sock(sk);
sk                734 net/ipv4/tcp_timer.c 	sock_put(sk);
sk                740 net/ipv4/tcp_timer.c 	struct sock *sk = (struct sock *)tp;
sk                742 net/ipv4/tcp_timer.c 	bh_lock_sock(sk);
sk                743 net/ipv4/tcp_timer.c 	if (!sock_owned_by_user(sk)) {
sk                745 net/ipv4/tcp_timer.c 			tcp_send_ack(sk);
sk                748 net/ipv4/tcp_timer.c 				      &sk->sk_tsq_flags))
sk                749 net/ipv4/tcp_timer.c 			sock_hold(sk);
sk                751 net/ipv4/tcp_timer.c 	bh_unlock_sock(sk);
sk                753 net/ipv4/tcp_timer.c 	sock_put(sk);
sk                758 net/ipv4/tcp_timer.c void tcp_init_xmit_timers(struct sock *sk)
sk                760 net/ipv4/tcp_timer.c 	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
sk                762 net/ipv4/tcp_timer.c 	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
sk                764 net/ipv4/tcp_timer.c 	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;
sk                766 net/ipv4/tcp_timer.c 	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
sk                768 net/ipv4/tcp_timer.c 	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
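The tcp_timer.c entries above cover the keepalive timer, tcp_set_keepalive() and the user-timeout clamp on the retransmit timer. A short user-space sketch of the knobs that drive those paths; the interval values and the 30 s user timeout are illustrative assumptions, and TCP_USER_TIMEOUT is defined as a fallback with its kernel UAPI value for older libcs.

	/* Sketch: enable keepalive probing and cap how long unacknowledged
	 * data may linger before the connection is aborted.  The values
	 * (60 s idle, 10 s interval, 5 probes, 30 s user timeout) are only
	 * illustrative. */
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	#ifndef TCP_USER_TIMEOUT
	#define TCP_USER_TIMEOUT 18	/* kernel UAPI value, for older libcs */
	#endif

	int tune_tcp_timers(int fd)
	{
		int on = 1, idle = 60, intvl = 10, cnt = 5;
		unsigned int user_timeout_ms = 30000;

		/* SO_KEEPALIVE is what ends up arming the keepalive timer. */
		if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
			return -1;
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));

		/* Bounds how long retransmissions and probes may go unanswered. */
		return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
				  &user_timeout_ms, sizeof(user_timeout_ms));
	}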
sk                 99 net/ipv4/tcp_ulp.c void tcp_update_ulp(struct sock *sk, struct proto *proto,
sk                100 net/ipv4/tcp_ulp.c 		    void (*write_space)(struct sock *sk))
sk                102 net/ipv4/tcp_ulp.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                105 net/ipv4/tcp_ulp.c 		sk->sk_write_space = write_space;
sk                106 net/ipv4/tcp_ulp.c 		sk->sk_prot = proto;
sk                111 net/ipv4/tcp_ulp.c 		icsk->icsk_ulp_ops->update(sk, proto, write_space);
sk                114 net/ipv4/tcp_ulp.c void tcp_cleanup_ulp(struct sock *sk)
sk                116 net/ipv4/tcp_ulp.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                126 net/ipv4/tcp_ulp.c 		icsk->icsk_ulp_ops->release(sk);
sk                132 net/ipv4/tcp_ulp.c static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
sk                134 net/ipv4/tcp_ulp.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                141 net/ipv4/tcp_ulp.c 	err = ulp_ops->init(sk);
sk                152 net/ipv4/tcp_ulp.c int tcp_set_ulp(struct sock *sk, const char *name)
sk                156 net/ipv4/tcp_ulp.c 	sock_owned_by_me(sk);
sk                162 net/ipv4/tcp_ulp.c 	return __tcp_set_ulp(sk, ulp_ops);
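The tcp_ulp.c entries above show how an upper-layer protocol is attached to and detached from a TCP socket. From user space that is reached by naming the ULP via setsockopt(); a minimal sketch follows, assuming the "tls" (kernel TLS) module is available and the connection is already established, with TCP_ULP defined as a fallback with its kernel UAPI value for older libcs.

	/* Sketch: attach an upper-layer protocol by name on an established
	 * TCP socket.  "tls" is the common in-tree user. */
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <sys/socket.h>

	#ifndef TCP_ULP
	#define TCP_ULP 31	/* kernel UAPI value, for older libcs */
	#endif

	int attach_tls_ulp(int fd)
	{
		return setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
	}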
sk                 71 net/ipv4/tcp_vegas.c static void vegas_enable(struct sock *sk)
sk                 73 net/ipv4/tcp_vegas.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                 74 net/ipv4/tcp_vegas.c 	struct vegas *vegas = inet_csk_ca(sk);
sk                 87 net/ipv4/tcp_vegas.c static inline void vegas_disable(struct sock *sk)
sk                 89 net/ipv4/tcp_vegas.c 	struct vegas *vegas = inet_csk_ca(sk);
sk                 94 net/ipv4/tcp_vegas.c void tcp_vegas_init(struct sock *sk)
sk                 96 net/ipv4/tcp_vegas.c 	struct vegas *vegas = inet_csk_ca(sk);
sk                 99 net/ipv4/tcp_vegas.c 	vegas_enable(sk);
sk                111 net/ipv4/tcp_vegas.c void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample)
sk                113 net/ipv4/tcp_vegas.c 	struct vegas *vegas = inet_csk_ca(sk);
sk                134 net/ipv4/tcp_vegas.c void tcp_vegas_state(struct sock *sk, u8 ca_state)
sk                137 net/ipv4/tcp_vegas.c 		vegas_enable(sk);
sk                139 net/ipv4/tcp_vegas.c 		vegas_disable(sk);
sk                152 net/ipv4/tcp_vegas.c void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event)
sk                156 net/ipv4/tcp_vegas.c 		tcp_vegas_init(sk);
sk                165 net/ipv4/tcp_vegas.c static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                167 net/ipv4/tcp_vegas.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                168 net/ipv4/tcp_vegas.c 	struct vegas *vegas = inet_csk_ca(sk);
sk                171 net/ipv4/tcp_vegas.c 		tcp_reno_cong_avoid(sk, ack, acked);
sk                196 net/ipv4/tcp_vegas.c 			tcp_reno_cong_avoid(sk, ack, acked);
sk                277 net/ipv4/tcp_vegas.c 			tp->snd_ssthresh = tcp_current_ssthresh(sk);
sk                290 net/ipv4/tcp_vegas.c size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
sk                293 net/ipv4/tcp_vegas.c 	const struct vegas *ca = inet_csk_ca(sk);
sk                 19 net/ipv4/tcp_vegas.h void tcp_vegas_init(struct sock *sk);
sk                 20 net/ipv4/tcp_vegas.h void tcp_vegas_state(struct sock *sk, u8 ca_state);
sk                 21 net/ipv4/tcp_vegas.h void tcp_vegas_pkts_acked(struct sock *sk, const struct ack_sample *sample);
sk                 22 net/ipv4/tcp_vegas.h void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
sk                 23 net/ipv4/tcp_vegas.h size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
sk                 45 net/ipv4/tcp_veno.c static inline void veno_enable(struct sock *sk)
sk                 47 net/ipv4/tcp_veno.c 	struct veno *veno = inet_csk_ca(sk);
sk                 55 net/ipv4/tcp_veno.c static inline void veno_disable(struct sock *sk)
sk                 57 net/ipv4/tcp_veno.c 	struct veno *veno = inet_csk_ca(sk);
sk                 63 net/ipv4/tcp_veno.c static void tcp_veno_init(struct sock *sk)
sk                 65 net/ipv4/tcp_veno.c 	struct veno *veno = inet_csk_ca(sk);
sk                 69 net/ipv4/tcp_veno.c 	veno_enable(sk);
sk                 73 net/ipv4/tcp_veno.c static void tcp_veno_pkts_acked(struct sock *sk,
sk                 76 net/ipv4/tcp_veno.c 	struct veno *veno = inet_csk_ca(sk);
sk                 96 net/ipv4/tcp_veno.c static void tcp_veno_state(struct sock *sk, u8 ca_state)
sk                 99 net/ipv4/tcp_veno.c 		veno_enable(sk);
sk                101 net/ipv4/tcp_veno.c 		veno_disable(sk);
sk                113 net/ipv4/tcp_veno.c static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
sk                116 net/ipv4/tcp_veno.c 		tcp_veno_init(sk);
sk                119 net/ipv4/tcp_veno.c static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                121 net/ipv4/tcp_veno.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                122 net/ipv4/tcp_veno.c 	struct veno *veno = inet_csk_ca(sk);
sk                125 net/ipv4/tcp_veno.c 		tcp_reno_cong_avoid(sk, ack, acked);
sk                130 net/ipv4/tcp_veno.c 	if (!tcp_is_cwnd_limited(sk))
sk                138 net/ipv4/tcp_veno.c 		tcp_reno_cong_avoid(sk, ack, acked);
sk                192 net/ipv4/tcp_veno.c static u32 tcp_veno_ssthresh(struct sock *sk)
sk                194 net/ipv4/tcp_veno.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                195 net/ipv4/tcp_veno.c 	struct veno *veno = inet_csk_ca(sk);
sk                 61 net/ipv4/tcp_westwood.c static void tcp_westwood_init(struct sock *sk)
sk                 63 net/ipv4/tcp_westwood.c 	struct westwood *w = inet_csk_ca(sk);
sk                 73 net/ipv4/tcp_westwood.c 	w->snd_una = tcp_sk(sk)->snd_una;
sk                103 net/ipv4/tcp_westwood.c static void tcp_westwood_pkts_acked(struct sock *sk,
sk                106 net/ipv4/tcp_westwood.c 	struct westwood *w = inet_csk_ca(sk);
sk                117 net/ipv4/tcp_westwood.c static void westwood_update_window(struct sock *sk)
sk                119 net/ipv4/tcp_westwood.c 	struct westwood *w = inet_csk_ca(sk);
sk                127 net/ipv4/tcp_westwood.c 		w->snd_una = tcp_sk(sk)->snd_una;
sk                163 net/ipv4/tcp_westwood.c static inline void westwood_fast_bw(struct sock *sk)
sk                165 net/ipv4/tcp_westwood.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                166 net/ipv4/tcp_westwood.c 	struct westwood *w = inet_csk_ca(sk);
sk                168 net/ipv4/tcp_westwood.c 	westwood_update_window(sk);
sk                180 net/ipv4/tcp_westwood.c static inline u32 westwood_acked_count(struct sock *sk)
sk                182 net/ipv4/tcp_westwood.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                183 net/ipv4/tcp_westwood.c 	struct westwood *w = inet_csk_ca(sk);
sk                217 net/ipv4/tcp_westwood.c static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
sk                219 net/ipv4/tcp_westwood.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                220 net/ipv4/tcp_westwood.c 	const struct westwood *w = inet_csk_ca(sk);
sk                225 net/ipv4/tcp_westwood.c static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
sk                228 net/ipv4/tcp_westwood.c 		struct westwood *w = inet_csk_ca(sk);
sk                230 net/ipv4/tcp_westwood.c 		westwood_update_window(sk);
sk                231 net/ipv4/tcp_westwood.c 		w->bk += westwood_acked_count(sk);
sk                237 net/ipv4/tcp_westwood.c 	westwood_fast_bw(sk);
sk                240 net/ipv4/tcp_westwood.c static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
sk                242 net/ipv4/tcp_westwood.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                243 net/ipv4/tcp_westwood.c 	struct westwood *w = inet_csk_ca(sk);
sk                247 net/ipv4/tcp_westwood.c 		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
sk                250 net/ipv4/tcp_westwood.c 		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
sk                261 net/ipv4/tcp_westwood.c static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
sk                264 net/ipv4/tcp_westwood.c 	const struct westwood *ca = inet_csk_ca(sk);
sk                 43 net/ipv4/tcp_yeah.c static void tcp_yeah_init(struct sock *sk)
sk                 45 net/ipv4/tcp_yeah.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 46 net/ipv4/tcp_yeah.c 	struct yeah *yeah = inet_csk_ca(sk);
sk                 48 net/ipv4/tcp_yeah.c 	tcp_vegas_init(sk);
sk                 60 net/ipv4/tcp_yeah.c static void tcp_yeah_pkts_acked(struct sock *sk,
sk                 63 net/ipv4/tcp_yeah.c 	const struct inet_connection_sock *icsk = inet_csk(sk);
sk                 64 net/ipv4/tcp_yeah.c 	struct yeah *yeah = inet_csk_ca(sk);
sk                 69 net/ipv4/tcp_yeah.c 	tcp_vegas_pkts_acked(sk, sample);
sk                 72 net/ipv4/tcp_yeah.c static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
sk                 74 net/ipv4/tcp_yeah.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                 75 net/ipv4/tcp_yeah.c 	struct yeah *yeah = inet_csk_ca(sk);
sk                 77 net/ipv4/tcp_yeah.c 	if (!tcp_is_cwnd_limited(sk))
sk                206 net/ipv4/tcp_yeah.c static u32 tcp_yeah_ssthresh(struct sock *sk)
sk                208 net/ipv4/tcp_yeah.c 	const struct tcp_sock *tp = tcp_sk(sk);
sk                209 net/ipv4/tcp_yeah.c 	struct yeah *yeah = inet_csk_ca(sk);
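The tcp_scalable.c, tcp_vegas.c, tcp_veno.c, tcp_westwood.c and tcp_yeah.c entries above are pluggable congestion control modules. A socket opts into one of them by name; the sketch below assumes the chosen module (e.g. "westwood") is built into the kernel or loadable, and uses a 16-byte name buffer matching the kernel's name limit.

	/* Sketch: select and read back the per-socket congestion control
	 * algorithm by name. */
	#include <netinet/in.h>
	#include <netinet/tcp.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>

	int set_cc(int fd, const char *name)
	{
		return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
				  name, strlen(name));
	}

	void show_cc(int fd)
	{
		char name[16] = "";
		socklen_t len = sizeof(name);

		if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
			printf("congestion control: %s\n", name);
	}

Usage would be along the lines of set_cc(fd, "vegas") or set_cc(fd, "westwood") before the data transfer starts.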
sk                131 net/ipv4/udp.c 			       struct sock *sk, unsigned int log)
sk                134 net/ipv4/udp.c 	kuid_t uid = sock_i_uid(sk);
sk                138 net/ipv4/udp.c 		    sk2 != sk &&
sk                140 net/ipv4/udp.c 		    (!sk2->sk_reuse || !sk->sk_reuse) &&
sk                141 net/ipv4/udp.c 		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk                142 net/ipv4/udp.c 		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk                143 net/ipv4/udp.c 		    inet_rcv_saddr_equal(sk, sk2, true)) {
sk                144 net/ipv4/udp.c 			if (sk2->sk_reuseport && sk->sk_reuseport &&
sk                145 net/ipv4/udp.c 			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
sk                166 net/ipv4/udp.c 				struct sock *sk)
sk                169 net/ipv4/udp.c 	kuid_t uid = sock_i_uid(sk);
sk                175 net/ipv4/udp.c 		    sk2 != sk &&
sk                177 net/ipv4/udp.c 		    (!sk2->sk_reuse || !sk->sk_reuse) &&
sk                178 net/ipv4/udp.c 		    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk                179 net/ipv4/udp.c 		     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk                180 net/ipv4/udp.c 		    inet_rcv_saddr_equal(sk, sk2, true)) {
sk                181 net/ipv4/udp.c 			if (sk2->sk_reuseport && sk->sk_reuseport &&
sk                182 net/ipv4/udp.c 			    !rcu_access_pointer(sk->sk_reuseport_cb) &&
sk                195 net/ipv4/udp.c static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
sk                197 net/ipv4/udp.c 	struct net *net = sock_net(sk);
sk                198 net/ipv4/udp.c 	kuid_t uid = sock_i_uid(sk);
sk                203 net/ipv4/udp.c 		    sk2 != sk &&
sk                204 net/ipv4/udp.c 		    sk2->sk_family == sk->sk_family &&
sk                205 net/ipv4/udp.c 		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk                206 net/ipv4/udp.c 		    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
sk                207 net/ipv4/udp.c 		    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk                209 net/ipv4/udp.c 		    inet_rcv_saddr_equal(sk, sk2, false)) {
sk                210 net/ipv4/udp.c 			return reuseport_add_sock(sk, sk2,
sk                211 net/ipv4/udp.c 						  inet_rcv_saddr_any(sk));
sk                215 net/ipv4/udp.c 	return reuseport_alloc(sk, inet_rcv_saddr_any(sk));
sk                226 net/ipv4/udp.c int udp_lib_get_port(struct sock *sk, unsigned short snum,
sk                230 net/ipv4/udp.c 	struct udp_table *udptable = sk->sk_prot->h.udp_table;
sk                232 net/ipv4/udp.c 	struct net *net = sock_net(sk);
sk                254 net/ipv4/udp.c 			udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
sk                279 net/ipv4/udp.c 			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
sk                288 net/ipv4/udp.c 			exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
sk                292 net/ipv4/udp.c 							     sk);
sk                300 net/ipv4/udp.c 		if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
sk                304 net/ipv4/udp.c 	inet_sk(sk)->inet_num = snum;
sk                305 net/ipv4/udp.c 	udp_sk(sk)->udp_port_hash = snum;
sk                306 net/ipv4/udp.c 	udp_sk(sk)->udp_portaddr_hash ^= snum;
sk                307 net/ipv4/udp.c 	if (sk_unhashed(sk)) {
sk                308 net/ipv4/udp.c 		if (sk->sk_reuseport &&
sk                309 net/ipv4/udp.c 		    udp_reuseport_add_sock(sk, hslot)) {
sk                310 net/ipv4/udp.c 			inet_sk(sk)->inet_num = 0;
sk                311 net/ipv4/udp.c 			udp_sk(sk)->udp_port_hash = 0;
sk                312 net/ipv4/udp.c 			udp_sk(sk)->udp_portaddr_hash ^= snum;
sk                316 net/ipv4/udp.c 		sk_add_node_rcu(sk, &hslot->head);
sk                318 net/ipv4/udp.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                320 net/ipv4/udp.c 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
sk                322 net/ipv4/udp.c 		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk                323 net/ipv4/udp.c 		    sk->sk_family == AF_INET6)
sk                324 net/ipv4/udp.c 			hlist_add_tail_rcu(&udp_sk(sk)->udp_portaddr_node,
sk                327 net/ipv4/udp.c 			hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
sk                332 net/ipv4/udp.c 	sock_set_flag(sk, SOCK_RCU_FREE);
sk                341 net/ipv4/udp.c int udp_v4_get_port(struct sock *sk, unsigned short snum)
sk                344 net/ipv4/udp.c 		ipv4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
sk                346 net/ipv4/udp.c 		ipv4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
sk                349 net/ipv4/udp.c 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
sk                350 net/ipv4/udp.c 	return udp_lib_get_port(sk, snum, hash2_nulladdr);
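The udp.c entries above (udp_lib_lport_inuse(), udp_reuseport_add_sock(), udp_lib_get_port(), udp_v4_get_port()) implement the bind-time conflict and reuseport checks. A user-space sketch of two sockets sharing one local port follows; port 5353 is an illustrative choice, both sockets must set SO_REUSEPORT before bind(), and the owning UID must match for the checks above to allow the reuse.

	/* Sketch: bind UDP sockets that share a local port via SO_REUSEPORT
	 * (requires Linux >= 3.9 and a libc exposing SO_REUSEPORT). */
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int bound_udp_socket(unsigned short port)
	{
		struct sockaddr_in addr = {
			.sin_family = AF_INET,
			.sin_port = htons(port),
			.sin_addr.s_addr = htonl(INADDR_ANY),
		};
		int fd = socket(AF_INET, SOCK_DGRAM, 0), one = 1;

		if (fd < 0)
			return -1;
		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}

	/* e.g. int a = bound_udp_socket(5353), b = bound_udp_socket(5353); */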
sk                353 net/ipv4/udp.c static int compute_score(struct sock *sk, struct net *net,
sk                362 net/ipv4/udp.c 	if (!net_eq(sock_net(sk), net) ||
sk                363 net/ipv4/udp.c 	    udp_sk(sk)->udp_port_hash != hnum ||
sk                364 net/ipv4/udp.c 	    ipv6_only_sock(sk))
sk                367 net/ipv4/udp.c 	if (sk->sk_rcv_saddr != daddr)
sk                370 net/ipv4/udp.c 	score = (sk->sk_family == PF_INET) ? 2 : 1;
sk                372 net/ipv4/udp.c 	inet = inet_sk(sk);
sk                385 net/ipv4/udp.c 	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
sk                391 net/ipv4/udp.c 	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
sk                416 net/ipv4/udp.c 	struct sock *sk, *result;
sk                422 net/ipv4/udp.c 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
sk                423 net/ipv4/udp.c 		score = compute_score(sk, net, saddr, sport,
sk                426 net/ipv4/udp.c 			if (sk->sk_reuseport &&
sk                427 net/ipv4/udp.c 			    sk->sk_state != TCP_ESTABLISHED) {
sk                430 net/ipv4/udp.c 				result = reuseport_select_sock(sk, hash, skb,
sk                432 net/ipv4/udp.c 				if (result && !reuseport_has_conns(sk, false))
sk                436 net/ipv4/udp.c 			result = sk;
sk                505 net/ipv4/udp.c 	struct sock *sk;
sk                507 net/ipv4/udp.c 	sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
sk                509 net/ipv4/udp.c 	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk                510 net/ipv4/udp.c 		sk = NULL;
sk                511 net/ipv4/udp.c 	return sk;
sk                516 net/ipv4/udp.c static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
sk                521 net/ipv4/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk                523 net/ipv4/udp.c 	if (!net_eq(sock_net(sk), net) ||
sk                524 net/ipv4/udp.c 	    udp_sk(sk)->udp_port_hash != hnum ||
sk                528 net/ipv4/udp.c 	    ipv6_only_sock(sk) ||
sk                529 net/ipv4/udp.c 	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
sk                531 net/ipv4/udp.c 	if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
sk                589 net/ipv4/udp.c 	struct sock *sk;
sk                600 net/ipv4/udp.c 	sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
sk                603 net/ipv4/udp.c 	if (sk) {
sk                604 net/ipv4/udp.c 		int (*lookup)(struct sock *sk, struct sk_buff *skb);
sk                605 net/ipv4/udp.c 		struct udp_sock *up = udp_sk(sk);
sk                608 net/ipv4/udp.c 		if (!lookup || lookup(sk, skb))
sk                609 net/ipv4/udp.c 			sk = NULL;
sk                612 net/ipv4/udp.c 	if (!sk)
sk                613 net/ipv4/udp.c 		sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
sk                618 net/ipv4/udp.c 	return sk;
sk                640 net/ipv4/udp.c 	struct sock *sk;
sk                645 net/ipv4/udp.c 	sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
sk                648 net/ipv4/udp.c 	if (!sk) {
sk                650 net/ipv4/udp.c 		sk = ERR_PTR(-ENOENT);
sk                652 net/ipv4/udp.c 			sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
sk                654 net/ipv4/udp.c 			if (!sk)
sk                658 net/ipv4/udp.c 		if (IS_ERR(sk)) {
sk                660 net/ipv4/udp.c 			return PTR_ERR(sk);
sk                668 net/ipv4/udp.c 	inet = inet_sk(sk);
sk                683 net/ipv4/udp.c 			ipv4_sk_update_pmtu(skb, sk, info);
sk                698 net/ipv4/udp.c 		ipv4_sk_redirect(skb, sk);
sk                711 net/ipv4/udp.c 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
sk                714 net/ipv4/udp.c 		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
sk                716 net/ipv4/udp.c 	sk->sk_err = err;
sk                717 net/ipv4/udp.c 	sk->sk_error_report(sk);
sk                730 net/ipv4/udp.c void udp_flush_pending_frames(struct sock *sk)
sk                732 net/ipv4/udp.c 	struct udp_sock *up = udp_sk(sk);
sk                737 net/ipv4/udp.c 		ip_flush_pending_frames(sk);
sk                817 net/ipv4/udp.c 	struct sock *sk = skb->sk;
sk                818 net/ipv4/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk                821 net/ipv4/udp.c 	int is_udplite = IS_UDPLITE(sk);
sk                848 net/ipv4/udp.c 		if (sk->sk_no_check_tx) {
sk                870 net/ipv4/udp.c 	else if (sk->sk_no_check_tx) {			 /* UDP csum off */
sk                886 net/ipv4/udp.c 				      sk->sk_protocol, csum);
sk                891 net/ipv4/udp.c 	err = ip_send_skb(sock_net(sk), skb);
sk                894 net/ipv4/udp.c 			UDP_INC_STATS(sock_net(sk),
sk                899 net/ipv4/udp.c 		UDP_INC_STATS(sock_net(sk),
sk                907 net/ipv4/udp.c int udp_push_pending_frames(struct sock *sk)
sk                909 net/ipv4/udp.c 	struct udp_sock  *up = udp_sk(sk);
sk                910 net/ipv4/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk                915 net/ipv4/udp.c 	skb = ip_finish_skb(sk, fl4);
sk                941 net/ipv4/udp.c int udp_cmsg_send(struct sock *sk, struct msghdr *msg, u16 *gso_size)
sk                965 net/ipv4/udp.c int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk                967 net/ipv4/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk                968 net/ipv4/udp.c 	struct udp_sock *up = udp_sk(sk);
sk                980 net/ipv4/udp.c 	int err, is_udplite = IS_UDPLITE(sk);
sk               1004 net/ipv4/udp.c 		lock_sock(sk);
sk               1007 net/ipv4/udp.c 				release_sock(sk);
sk               1012 net/ipv4/udp.c 		release_sock(sk);
sk               1032 net/ipv4/udp.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1046 net/ipv4/udp.c 		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
sk               1048 net/ipv4/udp.c 			err = ip_cmsg_send(sk, msg, &ipc,
sk               1049 net/ipv4/udp.c 					   sk->sk_family == AF_INET6);
sk               1072 net/ipv4/udp.c 		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
sk               1099 net/ipv4/udp.c 	if (sock_flag(sk, SOCK_LOCALROUTE) ||
sk               1107 net/ipv4/udp.c 		if (!ipc.oif || netif_index_is_l3_master(sock_net(sk), ipc.oif))
sk               1122 net/ipv4/udp.c 		    ipc.oif == l3mdev_master_ifindex_by_index(sock_net(sk),
sk               1129 net/ipv4/udp.c 		rt = (struct rtable *)sk_dst_check(sk, 0);
sk               1132 net/ipv4/udp.c 		struct net *net = sock_net(sk);
sk               1133 net/ipv4/udp.c 		__u8 flow_flags = inet_sk_flowi_flags(sk);
sk               1138 net/ipv4/udp.c 				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
sk               1141 net/ipv4/udp.c 				   sk->sk_uid);
sk               1143 net/ipv4/udp.c 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
sk               1144 net/ipv4/udp.c 		rt = ip_route_output_flow(net, fl4, sk);
sk               1155 net/ipv4/udp.c 		    !sock_flag(sk, SOCK_BROADCAST))
sk               1158 net/ipv4/udp.c 			sk_dst_set(sk, dst_clone(&rt->dst));
sk               1173 net/ipv4/udp.c 		skb = ip_make_skb(sk, fl4, getfrag, msg, ulen,
sk               1182 net/ipv4/udp.c 	lock_sock(sk);
sk               1186 net/ipv4/udp.c 		release_sock(sk);
sk               1204 net/ipv4/udp.c 	err = ip_append_data(sk, fl4, getfrag, msg, ulen,
sk               1208 net/ipv4/udp.c 		udp_flush_pending_frames(sk);
sk               1210 net/ipv4/udp.c 		err = udp_push_pending_frames(sk);
sk               1211 net/ipv4/udp.c 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
sk               1213 net/ipv4/udp.c 	release_sock(sk);
sk               1229 net/ipv4/udp.c 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
sk               1230 net/ipv4/udp.c 		UDP_INC_STATS(sock_net(sk),
sk               1245 net/ipv4/udp.c int udp_sendpage(struct sock *sk, struct page *page, int offset,
sk               1248 net/ipv4/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk               1249 net/ipv4/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               1262 net/ipv4/udp.c 		ret = udp_sendmsg(sk, &msg, 0);
sk               1267 net/ipv4/udp.c 	lock_sock(sk);
sk               1270 net/ipv4/udp.c 		release_sock(sk);
sk               1276 net/ipv4/udp.c 	ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
sk               1279 net/ipv4/udp.c 		release_sock(sk);
sk               1280 net/ipv4/udp.c 		return sock_no_sendpage(sk->sk_socket, page, offset,
sk               1284 net/ipv4/udp.c 		udp_flush_pending_frames(sk);
sk               1290 net/ipv4/udp.c 		ret = udp_push_pending_frames(sk);
sk               1294 net/ipv4/udp.c 	release_sock(sk);
sk               1361 net/ipv4/udp.c static void udp_rmem_release(struct sock *sk, int size, int partial,
sk               1364 net/ipv4/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               1371 net/ipv4/udp.c 		if (size < (sk->sk_rcvbuf >> 2) &&
sk               1382 net/ipv4/udp.c 	sk_queue = &sk->sk_receive_queue;
sk               1387 net/ipv4/udp.c 	sk->sk_forward_alloc += size;
sk               1388 net/ipv4/udp.c 	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
sk               1389 net/ipv4/udp.c 	sk->sk_forward_alloc -= amt;
sk               1392 net/ipv4/udp.c 		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
sk               1394 net/ipv4/udp.c 	atomic_sub(size, &sk->sk_rmem_alloc);
sk               1408 net/ipv4/udp.c void udp_skb_destructor(struct sock *sk, struct sk_buff *skb)
sk               1411 net/ipv4/udp.c 	udp_rmem_release(sk, udp_skb_truesize(skb), 1, false);
sk               1416 net/ipv4/udp.c static void udp_skb_dtor_locked(struct sock *sk, struct sk_buff *skb)
sk               1419 net/ipv4/udp.c 	udp_rmem_release(sk, udp_skb_truesize(skb), 1, true);
sk               1447 net/ipv4/udp.c int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
sk               1449 net/ipv4/udp.c 	struct sk_buff_head *list = &sk->sk_receive_queue;
sk               1457 net/ipv4/udp.c 	rmem = atomic_read(&sk->sk_rmem_alloc);
sk               1458 net/ipv4/udp.c 	if (rmem > sk->sk_rcvbuf)
sk               1467 net/ipv4/udp.c 	if (rmem > (sk->sk_rcvbuf >> 1)) {
sk               1470 net/ipv4/udp.c 		busy = busylock_acquire(sk);
sk               1478 net/ipv4/udp.c 	rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
sk               1479 net/ipv4/udp.c 	if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
sk               1483 net/ipv4/udp.c 	if (size >= sk->sk_forward_alloc) {
sk               1486 net/ipv4/udp.c 		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
sk               1492 net/ipv4/udp.c 		sk->sk_forward_alloc += delta;
sk               1495 net/ipv4/udp.c 	sk->sk_forward_alloc -= size;
sk               1500 net/ipv4/udp.c 	sock_skb_set_dropcount(sk, skb);
sk               1505 net/ipv4/udp.c 	if (!sock_flag(sk, SOCK_DEAD))
sk               1506 net/ipv4/udp.c 		sk->sk_data_ready(sk);
sk               1512 net/ipv4/udp.c 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
sk               1515 net/ipv4/udp.c 	atomic_inc(&sk->sk_drops);
sk               1521 net/ipv4/udp.c void udp_destruct_sock(struct sock *sk)
sk               1524 net/ipv4/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               1528 net/ipv4/udp.c 	skb_queue_splice_tail_init(&sk->sk_receive_queue, &up->reader_queue);
sk               1533 net/ipv4/udp.c 	udp_rmem_release(sk, total, 0, true);
sk               1535 net/ipv4/udp.c 	inet_sock_destruct(sk);
sk               1539 net/ipv4/udp.c int udp_init_sock(struct sock *sk)
sk               1541 net/ipv4/udp.c 	skb_queue_head_init(&udp_sk(sk)->reader_queue);
sk               1542 net/ipv4/udp.c 	sk->sk_destruct = udp_destruct_sock;
sk               1547 net/ipv4/udp.c void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len)
sk               1549 net/ipv4/udp.c 	if (unlikely(READ_ONCE(sk->sk_peek_off) >= 0)) {
sk               1550 net/ipv4/udp.c 		bool slow = lock_sock_fast(sk);
sk               1552 net/ipv4/udp.c 		sk_peek_offset_bwd(sk, len);
sk               1553 net/ipv4/udp.c 		unlock_sock_fast(sk, slow);
sk               1568 net/ipv4/udp.c static struct sk_buff *__first_packet_length(struct sock *sk,
sk               1576 net/ipv4/udp.c 			__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
sk               1577 net/ipv4/udp.c 					IS_UDPLITE(sk));
sk               1578 net/ipv4/udp.c 			__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
sk               1579 net/ipv4/udp.c 					IS_UDPLITE(sk));
sk               1580 net/ipv4/udp.c 			atomic_inc(&sk->sk_drops);
sk               1599 net/ipv4/udp.c static int first_packet_length(struct sock *sk)
sk               1601 net/ipv4/udp.c 	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
sk               1602 net/ipv4/udp.c 	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
sk               1608 net/ipv4/udp.c 	skb = __first_packet_length(sk, rcvq, &total);
sk               1614 net/ipv4/udp.c 		skb = __first_packet_length(sk, rcvq, &total);
sk               1618 net/ipv4/udp.c 		udp_rmem_release(sk, total, 1, false);
sk               1627 net/ipv4/udp.c int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk               1632 net/ipv4/udp.c 		int amount = sk_wmem_alloc_get(sk);
sk               1639 net/ipv4/udp.c 		int amount = max_t(int, 0, first_packet_length(sk));
sk               1652 net/ipv4/udp.c struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
sk               1655 net/ipv4/udp.c 	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
sk               1661 net/ipv4/udp.c 	queue = &udp_sk(sk)->reader_queue;
sk               1663 net/ipv4/udp.c 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               1667 net/ipv4/udp.c 		error = sock_error(sk);
sk               1674 net/ipv4/udp.c 			skb = __skb_try_recv_from_queue(sk, queue, flags,
sk               1695 net/ipv4/udp.c 			skb = __skb_try_recv_from_queue(sk, queue, flags,
sk               1704 net/ipv4/udp.c 			if (!sk_can_busy_loop(sk))
sk               1707 net/ipv4/udp.c 			sk_busy_loop(sk, flags & MSG_DONTWAIT);
sk               1712 net/ipv4/udp.c 		 !__skb_wait_for_more_packets(sk, &error, &timeo,
sk               1725 net/ipv4/udp.c int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
sk               1728 net/ipv4/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk               1733 net/ipv4/udp.c 	int is_udplite = IS_UDPLITE(sk);
sk               1737 net/ipv4/udp.c 		return ip_recv_error(sk, msg, len, addr_len);
sk               1740 net/ipv4/udp.c 	off = sk_peek_offset(sk, flags);
sk               1741 net/ipv4/udp.c 	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
sk               1780 net/ipv4/udp.c 			atomic_inc(&sk->sk_drops);
sk               1781 net/ipv4/udp.c 			UDP_INC_STATS(sock_net(sk),
sk               1789 net/ipv4/udp.c 		UDP_INC_STATS(sock_net(sk),
sk               1792 net/ipv4/udp.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk               1803 net/ipv4/udp.c 			BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk,
sk               1807 net/ipv4/udp.c 	if (udp_sk(sk)->gro_enabled)
sk               1808 net/ipv4/udp.c 		udp_cmsg_recv(msg, sk, skb);
sk               1811 net/ipv4/udp.c 		ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
sk               1817 net/ipv4/udp.c 	skb_consume_udp(sk, skb, peeking ? -err : err);
sk               1821 net/ipv4/udp.c 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
sk               1823 net/ipv4/udp.c 		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
sk               1824 net/ipv4/udp.c 		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
sk               1834 net/ipv4/udp.c int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk               1843 net/ipv4/udp.c 	return BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr);
sk               1847 net/ipv4/udp.c int __udp_disconnect(struct sock *sk, int flags)
sk               1849 net/ipv4/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk               1854 net/ipv4/udp.c 	sk->sk_state = TCP_CLOSE;
sk               1857 net/ipv4/udp.c 	sock_rps_reset_rxhash(sk);
sk               1858 net/ipv4/udp.c 	sk->sk_bound_dev_if = 0;
sk               1859 net/ipv4/udp.c 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
sk               1860 net/ipv4/udp.c 		inet_reset_saddr(sk);
sk               1861 net/ipv4/udp.c 		if (sk->sk_prot->rehash &&
sk               1862 net/ipv4/udp.c 		    (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
sk               1863 net/ipv4/udp.c 			sk->sk_prot->rehash(sk);
sk               1866 net/ipv4/udp.c 	if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
sk               1867 net/ipv4/udp.c 		sk->sk_prot->unhash(sk);
sk               1870 net/ipv4/udp.c 	sk_dst_reset(sk);
sk               1875 net/ipv4/udp.c int udp_disconnect(struct sock *sk, int flags)
sk               1877 net/ipv4/udp.c 	lock_sock(sk);
sk               1878 net/ipv4/udp.c 	__udp_disconnect(sk, flags);
sk               1879 net/ipv4/udp.c 	release_sock(sk);
sk               1884 net/ipv4/udp.c void udp_lib_unhash(struct sock *sk)
sk               1886 net/ipv4/udp.c 	if (sk_hashed(sk)) {
sk               1887 net/ipv4/udp.c 		struct udp_table *udptable = sk->sk_prot->h.udp_table;
sk               1890 net/ipv4/udp.c 		hslot  = udp_hashslot(udptable, sock_net(sk),
sk               1891 net/ipv4/udp.c 				      udp_sk(sk)->udp_port_hash);
sk               1892 net/ipv4/udp.c 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
sk               1895 net/ipv4/udp.c 		if (rcu_access_pointer(sk->sk_reuseport_cb))
sk               1896 net/ipv4/udp.c 			reuseport_detach_sock(sk);
sk               1897 net/ipv4/udp.c 		if (sk_del_node_init_rcu(sk)) {
sk               1899 net/ipv4/udp.c 			inet_sk(sk)->inet_num = 0;
sk               1900 net/ipv4/udp.c 			sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk               1903 net/ipv4/udp.c 			hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
sk               1915 net/ipv4/udp.c void udp_lib_rehash(struct sock *sk, u16 newhash)
sk               1917 net/ipv4/udp.c 	if (sk_hashed(sk)) {
sk               1918 net/ipv4/udp.c 		struct udp_table *udptable = sk->sk_prot->h.udp_table;
sk               1921 net/ipv4/udp.c 		hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
sk               1923 net/ipv4/udp.c 		udp_sk(sk)->udp_portaddr_hash = newhash;
sk               1926 net/ipv4/udp.c 		    rcu_access_pointer(sk->sk_reuseport_cb)) {
sk               1927 net/ipv4/udp.c 			hslot = udp_hashslot(udptable, sock_net(sk),
sk               1928 net/ipv4/udp.c 					     udp_sk(sk)->udp_port_hash);
sk               1931 net/ipv4/udp.c 			if (rcu_access_pointer(sk->sk_reuseport_cb))
sk               1932 net/ipv4/udp.c 				reuseport_detach_sock(sk);
sk               1936 net/ipv4/udp.c 				hlist_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
sk               1941 net/ipv4/udp.c 				hlist_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
sk               1953 net/ipv4/udp.c void udp_v4_rehash(struct sock *sk)
sk               1955 net/ipv4/udp.c 	u16 new_hash = ipv4_portaddr_hash(sock_net(sk),
sk               1956 net/ipv4/udp.c 					  inet_sk(sk)->inet_rcv_saddr,
sk               1957 net/ipv4/udp.c 					  inet_sk(sk)->inet_num);
sk               1958 net/ipv4/udp.c 	udp_lib_rehash(sk, new_hash);
sk               1961 net/ipv4/udp.c static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk               1965 net/ipv4/udp.c 	if (inet_sk(sk)->inet_daddr) {
sk               1966 net/ipv4/udp.c 		sock_rps_save_rxhash(sk, skb);
sk               1967 net/ipv4/udp.c 		sk_mark_napi_id(sk, skb);
sk               1968 net/ipv4/udp.c 		sk_incoming_cpu_update(sk);
sk               1970 net/ipv4/udp.c 		sk_mark_napi_id_once(sk, skb);
sk               1973 net/ipv4/udp.c 	rc = __udp_enqueue_schedule_skb(sk, skb);
sk               1975 net/ipv4/udp.c 		int is_udplite = IS_UDPLITE(sk);
sk               1979 net/ipv4/udp.c 			UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
sk               1981 net/ipv4/udp.c 		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
sk               1983 net/ipv4/udp.c 		trace_udp_fail_queue_rcv_skb(rc, sk);
sk               1998 net/ipv4/udp.c static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
sk               2000 net/ipv4/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               2001 net/ipv4/udp.c 	int is_udplite = IS_UDPLITE(sk);
sk               2006 net/ipv4/udp.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
sk               2011 net/ipv4/udp.c 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
sk               2033 net/ipv4/udp.c 			ret = encap_rcv(sk, skb);
sk               2035 net/ipv4/udp.c 				__UDP_INC_STATS(sock_net(sk),
sk               2079 net/ipv4/udp.c 	prefetch(&sk->sk_rmem_alloc);
sk               2080 net/ipv4/udp.c 	if (rcu_access_pointer(sk->sk_filter) &&
sk               2084 net/ipv4/udp.c 	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
sk               2089 net/ipv4/udp.c 	ipv4_pktinfo_prepare(sk, skb);
sk               2090 net/ipv4/udp.c 	return __udp_queue_rcv_skb(sk, skb);
sk               2093 net/ipv4/udp.c 	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
sk               2095 net/ipv4/udp.c 	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
sk               2096 net/ipv4/udp.c 	atomic_inc(&sk->sk_drops);
sk               2101 net/ipv4/udp.c static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk               2106 net/ipv4/udp.c 	if (likely(!udp_unexpected_gso(sk, skb)))
sk               2107 net/ipv4/udp.c 		return udp_queue_rcv_one_skb(sk, skb);
sk               2111 net/ipv4/udp.c 	segs = udp_rcv_segment(sk, skb, true);
sk               2115 net/ipv4/udp.c 		ret = udp_queue_rcv_one_skb(sk, skb);
sk               2125 net/ipv4/udp.c bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
sk               2130 net/ipv4/udp.c 		old = xchg(&sk->sk_rx_dst, dst);
sk               2149 net/ipv4/udp.c 	struct sock *sk, *first = NULL;
sk               2153 net/ipv4/udp.c 	unsigned int offset = offsetof(typeof(*sk), sk_node);
sk               2165 net/ipv4/udp.c 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
sk               2168 net/ipv4/udp.c 	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
sk               2169 net/ipv4/udp.c 		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
sk               2174 net/ipv4/udp.c 			first = sk;
sk               2180 net/ipv4/udp.c 			atomic_inc(&sk->sk_drops);
sk               2182 net/ipv4/udp.c 					IS_UDPLITE(sk));
sk               2184 net/ipv4/udp.c 					IS_UDPLITE(sk));
sk               2187 net/ipv4/udp.c 		if (udp_queue_rcv_skb(sk, nskb) > 0)
sk               2258 net/ipv4/udp.c static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
sk               2263 net/ipv4/udp.c 	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
sk               2266 net/ipv4/udp.c 	ret = udp_queue_rcv_skb(sk, skb);
sk               2283 net/ipv4/udp.c 	struct sock *sk;
sk               2314 net/ipv4/udp.c 	sk = skb_steal_sock(skb);
sk               2315 net/ipv4/udp.c 	if (sk) {
sk               2319 net/ipv4/udp.c 		if (unlikely(sk->sk_rx_dst != dst))
sk               2320 net/ipv4/udp.c 			udp_sk_rx_dst_set(sk, dst);
sk               2322 net/ipv4/udp.c 		ret = udp_unicast_rcv_skb(sk, skb, uh);
sk               2323 net/ipv4/udp.c 		sock_put(sk);
sk               2331 net/ipv4/udp.c 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
sk               2332 net/ipv4/udp.c 	if (sk)
sk               2333 net/ipv4/udp.c 		return udp_unicast_rcv_skb(sk, skb, uh);
sk               2385 net/ipv4/udp.c 	struct sock *sk, *result;
sk               2395 net/ipv4/udp.c 	sk_for_each_rcu(sk, &hslot->head) {
sk               2396 net/ipv4/udp.c 		if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr,
sk               2400 net/ipv4/udp.c 			result = sk;
sk               2422 net/ipv4/udp.c 	struct sock *sk;
sk               2424 net/ipv4/udp.c 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
sk               2425 net/ipv4/udp.c 		if (INET_MATCH(sk, net, acookie, rmt_addr,
sk               2427 net/ipv4/udp.c 			return sk;
sk               2440 net/ipv4/udp.c 	struct sock *sk = NULL;
sk               2464 net/ipv4/udp.c 		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
sk               2468 net/ipv4/udp.c 		sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
sk               2472 net/ipv4/udp.c 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
sk               2475 net/ipv4/udp.c 	skb->sk = sk;
sk               2477 net/ipv4/udp.c 	dst = READ_ONCE(sk->sk_rx_dst);
sk               2493 net/ipv4/udp.c 		if (!inet_sk(sk)->inet_daddr && in_dev)
sk               2506 net/ipv4/udp.c void udp_destroy_sock(struct sock *sk)
sk               2508 net/ipv4/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               2509 net/ipv4/udp.c 	bool slow = lock_sock_fast(sk);
sk               2510 net/ipv4/udp.c 	udp_flush_pending_frames(sk);
sk               2511 net/ipv4/udp.c 	unlock_sock_fast(sk, slow);
sk               2514 net/ipv4/udp.c 			void (*encap_destroy)(struct sock *sk);
sk               2517 net/ipv4/udp.c 				encap_destroy(sk);
sk               2527 net/ipv4/udp.c int udp_lib_setsockopt(struct sock *sk, int level, int optname,
sk               2531 net/ipv4/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               2534 net/ipv4/udp.c 	int is_udplite = IS_UDPLITE(sk);
sk               2550 net/ipv4/udp.c 			lock_sock(sk);
sk               2551 net/ipv4/udp.c 			push_pending_frames(sk);
sk               2552 net/ipv4/udp.c 			release_sock(sk);
sk               2565 net/ipv4/udp.c 			lock_sock(sk);
sk               2566 net/ipv4/udp.c 			udp_tunnel_encap_enable(sk->sk_socket);
sk               2567 net/ipv4/udp.c 			release_sock(sk);
sk               2590 net/ipv4/udp.c 		lock_sock(sk);
sk               2592 net/ipv4/udp.c 			udp_tunnel_encap_enable(sk->sk_socket);
sk               2594 net/ipv4/udp.c 		release_sock(sk);
sk               2636 net/ipv4/udp.c int udp_setsockopt(struct sock *sk, int level, int optname,
sk               2640 net/ipv4/udp.c 		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
sk               2642 net/ipv4/udp.c 	return ip_setsockopt(sk, level, optname, optval, optlen);
sk               2646 net/ipv4/udp.c int compat_udp_setsockopt(struct sock *sk, int level, int optname,
sk               2650 net/ipv4/udp.c 		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
sk               2652 net/ipv4/udp.c 	return compat_ip_setsockopt(sk, level, optname, optval, optlen);
sk               2656 net/ipv4/udp.c int udp_lib_getsockopt(struct sock *sk, int level, int optname,
sk               2659 net/ipv4/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               2713 net/ipv4/udp.c int udp_getsockopt(struct sock *sk, int level, int optname,
sk               2717 net/ipv4/udp.c 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
sk               2718 net/ipv4/udp.c 	return ip_getsockopt(sk, level, optname, optval, optlen);
sk               2722 net/ipv4/udp.c int compat_udp_getsockopt(struct sock *sk, int level, int optname,
sk               2726 net/ipv4/udp.c 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
sk               2727 net/ipv4/udp.c 	return compat_ip_getsockopt(sk, level, optname, optval, optlen);
sk               2746 net/ipv4/udp.c 	struct sock *sk = sock->sk;
sk               2748 net/ipv4/udp.c 	if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
sk               2753 net/ipv4/udp.c 	    !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
sk               2761 net/ipv4/udp.c int udp_abort(struct sock *sk, int err)
sk               2763 net/ipv4/udp.c 	lock_sock(sk);
sk               2765 net/ipv4/udp.c 	sk->sk_err = err;
sk               2766 net/ipv4/udp.c 	sk->sk_error_report(sk);
sk               2767 net/ipv4/udp.c 	__udp_disconnect(sk, 0);
sk               2769 net/ipv4/udp.c 	release_sock(sk);
sk               2814 net/ipv4/udp.c 	struct sock *sk;
sk               2827 net/ipv4/udp.c 		sk_for_each(sk, &hslot->head) {
sk               2828 net/ipv4/udp.c 			if (!net_eq(sock_net(sk), net))
sk               2830 net/ipv4/udp.c 			if (sk->sk_family == afinfo->family)
sk               2835 net/ipv4/udp.c 	sk = NULL;
sk               2837 net/ipv4/udp.c 	return sk;
sk               2840 net/ipv4/udp.c static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
sk               2847 net/ipv4/udp.c 		sk = sk_next(sk);
sk               2848 net/ipv4/udp.c 	} while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family));
sk               2850 net/ipv4/udp.c 	if (!sk) {
sk               2855 net/ipv4/udp.c 	return sk;
sk               2860 net/ipv4/udp.c 	struct sock *sk = udp_get_first(seq, 0);
sk               2862 net/ipv4/udp.c 	if (sk)
sk               2863 net/ipv4/udp.c 		while (pos && (sk = udp_get_next(seq, sk)) != NULL)
sk               2865 net/ipv4/udp.c 	return pos ? NULL : sk;
sk               2879 net/ipv4/udp.c 	struct sock *sk;
sk               2882 net/ipv4/udp.c 		sk = udp_get_idx(seq, 0);
sk               2884 net/ipv4/udp.c 		sk = udp_get_next(seq, v);
sk               2887 net/ipv4/udp.c 	return sk;
sk                 16 net/ipv4/udp_diag.c static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
sk                 21 net/ipv4/udp_diag.c 	if (!inet_diag_bc_sk(bc, sk))
sk                 24 net/ipv4/udp_diag.c 	return inet_sk_diag_fill(sk, NULL, skb, req,
sk                 25 net/ipv4/udp_diag.c 			sk_user_ns(NETLINK_CB(cb->skb).sk),
sk                 35 net/ipv4/udp_diag.c 	struct sock *sk = NULL;
sk                 37 net/ipv4/udp_diag.c 	struct net *net = sock_net(in_skb->sk);
sk                 42 net/ipv4/udp_diag.c 		sk = __udp4_lib_lookup(net,
sk                 48 net/ipv4/udp_diag.c 		sk = __udp6_lib_lookup(net,
sk                 55 net/ipv4/udp_diag.c 	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk                 56 net/ipv4/udp_diag.c 		sk = NULL;
sk                 59 net/ipv4/udp_diag.c 	if (!sk)
sk                 62 net/ipv4/udp_diag.c 	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
sk                 74 net/ipv4/udp_diag.c 	err = inet_sk_diag_fill(sk, NULL, rep, req,
sk                 75 net/ipv4/udp_diag.c 			   sk_user_ns(NETLINK_CB(in_skb).sk),
sk                 89 net/ipv4/udp_diag.c 	if (sk)
sk                 90 net/ipv4/udp_diag.c 		sock_put(sk);
sk                100 net/ipv4/udp_diag.c 	struct net *net = sock_net(skb->sk);
sk                108 net/ipv4/udp_diag.c 		struct sock *sk;
sk                116 net/ipv4/udp_diag.c 		sk_for_each(sk, &hslot->head) {
sk                117 net/ipv4/udp_diag.c 			struct inet_sock *inet = inet_sk(sk);
sk                119 net/ipv4/udp_diag.c 			if (!net_eq(sock_net(sk), net))
sk                123 net/ipv4/udp_diag.c 			if (!(r->idiag_states & (1 << sk->sk_state)))
sk                126 net/ipv4/udp_diag.c 					sk->sk_family != r->sdiag_family)
sk                135 net/ipv4/udp_diag.c 			if (sk_diag_dump(sk, skb, cb, r, bc, net_admin) < 0) {
sk                161 net/ipv4/udp_diag.c static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
sk                164 net/ipv4/udp_diag.c 	r->idiag_rqueue = udp_rqueue_get(sk);
sk                165 net/ipv4/udp_diag.c 	r->idiag_wqueue = sk_wmem_alloc_get(sk);
sk                173 net/ipv4/udp_diag.c 	struct net *net = sock_net(in_skb->sk);
sk                174 net/ipv4/udp_diag.c 	struct sock *sk;
sk                180 net/ipv4/udp_diag.c 		sk = __udp4_lib_lookup(net,
sk                188 net/ipv4/udp_diag.c 			sk = __udp4_lib_lookup(net,
sk                194 net/ipv4/udp_diag.c 			sk = __udp6_lib_lookup(net,
sk                207 net/ipv4/udp_diag.c 	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk                208 net/ipv4/udp_diag.c 		sk = NULL;
sk                212 net/ipv4/udp_diag.c 	if (!sk)
sk                215 net/ipv4/udp_diag.c 	if (sock_diag_check_cookie(sk, req->id.idiag_cookie)) {
sk                216 net/ipv4/udp_diag.c 		sock_put(sk);
sk                220 net/ipv4/udp_diag.c 	err = sock_diag_destroy(sk, ECONNABORTED);
sk                222 net/ipv4/udp_diag.c 	sock_put(sk);
sk                 12 net/ipv4/udp_impl.h int udp_v4_get_port(struct sock *sk, unsigned short snum);
sk                 13 net/ipv4/udp_impl.h void udp_v4_rehash(struct sock *sk);
sk                 15 net/ipv4/udp_impl.h int udp_setsockopt(struct sock *sk, int level, int optname,
sk                 17 net/ipv4/udp_impl.h int udp_getsockopt(struct sock *sk, int level, int optname,
sk                 21 net/ipv4/udp_impl.h int compat_udp_setsockopt(struct sock *sk, int level, int optname,
sk                 23 net/ipv4/udp_impl.h int compat_udp_getsockopt(struct sock *sk, int level, int optname,
sk                 26 net/ipv4/udp_impl.h int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
sk                 28 net/ipv4/udp_impl.h int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
sk                 30 net/ipv4/udp_impl.h void udp_destroy_sock(struct sock *sk);
sk                190 net/ipv4/udp_offload.c 	struct sock *sk = gso_skb->sk;
sk                239 net/ipv4/udp_offload.c 			seg->sk = sk;
sk                280 net/ipv4/udp_offload.c 			refcount_add(delta, &sk->sk_wmem_alloc);
sk                282 net/ipv4/udp_offload.c 			WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
sk                414 net/ipv4/udp_offload.c 	struct sock *sk;
sk                417 net/ipv4/udp_offload.c 	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
sk                419 net/ipv4/udp_offload.c 	if (!sk)
sk                422 net/ipv4/udp_offload.c 	if (udp_sk(sk)->gro_enabled) {
sk                432 net/ipv4/udp_offload.c 	    !udp_sk(sk)->gro_receive)
sk                458 net/ipv4/udp_offload.c 	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
sk                513 net/ipv4/udp_offload.c 	struct sock *sk;
sk                518 net/ipv4/udp_offload.c 	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
sk                520 net/ipv4/udp_offload.c 	if (sk && udp_sk(sk)->gro_enabled) {
sk                522 net/ipv4/udp_offload.c 	} else if (sk && udp_sk(sk)->gro_complete) {
sk                530 net/ipv4/udp_offload.c 		err = udp_sk(sk)->gro_complete(sk, skb,
sk                 50 net/ipv4/udp_tunnel.c 	sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;
sk                 68 net/ipv4/udp_tunnel.c 	struct sock *sk = sock->sk;
sk                 71 net/ipv4/udp_tunnel.c 	inet_sk(sk)->mc_loop = 0;
sk                 74 net/ipv4/udp_tunnel.c 	inet_inc_convert_csum(sk);
sk                 76 net/ipv4/udp_tunnel.c 	rcu_assign_sk_user_data(sk, cfg->sk_user_data);
sk                 78 net/ipv4/udp_tunnel.c 	udp_sk(sk)->encap_type = cfg->encap_type;
sk                 79 net/ipv4/udp_tunnel.c 	udp_sk(sk)->encap_rcv = cfg->encap_rcv;
sk                 80 net/ipv4/udp_tunnel.c 	udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
sk                 81 net/ipv4/udp_tunnel.c 	udp_sk(sk)->encap_destroy = cfg->encap_destroy;
sk                 82 net/ipv4/udp_tunnel.c 	udp_sk(sk)->gro_receive = cfg->gro_receive;
sk                 83 net/ipv4/udp_tunnel.c 	udp_sk(sk)->gro_complete = cfg->gro_complete;
sk                 92 net/ipv4/udp_tunnel.c 	struct sock *sk = sock->sk;
sk                100 net/ipv4/udp_tunnel.c 	ti.sa_family = sk->sk_family;
sk                101 net/ipv4/udp_tunnel.c 	ti.port = inet_sk(sk)->inet_sport;
sk                110 net/ipv4/udp_tunnel.c 	struct sock *sk = sock->sk;
sk                118 net/ipv4/udp_tunnel.c 	ti.sa_family = sk->sk_family;
sk                119 net/ipv4/udp_tunnel.c 	ti.port = inet_sk(sk)->inet_sport;
sk                128 net/ipv4/udp_tunnel.c 	struct sock *sk = sock->sk;
sk                129 net/ipv4/udp_tunnel.c 	struct net *net = sock_net(sk);
sk                134 net/ipv4/udp_tunnel.c 	ti.sa_family = sk->sk_family;
sk                135 net/ipv4/udp_tunnel.c 	ti.port = inet_sk(sk)->inet_sport;
sk                152 net/ipv4/udp_tunnel.c 	struct sock *sk = sock->sk;
sk                153 net/ipv4/udp_tunnel.c 	struct net *net = sock_net(sk);
sk                158 net/ipv4/udp_tunnel.c 	ti.sa_family = sk->sk_family;
sk                159 net/ipv4/udp_tunnel.c 	ti.port = inet_sk(sk)->inet_sport;
sk                173 net/ipv4/udp_tunnel.c void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
sk                192 net/ipv4/udp_tunnel.c 	iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, xnet);
sk                198 net/ipv4/udp_tunnel.c 	rcu_assign_sk_user_data(sock->sk, NULL);
sk                 26 net/ipv4/xfrm4_input.c static int xfrm4_rcv_encap_finish2(struct net *net, struct sock *sk,
sk                 32 net/ipv4/xfrm4_input.c static inline int xfrm4_rcv_encap_finish(struct net *net, struct sock *sk,
sk                 87 net/ipv4/xfrm4_input.c int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
sk                 89 net/ipv4/xfrm4_input.c 	struct udp_sock *up = udp_sk(sk);
sk                 30 net/ipv4/xfrm4_output.c 	     !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
sk                 33 net/ipv4/xfrm4_output.c 		if (skb->sk)
sk                 57 net/ipv4/xfrm4_output.c int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb)
sk                 63 net/ipv4/xfrm4_output.c 	return xfrm_output(sk, skb);
sk                 66 net/ipv4/xfrm4_output.c static int __xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 75 net/ipv4/xfrm4_output.c 		return dst_output(net, sk, skb);
sk                 82 net/ipv4/xfrm4_output.c 		ret = afinfo->output_finish(sk, skb);
sk                 90 net/ipv4/xfrm4_output.c int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 93 net/ipv4/xfrm4_output.c 			    net, sk, skb, NULL, skb_dst(skb)->dev,
sk                103 net/ipv4/xfrm4_output.c 	ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
sk                104 net/ipv4/xfrm4_output.c 		       inet_sk(skb->sk)->inet_dport, mtu);
sk                102 net/ipv4/xfrm4_policy.c static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk                109 net/ipv4/xfrm4_policy.c 	path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
sk                112 net/ipv4/xfrm4_policy.c static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
sk                118 net/ipv4/xfrm4_policy.c 	path->ops->redirect(path, sk, skb);
sk                640 net/ipv6/addrconf.c 	struct net *net = sock_net(in_skb->sk);
sk                704 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
sk               2876 net/ipv6/addrconf.c static int ipv6_mc_config(struct sock *sk, bool join,
sk               2883 net/ipv6/addrconf.c 	lock_sock(sk);
sk               2885 net/ipv6/addrconf.c 		ret = ipv6_sock_mc_join(sk, ifindex, addr);
sk               2887 net/ipv6/addrconf.c 		ret = ipv6_sock_mc_drop(sk, ifindex, addr);
sk               2888 net/ipv6/addrconf.c 	release_sock(sk);
sk               4572 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
sk               4756 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
sk               5098 net/ipv6/addrconf.c 				       struct net **tgt_net, struct sock *sk,
sk               5136 net/ipv6/addrconf.c 			net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
sk               5163 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
sk               5178 net/ipv6/addrconf.c 						  skb->sk, cb);
sk               5303 net/ipv6/addrconf.c 	struct net *net = sock_net(in_skb->sk);
sk               5327 net/ipv6/addrconf.c 		tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
sk               5848 net/ipv6/addrconf.c 	struct net *net = sock_net(skb->sk);
sk                133 net/ipv6/addrconf_core.c 							   const struct sock *sk,
sk                379 net/ipv6/addrlabel.c 	struct net *net = sock_net(skb->sk);
sk                489 net/ipv6/addrlabel.c 	struct net *net = sock_net(skb->sk);
sk                573 net/ipv6/addrlabel.c 	struct net *net = sock_net(in_skb->sk);
sk                100 net/ipv6/af_inet6.c static __inline__ struct ipv6_pinfo *inet6_sk_generic(struct sock *sk)
sk                102 net/ipv6/af_inet6.c 	const int offset = sk->sk_prot->obj_size - sizeof(struct ipv6_pinfo);
sk                104 net/ipv6/af_inet6.c 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
sk                112 net/ipv6/af_inet6.c 	struct sock *sk;
sk                180 net/ipv6/af_inet6.c 	sk = sk_alloc(net, PF_INET6, GFP_KERNEL, answer_prot, kern);
sk                181 net/ipv6/af_inet6.c 	if (!sk)
sk                184 net/ipv6/af_inet6.c 	sock_init_data(sock, sk);
sk                188 net/ipv6/af_inet6.c 		sk->sk_reuse = SK_CAN_REUSE;
sk                190 net/ipv6/af_inet6.c 	inet = inet_sk(sk);
sk                199 net/ipv6/af_inet6.c 	sk->sk_destruct		= inet_sock_destruct;
sk                200 net/ipv6/af_inet6.c 	sk->sk_family		= PF_INET6;
sk                201 net/ipv6/af_inet6.c 	sk->sk_protocol		= protocol;
sk                203 net/ipv6/af_inet6.c 	sk->sk_backlog_rcv	= answer->prot->backlog_rcv;
sk                205 net/ipv6/af_inet6.c 	inet_sk(sk)->pinet6 = np = inet6_sk_generic(sk);
sk                212 net/ipv6/af_inet6.c 	sk->sk_ipv6only	= net->ipv6.sysctl.bindv6only;
sk                238 net/ipv6/af_inet6.c 	sk_refcnt_debug_inc(sk);
sk                246 net/ipv6/af_inet6.c 		err = sk->sk_prot->hash(sk);
sk                248 net/ipv6/af_inet6.c 			sk_common_release(sk);
sk                252 net/ipv6/af_inet6.c 	if (sk->sk_prot->init) {
sk                253 net/ipv6/af_inet6.c 		err = sk->sk_prot->init(sk);
sk                255 net/ipv6/af_inet6.c 			sk_common_release(sk);
sk                261 net/ipv6/af_inet6.c 		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
sk                263 net/ipv6/af_inet6.c 			sk_common_release(sk);
sk                274 net/ipv6/af_inet6.c static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
sk                278 net/ipv6/af_inet6.c 	struct inet_sock *inet = inet_sk(sk);
sk                279 net/ipv6/af_inet6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                280 net/ipv6/af_inet6.c 	struct net *net = sock_net(sk);
sk                291 net/ipv6/af_inet6.c 	if ((addr_type & IPV6_ADDR_MULTICAST) && sk->sk_type == SOCK_STREAM)
sk                300 net/ipv6/af_inet6.c 		lock_sock(sk);
sk                303 net/ipv6/af_inet6.c 	if (sk->sk_state != TCP_CLOSE || inet->inet_num) {
sk                316 net/ipv6/af_inet6.c 		if (sk->sk_ipv6only) {
sk                322 net/ipv6/af_inet6.c 		if (sk->sk_bound_dev_if) {
sk                323 net/ipv6/af_inet6.c 			dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
sk                354 net/ipv6/af_inet6.c 					sk->sk_bound_dev_if = addr->sin6_scope_id;
sk                358 net/ipv6/af_inet6.c 				if (!sk->sk_bound_dev_if) {
sk                364 net/ipv6/af_inet6.c 			if (sk->sk_bound_dev_if) {
sk                365 net/ipv6/af_inet6.c 				dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
sk                391 net/ipv6/af_inet6.c 	sk->sk_v6_rcv_saddr = addr->sin6_addr;
sk                396 net/ipv6/af_inet6.c 	saved_ipv6only = sk->sk_ipv6only;
sk                398 net/ipv6/af_inet6.c 		sk->sk_ipv6only = 1;
sk                403 net/ipv6/af_inet6.c 		if (sk->sk_prot->get_port(sk, snum)) {
sk                404 net/ipv6/af_inet6.c 			sk->sk_ipv6only = saved_ipv6only;
sk                405 net/ipv6/af_inet6.c 			inet_reset_saddr(sk);
sk                409 net/ipv6/af_inet6.c 		err = BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk);
sk                411 net/ipv6/af_inet6.c 			sk->sk_ipv6only = saved_ipv6only;
sk                412 net/ipv6/af_inet6.c 			inet_reset_saddr(sk);
sk                418 net/ipv6/af_inet6.c 		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
sk                420 net/ipv6/af_inet6.c 		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
sk                426 net/ipv6/af_inet6.c 		release_sock(sk);
sk                436 net/ipv6/af_inet6.c 	struct sock *sk = sock->sk;
sk                440 net/ipv6/af_inet6.c 	if (sk->sk_prot->bind)
sk                441 net/ipv6/af_inet6.c 		return sk->sk_prot->bind(sk, uaddr, addr_len);
sk                449 net/ipv6/af_inet6.c 	err = BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr);
sk                453 net/ipv6/af_inet6.c 	return __inet6_bind(sk, uaddr, addr_len, false, true);
sk                459 net/ipv6/af_inet6.c 	struct sock *sk = sock->sk;
sk                461 net/ipv6/af_inet6.c 	if (!sk)
sk                465 net/ipv6/af_inet6.c 	ipv6_sock_mc_close(sk);
sk                468 net/ipv6/af_inet6.c 	ipv6_sock_ac_close(sk);
sk                474 net/ipv6/af_inet6.c void inet6_destroy_sock(struct sock *sk)
sk                476 net/ipv6/af_inet6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                489 net/ipv6/af_inet6.c 	fl6_free_socklist(sk);
sk                495 net/ipv6/af_inet6.c 		atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
sk                509 net/ipv6/af_inet6.c 	struct sock *sk = sock->sk;
sk                510 net/ipv6/af_inet6.c 	struct inet_sock *inet = inet_sk(sk);
sk                511 net/ipv6/af_inet6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                519 net/ipv6/af_inet6.c 		if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
sk                523 net/ipv6/af_inet6.c 		sin->sin6_addr = sk->sk_v6_daddr;
sk                527 net/ipv6/af_inet6.c 		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
sk                530 net/ipv6/af_inet6.c 			sin->sin6_addr = sk->sk_v6_rcv_saddr;
sk                535 net/ipv6/af_inet6.c 						 sk->sk_bound_dev_if);
sk                542 net/ipv6/af_inet6.c 	struct sock *sk = sock->sk;
sk                543 net/ipv6/af_inet6.c 	struct net *net = sock_net(sk);
sk                558 net/ipv6/af_inet6.c 		if (!sk->sk_prot->ioctl)
sk                560 net/ipv6/af_inet6.c 		return sk->sk_prot->ioctl(sk, cmd, arg);
sk                571 net/ipv6/af_inet6.c 	struct sock *sk = sock->sk;
sk                573 net/ipv6/af_inet6.c 	if (unlikely(inet_send_prepare(sk)))
sk                576 net/ipv6/af_inet6.c 	return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udpv6_sendmsg,
sk                577 net/ipv6/af_inet6.c 			       sk, msg, size);
sk                585 net/ipv6/af_inet6.c 	struct sock *sk = sock->sk;
sk                590 net/ipv6/af_inet6.c 		sock_rps_record_flow(sk);
sk                592 net/ipv6/af_inet6.c 	err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udpv6_recvmsg,
sk                593 net/ipv6/af_inet6.c 			      sk, msg, size, flags & MSG_DONTWAIT,
sk                739 net/ipv6/af_inet6.c int inet6_sk_rebuild_header(struct sock *sk)
sk                741 net/ipv6/af_inet6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                744 net/ipv6/af_inet6.c 	dst = __sk_dst_check(sk, np->dst_cookie);
sk                747 net/ipv6/af_inet6.c 		struct inet_sock *inet = inet_sk(sk);
sk                752 net/ipv6/af_inet6.c 		fl6.flowi6_proto = sk->sk_protocol;
sk                753 net/ipv6/af_inet6.c 		fl6.daddr = sk->sk_v6_daddr;
sk                756 net/ipv6/af_inet6.c 		fl6.flowi6_oif = sk->sk_bound_dev_if;
sk                757 net/ipv6/af_inet6.c 		fl6.flowi6_mark = sk->sk_mark;
sk                760 net/ipv6/af_inet6.c 		fl6.flowi6_uid = sk->sk_uid;
sk                761 net/ipv6/af_inet6.c 		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
sk                768 net/ipv6/af_inet6.c 		dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
sk                770 net/ipv6/af_inet6.c 			sk->sk_route_caps = 0;
sk                771 net/ipv6/af_inet6.c 			sk->sk_err_soft = -PTR_ERR(dst);
sk                775 net/ipv6/af_inet6.c 		ip6_dst_store(sk, dst, NULL, NULL);
sk                782 net/ipv6/af_inet6.c bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
sk                785 net/ipv6/af_inet6.c 	const struct ipv6_pinfo *np = inet6_sk(sk);
sk                 63 net/ipv6/anycast.c int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
sk                 65 net/ipv6/anycast.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                 69 net/ipv6/anycast.c 	struct net *net = sock_net(sk);
sk                 86 net/ipv6/anycast.c 	pac = sock_kmalloc(sk, sizeof(struct ipv6_ac_socklist), GFP_KERNEL);
sk                148 net/ipv6/anycast.c 		sock_kfree_s(sk, pac, sizeof(*pac));
sk                155 net/ipv6/anycast.c int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
sk                157 net/ipv6/anycast.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                160 net/ipv6/anycast.c 	struct net *net = sock_net(sk);
sk                182 net/ipv6/anycast.c 	sock_kfree_s(sk, pac, sizeof(*pac));
sk                186 net/ipv6/anycast.c void ipv6_sock_ac_close(struct sock *sk)
sk                188 net/ipv6/anycast.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                191 net/ipv6/anycast.c 	struct net *net = sock_net(sk);
sk                211 net/ipv6/anycast.c 		sock_kfree_s(sk, pac, sizeof(*pac));
sk                784 net/ipv6/calipso.c static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop)
sk                786 net/ipv6/calipso.c 	struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts;
sk                788 net/ipv6/calipso.c 	txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop);
sk                793 net/ipv6/calipso.c 	txopts = ipv6_update_options(sk, txopts);
sk                795 net/ipv6/calipso.c 		atomic_sub(txopts->tot_len, &sk->sk_omem_alloc);
sk               1076 net/ipv6/calipso.c static int calipso_sock_getattr(struct sock *sk,
sk               1082 net/ipv6/calipso.c 	struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));
sk               1129 net/ipv6/calipso.c static int calipso_sock_setattr(struct sock *sk,
sk               1135 net/ipv6/calipso.c 	struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));
sk               1146 net/ipv6/calipso.c 	ret_val = calipso_opt_update(sk, new);
sk               1160 net/ipv6/calipso.c static void calipso_sock_delattr(struct sock *sk)
sk               1163 net/ipv6/calipso.c 	struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk));
sk               1171 net/ipv6/calipso.c 	calipso_opt_update(sk, new_hop);
sk               1200 net/ipv6/calipso.c 	struct sock *sk = sk_to_full_sk(req_to_sk(req));
sk               1211 net/ipv6/calipso.c 	txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
sk               1220 net/ipv6/calipso.c 		atomic_sub(txopts->tot_len, &sk->sk_omem_alloc);
sk               1240 net/ipv6/calipso.c 	struct sock *sk = sk_to_full_sk(req_to_sk(req));
sk               1248 net/ipv6/calipso.c 	txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
sk               1253 net/ipv6/calipso.c 			atomic_sub(txopts->tot_len, &sk->sk_omem_alloc);
sk                 40 net/ipv6/datagram.c static void ip6_datagram_flow_key_init(struct flowi6 *fl6, struct sock *sk)
sk                 42 net/ipv6/datagram.c 	struct inet_sock *inet = inet_sk(sk);
sk                 43 net/ipv6/datagram.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                 46 net/ipv6/datagram.c 	fl6->flowi6_proto = sk->sk_protocol;
sk                 47 net/ipv6/datagram.c 	fl6->daddr = sk->sk_v6_daddr;
sk                 49 net/ipv6/datagram.c 	fl6->flowi6_oif = sk->sk_bound_dev_if;
sk                 50 net/ipv6/datagram.c 	fl6->flowi6_mark = sk->sk_mark;
sk                 54 net/ipv6/datagram.c 	fl6->flowi6_uid = sk->sk_uid;
sk                 62 net/ipv6/datagram.c 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
sk                 65 net/ipv6/datagram.c int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr)
sk                 71 net/ipv6/datagram.c 	struct inet_sock *inet = inet_sk(sk);
sk                 72 net/ipv6/datagram.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                 77 net/ipv6/datagram.c 		flowlabel = fl6_sock_lookup(sk, np->flow_label);
sk                 81 net/ipv6/datagram.c 	ip6_datagram_flow_key_init(&fl6, sk);
sk                 88 net/ipv6/datagram.c 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
sk                 98 net/ipv6/datagram.c 		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
sk                 99 net/ipv6/datagram.c 			sk->sk_v6_rcv_saddr = fl6.saddr;
sk                101 net/ipv6/datagram.c 			if (sk->sk_prot->rehash)
sk                102 net/ipv6/datagram.c 				sk->sk_prot->rehash(sk);
sk                106 net/ipv6/datagram.c 	ip6_sk_dst_store_flow(sk, dst, &fl6);
sk                113 net/ipv6/datagram.c void ip6_datagram_release_cb(struct sock *sk)
sk                117 net/ipv6/datagram.c 	if (ipv6_addr_v4mapped(&sk->sk_v6_daddr))
sk                121 net/ipv6/datagram.c 	dst = __sk_dst_get(sk);
sk                123 net/ipv6/datagram.c 	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie)) {
sk                129 net/ipv6/datagram.c 	ip6_datagram_dst_update(sk, false);
sk                133 net/ipv6/datagram.c int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
sk                137 net/ipv6/datagram.c 	struct inet_sock	*inet = inet_sk(sk);
sk                138 net/ipv6/datagram.c 	struct ipv6_pinfo	*np = inet6_sk(sk);
sk                147 net/ipv6/datagram.c 		if (__ipv6_only_sock(sk))
sk                149 net/ipv6/datagram.c 		err = __ip4_datagram_connect(sk, uaddr, addr_len);
sk                166 net/ipv6/datagram.c 		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
sk                180 net/ipv6/datagram.c 		if (__ipv6_only_sock(sk)) {
sk                188 net/ipv6/datagram.c 		err = __ip4_datagram_connect(sk,
sk                196 net/ipv6/datagram.c 		ipv6_addr_set_v4mapped(inet->inet_daddr, &sk->sk_v6_daddr);
sk                202 net/ipv6/datagram.c 		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) ||
sk                203 net/ipv6/datagram.c 		    ipv6_mapped_addr_any(&sk->sk_v6_rcv_saddr)) {
sk                205 net/ipv6/datagram.c 					       &sk->sk_v6_rcv_saddr);
sk                206 net/ipv6/datagram.c 			if (sk->sk_prot->rehash)
sk                207 net/ipv6/datagram.c 				sk->sk_prot->rehash(sk);
sk                216 net/ipv6/datagram.c 			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id)) {
sk                220 net/ipv6/datagram.c 			sk->sk_bound_dev_if = usin->sin6_scope_id;
sk                223 net/ipv6/datagram.c 		if (!sk->sk_bound_dev_if && (addr_type & IPV6_ADDR_MULTICAST))
sk                224 net/ipv6/datagram.c 			sk->sk_bound_dev_if = np->mcast_oif;
sk                227 net/ipv6/datagram.c 		if (!sk->sk_bound_dev_if) {
sk                234 net/ipv6/datagram.c 	old_daddr = sk->sk_v6_daddr;
sk                238 net/ipv6/datagram.c 	sk->sk_v6_daddr = *daddr;
sk                247 net/ipv6/datagram.c 	err = ip6_datagram_dst_update(sk, true);
sk                252 net/ipv6/datagram.c 		sk->sk_v6_daddr = old_daddr;
sk                258 net/ipv6/datagram.c 	reuseport_has_conns(sk, true);
sk                259 net/ipv6/datagram.c 	sk->sk_state = TCP_ESTABLISHED;
sk                260 net/ipv6/datagram.c 	sk_set_txhash(sk);
sk                266 net/ipv6/datagram.c int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                270 net/ipv6/datagram.c 	lock_sock(sk);
sk                271 net/ipv6/datagram.c 	res = __ip6_datagram_connect(sk, uaddr, addr_len);
sk                272 net/ipv6/datagram.c 	release_sock(sk);
sk                277 net/ipv6/datagram.c int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
sk                283 net/ipv6/datagram.c 	return ip6_datagram_connect(sk, uaddr, addr_len);
sk                287 net/ipv6/datagram.c void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
sk                290 net/ipv6/datagram.c 	struct ipv6_pinfo *np  = inet6_sk(sk);
sk                318 net/ipv6/datagram.c 	if (sock_queue_err_skb(sk, skb))
sk                322 net/ipv6/datagram.c void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
sk                324 net/ipv6/datagram.c 	const struct ipv6_pinfo *np = inet6_sk(sk);
sk                358 net/ipv6/datagram.c 	if (sock_queue_err_skb(sk, skb))
sk                362 net/ipv6/datagram.c void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
sk                364 net/ipv6/datagram.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                432 net/ipv6/datagram.c int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
sk                434 net/ipv6/datagram.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                446 net/ipv6/datagram.c 	skb = sock_dequeue_err_skb(sk);
sk                460 net/ipv6/datagram.c 	sock_recv_timestamp(msg, sk, skb);
sk                493 net/ipv6/datagram.c 			ip6_datagram_recv_common_ctl(sk, msg, skb);
sk                497 net/ipv6/datagram.c 				ip6_datagram_recv_specific_ctl(sk, msg, skb);
sk                504 net/ipv6/datagram.c 			if (inet_sk(sk)->cmsg_flags)
sk                525 net/ipv6/datagram.c int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
sk                528 net/ipv6/datagram.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                549 net/ipv6/datagram.c 	sock_recv_timestamp(msg, sk, skb);
sk                573 net/ipv6/datagram.c void ip6_datagram_recv_common_ctl(struct sock *sk, struct msghdr *msg,
sk                576 net/ipv6/datagram.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                598 net/ipv6/datagram.c void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
sk                601 net/ipv6/datagram.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                729 net/ipv6/datagram.c void ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
sk                732 net/ipv6/datagram.c 	ip6_datagram_recv_common_ctl(sk, msg, skb);
sk                733 net/ipv6/datagram.c 	ip6_datagram_recv_specific_ctl(sk, msg, skb);
sk                737 net/ipv6/datagram.c int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
sk                758 net/ipv6/datagram.c 			err = __sock_cmsg_send(sk, msg, cmsg, &ipc6->sockc);
sk                785 net/ipv6/datagram.c 				    (sk->sk_bound_dev_if != fl6->flowi6_oif ||
sk                786 net/ipv6/datagram.c 				     !sk_dev_equal_l3scope(sk, src_idx)))
sk                807 net/ipv6/datagram.c 				if (!ipv6_can_nonlocal_bind(net, inet_sk(sk)) &&
sk                245 net/ipv6/esp6.c 			struct sock *sk = skb->sk;
sk                285 net/ipv6/esp6.c 			if (sk && sk_fullsock(sk))
sk                286 net/ipv6/esp6.c 				refcount_add(tailen, &sk->sk_wmem_alloc);
sk                926 net/ipv6/exthdrs.c 		else if (skb->sk)
sk                927 net/ipv6/exthdrs.c 			net = sock_net(skb->sk);
sk                992 net/ipv6/exthdrs.c ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
sk                996 net/ipv6/exthdrs.c 	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
sk               1053 net/ipv6/exthdrs.c ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
sk               1078 net/ipv6/exthdrs.c 	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
sk                345 net/ipv6/fib6_rules.c 	struct net *net = sock_net(skb->sk);
sk                114 net/ipv6/icmp.c 	struct sock *sk;
sk                116 net/ipv6/icmp.c 	sk = icmpv6_sk(net);
sk                117 net/ipv6/icmp.c 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
sk                124 net/ipv6/icmp.c 	return sk;
sk                127 net/ipv6/icmp.c static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
sk                129 net/ipv6/icmp.c 	spin_unlock(&sk->sk_lock.slock);
sk                193 net/ipv6/icmp.c static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
sk                196 net/ipv6/icmp.c 	struct net *net = sock_net(sk);
sk                208 net/ipv6/icmp.c 	dst = ip6_route_output(net, sk, fl6);
sk                250 net/ipv6/icmp.c void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
sk                256 net/ipv6/icmp.c 	skb = skb_peek(&sk->sk_write_queue);
sk                264 net/ipv6/icmp.c 	if (skb_queue_len(&sk->sk_write_queue) == 1) {
sk                274 net/ipv6/icmp.c 		skb_queue_walk(&sk->sk_write_queue, skb) {
sk                285 net/ipv6/icmp.c 	ip6_push_pending_frames(sk);
sk                334 net/ipv6/icmp.c 					     struct sock *sk,
sk                341 net/ipv6/icmp.c 	err = ip6_dst_lookup(net, sk, &dst, fl6);
sk                358 net/ipv6/icmp.c 	dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), sk, 0);
sk                373 net/ipv6/icmp.c 	err = ip6_dst_lookup(net, sk, &dst2, &fl2);
sk                377 net/ipv6/icmp.c 	dst2 = xfrm_lookup(net, dst2, flowi6_to_flowi(&fl2), sk, XFRM_LOOKUP_ICMP);
sk                428 net/ipv6/icmp.c 	struct sock *sk;
sk                534 net/ipv6/icmp.c 	sk = icmpv6_xmit_lock(net);
sk                535 net/ipv6/icmp.c 	if (!sk)
sk                538 net/ipv6/icmp.c 	sk->sk_mark = mark;
sk                539 net/ipv6/icmp.c 	np = inet6_sk(sk);
sk                541 net/ipv6/icmp.c 	if (!icmpv6_xrlim_allow(sk, type, &fl6))
sk                557 net/ipv6/icmp.c 	dst = icmpv6_route_lookup(net, skb, sk, &fl6);
sk                578 net/ipv6/icmp.c 	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
sk                584 net/ipv6/icmp.c 		ip6_flush_pending_frames(sk);
sk                586 net/ipv6/icmp.c 		icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
sk                593 net/ipv6/icmp.c 	icmpv6_xmit_unlock(sk);
sk                675 net/ipv6/icmp.c 	struct sock *sk;
sk                720 net/ipv6/icmp.c 	sk = icmpv6_xmit_lock(net);
sk                721 net/ipv6/icmp.c 	if (!sk)
sk                723 net/ipv6/icmp.c 	sk->sk_mark = mark;
sk                724 net/ipv6/icmp.c 	np = inet6_sk(sk);
sk                731 net/ipv6/icmp.c 	if (ip6_dst_lookup(net, sk, &dst, &fl6))
sk                733 net/ipv6/icmp.c 	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
sk                739 net/ipv6/icmp.c 	    !icmpv6_xrlim_allow(sk, ICMPV6_ECHO_REPLY, &fl6))
sk                752 net/ipv6/icmp.c 	if (ip6_append_data(sk, icmpv6_getfrag, &msg,
sk                757 net/ipv6/icmp.c 		ip6_flush_pending_frames(sk);
sk                759 net/ipv6/icmp.c 		icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
sk                765 net/ipv6/icmp.c 	icmpv6_xmit_unlock(sk);
sk                957 net/ipv6/icmp.c void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
sk                970 net/ipv6/icmp.c 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
sk                984 net/ipv6/icmp.c 	struct sock *sk;
sk                992 net/ipv6/icmp.c 		err = inet_ctl_sock_create(&sk, PF_INET6,
sk               1000 net/ipv6/icmp.c 		*per_cpu_ptr(net->ipv6.icmp_sk, i) = sk;
sk               1005 net/ipv6/icmp.c 		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);
sk                 38 net/ipv6/ila/ila_lwt.c static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 58 net/ipv6/ila/ila_lwt.c 		return orig_dst->lwtstate->orig_output(net, sk, skb);
sk                 94 net/ipv6/ila/ila_lwt.c 	return dst_output(net, sk, skb);
sk                507 net/ipv6/ila/ila_xlat.c 	struct net *net = sock_net(cb->skb->sk);
sk                 27 net/ipv6/inet6_connection_sock.c struct dst_entry *inet6_csk_route_req(const struct sock *sk,
sk                 33 net/ipv6/inet6_connection_sock.c 	const struct ipv6_pinfo *np = inet6_sk(sk);
sk                 48 net/ipv6/inet6_connection_sock.c 	fl6->flowi6_uid = sk->sk_uid;
sk                 51 net/ipv6/inet6_connection_sock.c 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
sk                 59 net/ipv6/inet6_connection_sock.c void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
sk                 64 net/ipv6/inet6_connection_sock.c 	sin6->sin6_addr = sk->sk_v6_daddr;
sk                 65 net/ipv6/inet6_connection_sock.c 	sin6->sin6_port	= inet_sk(sk)->inet_dport;
sk                 69 net/ipv6/inet6_connection_sock.c 						  sk->sk_bound_dev_if);
sk                 74 net/ipv6/inet6_connection_sock.c struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
sk                 76 net/ipv6/inet6_connection_sock.c 	return __sk_dst_check(sk, cookie);
sk                 79 net/ipv6/inet6_connection_sock.c static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
sk                 82 net/ipv6/inet6_connection_sock.c 	struct inet_sock *inet = inet_sk(sk);
sk                 83 net/ipv6/inet6_connection_sock.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                 88 net/ipv6/inet6_connection_sock.c 	fl6->flowi6_proto = sk->sk_protocol;
sk                 89 net/ipv6/inet6_connection_sock.c 	fl6->daddr = sk->sk_v6_daddr;
sk                 92 net/ipv6/inet6_connection_sock.c 	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
sk                 93 net/ipv6/inet6_connection_sock.c 	fl6->flowi6_oif = sk->sk_bound_dev_if;
sk                 94 net/ipv6/inet6_connection_sock.c 	fl6->flowi6_mark = sk->sk_mark;
sk                 97 net/ipv6/inet6_connection_sock.c 	fl6->flowi6_uid = sk->sk_uid;
sk                 98 net/ipv6/inet6_connection_sock.c 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
sk                104 net/ipv6/inet6_connection_sock.c 	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
sk                106 net/ipv6/inet6_connection_sock.c 		dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
sk                109 net/ipv6/inet6_connection_sock.c 			ip6_dst_store(sk, dst, NULL, NULL);
sk                114 net/ipv6/inet6_connection_sock.c int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
sk                116 net/ipv6/inet6_connection_sock.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                121 net/ipv6/inet6_connection_sock.c 	dst = inet6_csk_route_socket(sk, &fl6);
sk                123 net/ipv6/inet6_connection_sock.c 		sk->sk_err_soft = -PTR_ERR(dst);
sk                124 net/ipv6/inet6_connection_sock.c 		sk->sk_route_caps = 0;
sk                133 net/ipv6/inet6_connection_sock.c 	fl6.daddr = sk->sk_v6_daddr;
sk                135 net/ipv6/inet6_connection_sock.c 	res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
sk                136 net/ipv6/inet6_connection_sock.c 		       np->tclass,  sk->sk_priority);
sk                142 net/ipv6/inet6_connection_sock.c struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
sk                145 net/ipv6/inet6_connection_sock.c 	struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);
sk                149 net/ipv6/inet6_connection_sock.c 	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
sk                151 net/ipv6/inet6_connection_sock.c 	dst = inet6_csk_route_socket(sk, &fl6);
sk                 57 net/ipv6/inet6_hashtables.c 	struct sock *sk;
sk                 69 net/ipv6/inet6_hashtables.c 	sk_nulls_for_each_rcu(sk, node, &head->chain) {
sk                 70 net/ipv6/inet6_hashtables.c 		if (sk->sk_hash != hash)
sk                 72 net/ipv6/inet6_hashtables.c 		if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif))
sk                 74 net/ipv6/inet6_hashtables.c 		if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
sk                 77 net/ipv6/inet6_hashtables.c 		if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif))) {
sk                 78 net/ipv6/inet6_hashtables.c 			sock_gen_put(sk);
sk                 86 net/ipv6/inet6_hashtables.c 	sk = NULL;
sk                 88 net/ipv6/inet6_hashtables.c 	return sk;
sk                 92 net/ipv6/inet6_hashtables.c static inline int compute_score(struct sock *sk, struct net *net,
sk                 99 net/ipv6/inet6_hashtables.c 	if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
sk                100 net/ipv6/inet6_hashtables.c 	    sk->sk_family == PF_INET6) {
sk                101 net/ipv6/inet6_hashtables.c 		if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
sk                104 net/ipv6/inet6_hashtables.c 		if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
sk                108 net/ipv6/inet6_hashtables.c 		if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
sk                124 net/ipv6/inet6_hashtables.c 	struct sock *sk, *result = NULL;
sk                129 net/ipv6/inet6_hashtables.c 		sk = (struct sock *)icsk;
sk                130 net/ipv6/inet6_hashtables.c 		score = compute_score(sk, net, hnum, daddr, dif, sdif,
sk                133 net/ipv6/inet6_hashtables.c 			if (sk->sk_reuseport) {
sk                136 net/ipv6/inet6_hashtables.c 				result = reuseport_select_sock(sk, phash,
sk                141 net/ipv6/inet6_hashtables.c 			result = sk;
sk                189 net/ipv6/inet6_hashtables.c 	struct sock *sk;
sk                192 net/ipv6/inet6_hashtables.c 	sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
sk                194 net/ipv6/inet6_hashtables.c 	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
sk                195 net/ipv6/inet6_hashtables.c 		sk = NULL;
sk                196 net/ipv6/inet6_hashtables.c 	return sk;
sk                201 net/ipv6/inet6_hashtables.c 				     struct sock *sk, const __u16 lport,
sk                205 net/ipv6/inet6_hashtables.c 	struct inet_sock *inet = inet_sk(sk);
sk                206 net/ipv6/inet6_hashtables.c 	const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
sk                207 net/ipv6/inet6_hashtables.c 	const struct in6_addr *saddr = &sk->sk_v6_daddr;
sk                208 net/ipv6/inet6_hashtables.c 	const int dif = sk->sk_bound_dev_if;
sk                209 net/ipv6/inet6_hashtables.c 	struct net *net = sock_net(sk);
sk                230 net/ipv6/inet6_hashtables.c 				if (twsk_unique(sk, sk2, twp))
sk                242 net/ipv6/inet6_hashtables.c 	sk->sk_hash = hash;
sk                243 net/ipv6/inet6_hashtables.c 	WARN_ON(!sk_unhashed(sk));
sk                244 net/ipv6/inet6_hashtables.c 	__sk_nulls_add_node_rcu(sk, &head->chain);
sk                250 net/ipv6/inet6_hashtables.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                265 net/ipv6/inet6_hashtables.c static u32 inet6_sk_port_offset(const struct sock *sk)
sk                267 net/ipv6/inet6_hashtables.c 	const struct inet_sock *inet = inet_sk(sk);
sk                269 net/ipv6/inet6_hashtables.c 	return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
sk                270 net/ipv6/inet6_hashtables.c 					  sk->sk_v6_daddr.s6_addr32,
sk                275 net/ipv6/inet6_hashtables.c 		       struct sock *sk)
sk                279 net/ipv6/inet6_hashtables.c 	if (!inet_sk(sk)->inet_num)
sk                280 net/ipv6/inet6_hashtables.c 		port_offset = inet6_sk_port_offset(sk);
sk                281 net/ipv6/inet6_hashtables.c 	return __inet_hash_connect(death_row, sk, port_offset,
sk                286 net/ipv6/inet6_hashtables.c int inet6_hash(struct sock *sk)
sk                290 net/ipv6/inet6_hashtables.c 	if (sk->sk_state != TCP_CLOSE) {
sk                292 net/ipv6/inet6_hashtables.c 		err = __inet_hash(sk, NULL);
sk                497 net/ipv6/ip6_fib.c 	struct net *net = sock_net(cb->skb->sk);
sk                521 net/ipv6/ip6_fib.c 	struct net *net = sock_net(skb->sk);
sk                568 net/ipv6/ip6_fib.c 	struct net *net = sock_net(skb->sk);
sk                259 net/ipv6/ip6_flowlabel.c struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label)
sk                262 net/ipv6/ip6_flowlabel.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                281 net/ipv6/ip6_flowlabel.c void fl6_free_socklist(struct sock *sk)
sk                283 net/ipv6/ip6_flowlabel.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                373 net/ipv6/ip6_flowlabel.c fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
sk                412 net/ipv6/ip6_flowlabel.c 		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
sk                465 net/ipv6/ip6_flowlabel.c static int mem_check(struct sock *sk)
sk                467 net/ipv6/ip6_flowlabel.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                499 net/ipv6/ip6_flowlabel.c int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
sk                502 net/ipv6/ip6_flowlabel.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                536 net/ipv6/ip6_flowlabel.c int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
sk                539 net/ipv6/ip6_flowlabel.c 	struct net *net = sock_net(sk);
sk                540 net/ipv6/ip6_flowlabel.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                557 net/ipv6/ip6_flowlabel.c 			if (sk->sk_protocol != IPPROTO_TCP)
sk                607 net/ipv6/ip6_flowlabel.c 			struct net *net = sock_net(sk);
sk                613 net/ipv6/ip6_flowlabel.c 			if (sk->sk_protocol != IPPROTO_TCP)
sk                627 net/ipv6/ip6_flowlabel.c 		fl = fl_create(net, sk, &freq, optval, optlen, &err);
sk                689 net/ipv6/ip6_flowlabel.c 		err = mem_check(sk);
sk                 49 net/ipv6/ip6_input.c static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
sk                 54 net/ipv6/ip6_input.c 	if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
sk                 66 net/ipv6/ip6_input.c int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 74 net/ipv6/ip6_input.c 	ip6_rcv_finish_core(net, sk, skb);
sk                 89 net/ipv6/ip6_input.c static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
sk                107 net/ipv6/ip6_input.c 		ip6_rcv_finish_core(net, sk, skb);
sk                447 net/ipv6/ip6_input.c static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 58 net/ipv6/ip6_output.c static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                 69 net/ipv6/ip6_output.c 		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
sk                 81 net/ipv6/ip6_output.c 					net, sk, newskb, NULL, newskb->dev,
sk                127 net/ipv6/ip6_output.c static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                133 net/ipv6/ip6_output.c 		return dst_output(net, sk, skb);
sk                140 net/ipv6/ip6_output.c 		return ip6_fragment(net, sk, skb, ip6_finish_output2);
sk                142 net/ipv6/ip6_output.c 		return ip6_finish_output2(net, sk, skb);
sk                145 net/ipv6/ip6_output.c static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                149 net/ipv6/ip6_output.c 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
sk                152 net/ipv6/ip6_output.c 		return __ip6_finish_output(net, sk, skb);
sk                154 net/ipv6/ip6_output.c 		return __ip6_finish_output(net, sk, skb) ? : ret;
sk                161 net/ipv6/ip6_output.c int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                176 net/ipv6/ip6_output.c 			    net, sk, skb, NULL, dev,
sk                195 net/ipv6/ip6_output.c int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
sk                198 net/ipv6/ip6_output.c 	struct net *net = sock_net(sk);
sk                199 net/ipv6/ip6_output.c 	const struct ipv6_pinfo *np = inet6_sk(sk);
sk                221 net/ipv6/ip6_output.c 		if (skb->sk)
sk                222 net/ipv6/ip6_output.c 			skb_set_owner_w(skb2, skb->sk);
sk                272 net/ipv6/ip6_output.c 		skb = l3mdev_ip6_out((struct sock *)sk, skb);
sk                280 net/ipv6/ip6_output.c 			       net, (struct sock *)sk, skb, NULL, dst->dev,
sk                288 net/ipv6/ip6_output.c 	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
sk                303 net/ipv6/ip6_output.c 		struct sock *sk = ra->sk;
sk                304 net/ipv6/ip6_output.c 		if (sk && ra->sel == sel &&
sk                305 net/ipv6/ip6_output.c 		    (!sk->sk_bound_dev_if ||
sk                306 net/ipv6/ip6_output.c 		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
sk                307 net/ipv6/ip6_output.c 			struct ipv6_pinfo *np = inet6_sk(sk);
sk                310 net/ipv6/ip6_output.c 			    !net_eq(sock_net(sk), dev_net(skb->dev))) {
sk                318 net/ipv6/ip6_output.c 			last = sk;
sk                383 net/ipv6/ip6_output.c static inline int ip6_forward_finish(struct net *net, struct sock *sk,
sk                399 net/ipv6/ip6_output.c 	return dst_output(net, sk, skb);
sk                435 net/ipv6/ip6_output.c 	if (unlikely(skb->sk))
sk                724 net/ipv6/ip6_output.c 	if (skb->sk)
sk                725 net/ipv6/ip6_output.c 		skb_set_owner_w(frag, skb->sk);
sk                762 net/ipv6/ip6_output.c int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                767 net/ipv6/ip6_output.c 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
sk                768 net/ipv6/ip6_output.c 				inet6_sk(skb->sk) : NULL;
sk                840 net/ipv6/ip6_output.c 			BUG_ON(frag->sk);
sk                841 net/ipv6/ip6_output.c 			if (skb->sk) {
sk                842 net/ipv6/ip6_output.c 				frag->sk = skb->sk;
sk                860 net/ipv6/ip6_output.c 			err = output(net, sk, skb);
sk                889 net/ipv6/ip6_output.c 			frag2->sk = NULL;
sk                919 net/ipv6/ip6_output.c 		err = output(net, sk, frag);
sk                932 net/ipv6/ip6_output.c 	if (skb->sk && dst_allfrag(skb_dst(skb)))
sk                933 net/ipv6/ip6_output.c 		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
sk                953 net/ipv6/ip6_output.c static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
sk                957 net/ipv6/ip6_output.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk               1000 net/ipv6/ip6_output.c static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
sk               1025 net/ipv6/ip6_output.c 			*dst = ip6_route_output(net, sk, fl6);
sk               1031 net/ipv6/ip6_output.c 					  sk ? inet6_sk(sk)->srcprefs : 0,
sk               1052 net/ipv6/ip6_output.c 		*dst = ip6_route_output_flags(net, sk, fl6, flags);
sk               1094 net/ipv6/ip6_output.c 			*dst = ip6_route_output(net, sk, &fl_gw6);
sk               1128 net/ipv6/ip6_output.c int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
sk               1132 net/ipv6/ip6_output.c 	return ip6_dst_lookup_tail(net, sk, dst, fl6);
sk               1147 net/ipv6/ip6_output.c struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
sk               1153 net/ipv6/ip6_output.c 	err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
sk               1159 net/ipv6/ip6_output.c 	return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
sk               1181 net/ipv6/ip6_output.c struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
sk               1185 net/ipv6/ip6_output.c 	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
sk               1187 net/ipv6/ip6_output.c 	dst = ip6_sk_dst_check(sk, dst, fl6);
sk               1191 net/ipv6/ip6_output.c 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
sk               1193 net/ipv6/ip6_output.c 		ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);
sk               1235 net/ipv6/ip6_output.c static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
sk               1239 net/ipv6/ip6_output.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk               1250 net/ipv6/ip6_output.c 		v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
sk               1259 net/ipv6/ip6_output.c 						    sk->sk_allocation);
sk               1264 net/ipv6/ip6_output.c 						    sk->sk_allocation);
sk               1269 net/ipv6/ip6_output.c 						   sk->sk_allocation);
sk               1274 net/ipv6/ip6_output.c 						    sk->sk_allocation);
sk               1301 net/ipv6/ip6_output.c 	sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);
sk               1312 net/ipv6/ip6_output.c static int __ip6_append_data(struct sock *sk,
sk               1351 net/ipv6/ip6_output.c 	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
sk               1352 net/ipv6/ip6_output.c 		tskey = sk->sk_tskey++;
sk               1374 net/ipv6/ip6_output.c 	    (sk->sk_protocol == IPPROTO_UDP ||
sk               1375 net/ipv6/ip6_output.c 	     sk->sk_protocol == IPPROTO_RAW)) {
sk               1376 net/ipv6/ip6_output.c 		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
sk               1381 net/ipv6/ip6_output.c 	if (ip6_sk_ignore_df(sk))
sk               1389 net/ipv6/ip6_output.c 		ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
sk               1396 net/ipv6/ip6_output.c 	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
sk               1403 net/ipv6/ip6_output.c 	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
sk               1404 net/ipv6/ip6_output.c 		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
sk               1511 net/ipv6/ip6_output.c 				skb = sock_alloc_send_skb(sk,
sk               1516 net/ipv6/ip6_output.c 				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
sk               1517 net/ipv6/ip6_output.c 				    2 * sk->sk_sndbuf)
sk               1519 net/ipv6/ip6_output.c 							sk->sk_allocation);
sk               1581 net/ipv6/ip6_output.c 				skb->sk = sk;
sk               1606 net/ipv6/ip6_output.c 			if (!sk_page_frag_refill(sk, pfrag))
sk               1642 net/ipv6/ip6_output.c 		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
sk               1651 net/ipv6/ip6_output.c 	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
sk               1652 net/ipv6/ip6_output.c 	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
sk               1656 net/ipv6/ip6_output.c int ip6_append_data(struct sock *sk,
sk               1663 net/ipv6/ip6_output.c 	struct inet_sock *inet = inet_sk(sk);
sk               1664 net/ipv6/ip6_output.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk               1670 net/ipv6/ip6_output.c 	if (skb_queue_empty(&sk->sk_write_queue)) {
sk               1674 net/ipv6/ip6_output.c 		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
sk               1687 net/ipv6/ip6_output.c 	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
sk               1688 net/ipv6/ip6_output.c 				 &np->cork, sk_page_frag(sk), getfrag,
sk               1713 net/ipv6/ip6_output.c struct sk_buff *__ip6_make_skb(struct sock *sk,
sk               1721 net/ipv6/ip6_output.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk               1722 net/ipv6/ip6_output.c 	struct net *net = sock_net(sk);
sk               1745 net/ipv6/ip6_output.c 		tmp_skb->sk = NULL;
sk               1749 net/ipv6/ip6_output.c 	skb->ignore_df = ip6_sk_ignore_df(sk);
sk               1770 net/ipv6/ip6_output.c 	skb->priority = sk->sk_priority;
sk               1791 net/ipv6/ip6_output.c 	struct net *net = sock_net(skb->sk);
sk               1795 net/ipv6/ip6_output.c 	err = ip6_local_out(net, skb->sk, skb);
sk               1807 net/ipv6/ip6_output.c int ip6_push_pending_frames(struct sock *sk)
sk               1811 net/ipv6/ip6_output.c 	skb = ip6_finish_skb(sk);
sk               1819 net/ipv6/ip6_output.c static void __ip6_flush_pending_frames(struct sock *sk,
sk               1828 net/ipv6/ip6_output.c 			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
sk               1836 net/ipv6/ip6_output.c void ip6_flush_pending_frames(struct sock *sk)
sk               1838 net/ipv6/ip6_output.c 	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
sk               1839 net/ipv6/ip6_output.c 				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
sk               1843 net/ipv6/ip6_output.c struct sk_buff *ip6_make_skb(struct sock *sk,
sk               1866 net/ipv6/ip6_output.c 	err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6);
sk               1872 net/ipv6/ip6_output.c 		ipc6->dontfrag = inet6_sk(sk)->dontfrag;
sk               1874 net/ipv6/ip6_output.c 	err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork,
sk               1879 net/ipv6/ip6_output.c 		__ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
sk               1883 net/ipv6/ip6_output.c 	return __ip6_make_skb(sk, &queue, cork, &v6_cork);
sk               1168 net/ipv6/ip6_tunnel.c 		if (skb->sk)
sk               1169 net/ipv6/ip6_tunnel.c 			skb_set_owner_w(new_skb, skb->sk);
sk                 65 net/ipv6/ip6_udp_tunnel.c 	udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums);
sk                 66 net/ipv6/ip6_udp_tunnel.c 	udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums);
sk                 81 net/ipv6/ip6_udp_tunnel.c int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
sk                114 net/ipv6/ip6_udp_tunnel.c 	ip6tunnel_xmit(sk, skb, dev);
sk                529 net/ipv6/ip6_vti.c 	err = dst_output(t->net, skb->sk, skb);
sk               1542 net/ipv6/ip6mr.c static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
sk               1545 net/ipv6/ip6mr.c 	struct net *net = sock_net(sk);
sk               1552 net/ipv6/ip6mr.c 		rcu_assign_pointer(mrt->mroute_sk, sk);
sk               1553 net/ipv6/ip6mr.c 		sock_set_flag(sk, SOCK_RCU_FREE);
sk               1568 net/ipv6/ip6mr.c int ip6mr_sk_done(struct sock *sk)
sk               1571 net/ipv6/ip6mr.c 	struct net *net = sock_net(sk);
sk               1574 net/ipv6/ip6mr.c 	if (sk->sk_type != SOCK_RAW ||
sk               1575 net/ipv6/ip6mr.c 	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
sk               1580 net/ipv6/ip6mr.c 		if (sk == rtnl_dereference(mrt->mroute_sk)) {
sk               1627 net/ipv6/ip6mr.c int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
sk               1633 net/ipv6/ip6mr.c 	struct net *net = sock_net(sk);
sk               1636 net/ipv6/ip6mr.c 	if (sk->sk_type != SOCK_RAW ||
sk               1637 net/ipv6/ip6mr.c 	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
sk               1640 net/ipv6/ip6mr.c 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
sk               1645 net/ipv6/ip6mr.c 		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
sk               1655 net/ipv6/ip6mr.c 		return ip6mr_sk_init(mrt, sk);
sk               1658 net/ipv6/ip6mr.c 		return ip6mr_sk_done(sk);
sk               1669 net/ipv6/ip6mr.c 			       sk == rtnl_dereference(mrt->mroute_sk));
sk               1704 net/ipv6/ip6mr.c 					    sk ==
sk               1772 net/ipv6/ip6mr.c 		if (sk == rcu_access_pointer(mrt->mroute_sk))
sk               1781 net/ipv6/ip6mr.c 			raw6_sk(sk)->ip6mr_table = v;
sk               1799 net/ipv6/ip6mr.c int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
sk               1804 net/ipv6/ip6mr.c 	struct net *net = sock_net(sk);
sk               1807 net/ipv6/ip6mr.c 	if (sk->sk_type != SOCK_RAW ||
sk               1808 net/ipv6/ip6mr.c 	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
sk               1811 net/ipv6/ip6mr.c 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
sk               1849 net/ipv6/ip6mr.c int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
sk               1855 net/ipv6/ip6mr.c 	struct net *net = sock_net(sk);
sk               1858 net/ipv6/ip6mr.c 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
sk               1924 net/ipv6/ip6mr.c int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
sk               1930 net/ipv6/ip6mr.c 	struct net *net = sock_net(sk);
sk               1933 net/ipv6/ip6mr.c 	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
sk               1983 net/ipv6/ip6mr.c static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk               1989 net/ipv6/ip6mr.c 	return dst_output(net, sk, skb);
sk               2490 net/ipv6/ip6mr.c 		err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
sk               2499 net/ipv6/ip6mr.c 		mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
sk                 58 net/ipv6/ipv6_sockglue.c int ip6_ra_control(struct sock *sk, int sel)
sk                 63 net/ipv6/ipv6_sockglue.c 	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW)
sk                 72 net/ipv6/ipv6_sockglue.c 		if (ra->sk == sk) {
sk                 82 net/ipv6/ipv6_sockglue.c 			sock_put(sk);
sk                 91 net/ipv6/ipv6_sockglue.c 	new_ra->sk = sk;
sk                 95 net/ipv6/ipv6_sockglue.c 	sock_hold(sk);
sk                100 net/ipv6/ipv6_sockglue.c struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
sk                103 net/ipv6/ipv6_sockglue.c 	if (inet_sk(sk)->is_icsk) {
sk                105 net/ipv6/ipv6_sockglue.c 		    !((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) &&
sk                106 net/ipv6/ipv6_sockglue.c 		    inet_sk(sk)->inet_daddr != LOOPBACK4_IPV6) {
sk                107 net/ipv6/ipv6_sockglue.c 			struct inet_connection_sock *icsk = inet_csk(sk);
sk                109 net/ipv6/ipv6_sockglue.c 			icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
sk                112 net/ipv6/ipv6_sockglue.c 	opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
sk                114 net/ipv6/ipv6_sockglue.c 	sk_dst_reset(sk);
sk                139 net/ipv6/ipv6_sockglue.c static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
sk                142 net/ipv6/ipv6_sockglue.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                143 net/ipv6/ipv6_sockglue.c 	struct net *net = sock_net(sk);
sk                161 net/ipv6/ipv6_sockglue.c 		return ip6_mroute_setsockopt(sk, optname, optval, optlen);
sk                165 net/ipv6/ipv6_sockglue.c 	lock_sock(sk);
sk                176 net/ipv6/ipv6_sockglue.c 			if (sk->sk_type == SOCK_RAW)
sk                179 net/ipv6/ipv6_sockglue.c 			if (sk->sk_protocol == IPPROTO_UDP ||
sk                180 net/ipv6/ipv6_sockglue.c 			    sk->sk_protocol == IPPROTO_UDPLITE) {
sk                181 net/ipv6/ipv6_sockglue.c 				struct udp_sock *up = udp_sk(sk);
sk                186 net/ipv6/ipv6_sockglue.c 			} else if (sk->sk_protocol == IPPROTO_TCP) {
sk                187 net/ipv6/ipv6_sockglue.c 				if (sk->sk_prot != &tcpv6_prot) {
sk                195 net/ipv6/ipv6_sockglue.c 			if (sk->sk_state != TCP_ESTABLISHED) {
sk                200 net/ipv6/ipv6_sockglue.c 			if (ipv6_only_sock(sk) ||
sk                201 net/ipv6/ipv6_sockglue.c 			    !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
sk                206 net/ipv6/ipv6_sockglue.c 			fl6_free_socklist(sk);
sk                207 net/ipv6/ipv6_sockglue.c 			__ipv6_sock_mc_close(sk);
sk                214 net/ipv6/ipv6_sockglue.c 			sk_refcnt_debug_dec(sk);
sk                216 net/ipv6/ipv6_sockglue.c 			if (sk->sk_protocol == IPPROTO_TCP) {
sk                217 net/ipv6/ipv6_sockglue.c 				struct inet_connection_sock *icsk = inet_csk(sk);
sk                219 net/ipv6/ipv6_sockglue.c 				sock_prot_inuse_add(net, sk->sk_prot, -1);
sk                222 net/ipv6/ipv6_sockglue.c 				sk->sk_prot = &tcp_prot;
sk                224 net/ipv6/ipv6_sockglue.c 				sk->sk_socket->ops = &inet_stream_ops;
sk                225 net/ipv6/ipv6_sockglue.c 				sk->sk_family = PF_INET;
sk                226 net/ipv6/ipv6_sockglue.c 				tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
sk                230 net/ipv6/ipv6_sockglue.c 				if (sk->sk_protocol == IPPROTO_UDPLITE)
sk                233 net/ipv6/ipv6_sockglue.c 				sock_prot_inuse_add(net, sk->sk_prot, -1);
sk                236 net/ipv6/ipv6_sockglue.c 				sk->sk_prot = prot;
sk                237 net/ipv6/ipv6_sockglue.c 				sk->sk_socket->ops = &inet_dgram_ops;
sk                238 net/ipv6/ipv6_sockglue.c 				sk->sk_family = PF_INET;
sk                243 net/ipv6/ipv6_sockglue.c 				atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
sk                253 net/ipv6/ipv6_sockglue.c 			sk_refcnt_debug_inc(sk);
sk                262 net/ipv6/ipv6_sockglue.c 		    inet_sk(sk)->inet_num)
sk                264 net/ipv6/ipv6_sockglue.c 		sk->sk_ipv6only = valbool;
sk                380 net/ipv6/ipv6_sockglue.c 		inet_sk(sk)->transparent = valbool;
sk                388 net/ipv6/ipv6_sockglue.c 		inet_sk(sk)->freebind = valbool;
sk                435 net/ipv6/ipv6_sockglue.c 						lockdep_sock_is_held(sk));
sk                436 net/ipv6/ipv6_sockglue.c 		opt = ipv6_renew_options(sk, opt, optname, new);
sk                471 net/ipv6/ipv6_sockglue.c 		opt = ipv6_update_options(sk, opt);
sk                474 net/ipv6/ipv6_sockglue.c 			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
sk                493 net/ipv6/ipv6_sockglue.c 		if (!sk_dev_equal_l3scope(sk, pkt.ipi6_ifindex))
sk                510 net/ipv6/ipv6_sockglue.c 		fl6.flowi6_oif = sk->sk_bound_dev_if;
sk                511 net/ipv6/ipv6_sockglue.c 		fl6.flowi6_mark = sk->sk_mark;
sk                523 net/ipv6/ipv6_sockglue.c 		opt = sock_kmalloc(sk, sizeof(*opt) + optlen, GFP_KERNEL);
sk                539 net/ipv6/ipv6_sockglue.c 		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, &ipc6);
sk                544 net/ipv6/ipv6_sockglue.c 		opt = ipv6_update_options(sk, opt);
sk                547 net/ipv6/ipv6_sockglue.c 			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
sk                562 net/ipv6/ipv6_sockglue.c 		if (sk->sk_type == SOCK_STREAM)
sk                603 net/ipv6/ipv6_sockglue.c 		if (sk->sk_bound_dev_if)
sk                612 net/ipv6/ipv6_sockglue.c 		if (sk->sk_type == SOCK_STREAM)
sk                633 net/ipv6/ipv6_sockglue.c 			if (sk->sk_bound_dev_if &&
sk                634 net/ipv6/ipv6_sockglue.c 			    sk->sk_bound_dev_if != val &&
sk                635 net/ipv6/ipv6_sockglue.c 			    (!midx || midx != sk->sk_bound_dev_if))
sk                650 net/ipv6/ipv6_sockglue.c 		if (inet_sk(sk)->is_icsk)
sk                658 net/ipv6/ipv6_sockglue.c 			retv = ipv6_sock_mc_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
sk                660 net/ipv6/ipv6_sockglue.c 			retv = ipv6_sock_mc_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_multiaddr);
sk                676 net/ipv6/ipv6_sockglue.c 			retv = ipv6_sock_ac_join(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
sk                678 net/ipv6/ipv6_sockglue.c 			retv = ipv6_sock_ac_drop(sk, mreq.ipv6mr_ifindex, &mreq.ipv6mr_acaddr);
sk                706 net/ipv6/ipv6_sockglue.c 			retv = ipv6_sock_mc_join(sk, greq.gr_interface,
sk                709 net/ipv6/ipv6_sockglue.c 			retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
sk                742 net/ipv6/ipv6_sockglue.c 			retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface,
sk                754 net/ipv6/ipv6_sockglue.c 		retv = ip6_mc_source(add, omode, sk, &greqs);
sk                784 net/ipv6/ipv6_sockglue.c 		retv = ip6_mc_msfilter(sk, gsf);
sk                792 net/ipv6/ipv6_sockglue.c 		retv = ip6_ra_control(sk, val);
sk                821 net/ipv6/ipv6_sockglue.c 			skb_queue_purge(&sk->sk_error_queue);
sk                831 net/ipv6/ipv6_sockglue.c 		retv = ipv6_flowlabel_opt(sk, optval, optlen);
sk                838 net/ipv6/ipv6_sockglue.c 		retv = xfrm_user_policy(sk, optname, optval, optlen);
sk                926 net/ipv6/ipv6_sockglue.c 	release_sock(sk);
sk                933 net/ipv6/ipv6_sockglue.c 	release_sock(sk);
sk                939 net/ipv6/ipv6_sockglue.c int ipv6_setsockopt(struct sock *sk, int level, int optname,
sk                944 net/ipv6/ipv6_sockglue.c 	if (level == SOL_IP && sk->sk_type != SOCK_RAW)
sk                945 net/ipv6/ipv6_sockglue.c 		return udp_prot.setsockopt(sk, level, optname, optval, optlen);
sk                950 net/ipv6/ipv6_sockglue.c 	err = do_ipv6_setsockopt(sk, level, optname, optval, optlen);
sk                955 net/ipv6/ipv6_sockglue.c 		err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
sk                962 net/ipv6/ipv6_sockglue.c int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
sk                967 net/ipv6/ipv6_sockglue.c 	if (level == SOL_IP && sk->sk_type != SOCK_RAW) {
sk                969 net/ipv6/ipv6_sockglue.c 			return udp_prot.compat_setsockopt(sk, level, optname,
sk                971 net/ipv6/ipv6_sockglue.c 		return udp_prot.setsockopt(sk, level, optname, optval, optlen);
sk                978 net/ipv6/ipv6_sockglue.c 		return compat_mc_setsockopt(sk, level, optname, optval, optlen,
sk                981 net/ipv6/ipv6_sockglue.c 	err = do_ipv6_setsockopt(sk, level, optname, optval, optlen);
sk                986 net/ipv6/ipv6_sockglue.c 		err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
sk                994 net/ipv6/ipv6_sockglue.c static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
sk               1028 net/ipv6/ipv6_sockglue.c static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
sk               1031 net/ipv6/ipv6_sockglue.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk               1036 net/ipv6/ipv6_sockglue.c 		return ip6_mroute_getsockopt(sk, optname, optval, optlen);
sk               1042 net/ipv6/ipv6_sockglue.c 		if (sk->sk_protocol != IPPROTO_UDP &&
sk               1043 net/ipv6/ipv6_sockglue.c 		    sk->sk_protocol != IPPROTO_UDPLITE &&
sk               1044 net/ipv6/ipv6_sockglue.c 		    sk->sk_protocol != IPPROTO_TCP)
sk               1046 net/ipv6/ipv6_sockglue.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1048 net/ipv6/ipv6_sockglue.c 		val = sk->sk_family;
sk               1061 net/ipv6/ipv6_sockglue.c 		lock_sock(sk);
sk               1062 net/ipv6/ipv6_sockglue.c 		err = ip6_mc_msfget(sk, &gsf,
sk               1064 net/ipv6/ipv6_sockglue.c 		release_sock(sk);
sk               1073 net/ipv6/ipv6_sockglue.c 		if (sk->sk_type != SOCK_STREAM)
sk               1080 net/ipv6/ipv6_sockglue.c 		lock_sock(sk);
sk               1083 net/ipv6/ipv6_sockglue.c 			ip6_datagram_recv_ctl(sk, &msg, skb);
sk               1084 net/ipv6/ipv6_sockglue.c 		release_sock(sk);
sk               1090 net/ipv6/ipv6_sockglue.c 				src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr : np->sticky_pktinfo.ipi6_addr;
sk               1106 net/ipv6/ipv6_sockglue.c 				src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr :
sk               1129 net/ipv6/ipv6_sockglue.c 		dst = __sk_dst_get(sk);
sk               1139 net/ipv6/ipv6_sockglue.c 		val = sk->sk_ipv6only;
sk               1173 net/ipv6/ipv6_sockglue.c 		lock_sock(sk);
sk               1175 net/ipv6/ipv6_sockglue.c 						lockdep_sock_is_held(sk));
sk               1176 net/ipv6/ipv6_sockglue.c 		len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
sk               1177 net/ipv6/ipv6_sockglue.c 		release_sock(sk);
sk               1228 net/ipv6/ipv6_sockglue.c 		dst = __sk_dst_get(sk);
sk               1244 net/ipv6/ipv6_sockglue.c 		val = inet_sk(sk)->transparent;
sk               1248 net/ipv6/ipv6_sockglue.c 		val = inet_sk(sk)->freebind;
sk               1267 net/ipv6/ipv6_sockglue.c 			dst = __sk_dst_get(sk);
sk               1274 net/ipv6/ipv6_sockglue.c 			val = sock_net(sk)->ipv6.devconf_all->hop_limit;
sk               1325 net/ipv6/ipv6_sockglue.c 		val = ipv6_flowlabel_opt_get(sk, &freq, flags);
sk               1364 net/ipv6/ipv6_sockglue.c 		val = ip6_autoflowlabel(sock_net(sk), np);
sk               1386 net/ipv6/ipv6_sockglue.c int ipv6_getsockopt(struct sock *sk, int level, int optname,
sk               1391 net/ipv6/ipv6_sockglue.c 	if (level == SOL_IP && sk->sk_type != SOCK_RAW)
sk               1392 net/ipv6/ipv6_sockglue.c 		return udp_prot.getsockopt(sk, level, optname, optval, optlen);
sk               1397 net/ipv6/ipv6_sockglue.c 	err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0);
sk               1406 net/ipv6/ipv6_sockglue.c 		err = nf_getsockopt(sk, PF_INET6, optname, optval, &len);
sk               1416 net/ipv6/ipv6_sockglue.c int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
sk               1421 net/ipv6/ipv6_sockglue.c 	if (level == SOL_IP && sk->sk_type != SOCK_RAW) {
sk               1423 net/ipv6/ipv6_sockglue.c 			return udp_prot.compat_getsockopt(sk, level, optname,
sk               1425 net/ipv6/ipv6_sockglue.c 		return udp_prot.getsockopt(sk, level, optname, optval, optlen);
sk               1432 net/ipv6/ipv6_sockglue.c 		return compat_mc_getsockopt(sk, level, optname, optval, optlen,
sk               1435 net/ipv6/ipv6_sockglue.c 	err = do_ipv6_getsockopt(sk, level, optname, optval, optlen,
sk               1445 net/ipv6/ipv6_sockglue.c 		err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
sk                 92 net/ipv6/mcast.c static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
sk                133 net/ipv6/mcast.c static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
sk                138 net/ipv6/mcast.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                139 net/ipv6/mcast.c 	struct net *net = sock_net(sk);
sk                157 net/ipv6/mcast.c 	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
sk                176 net/ipv6/mcast.c 		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
sk                192 net/ipv6/mcast.c 		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
sk                202 net/ipv6/mcast.c int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
sk                204 net/ipv6/mcast.c 	return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
sk                208 net/ipv6/mcast.c int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
sk                211 net/ipv6/mcast.c 	return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
sk                217 net/ipv6/mcast.c int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
sk                219 net/ipv6/mcast.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                222 net/ipv6/mcast.c 	struct net *net = sock_net(sk);
sk                242 net/ipv6/mcast.c 				(void) ip6_mc_leave_src(sk, mc_lst, idev);
sk                246 net/ipv6/mcast.c 				(void) ip6_mc_leave_src(sk, mc_lst, NULL);
sk                248 net/ipv6/mcast.c 			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
sk                289 net/ipv6/mcast.c void __ipv6_sock_mc_close(struct sock *sk)
sk                291 net/ipv6/mcast.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                293 net/ipv6/mcast.c 	struct net *net = sock_net(sk);
sk                306 net/ipv6/mcast.c 			(void) ip6_mc_leave_src(sk, mc_lst, idev);
sk                310 net/ipv6/mcast.c 			(void) ip6_mc_leave_src(sk, mc_lst, NULL);
sk                312 net/ipv6/mcast.c 		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
sk                317 net/ipv6/mcast.c void ipv6_sock_mc_close(struct sock *sk)
sk                319 net/ipv6/mcast.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                324 net/ipv6/mcast.c 	__ipv6_sock_mc_close(sk);
sk                328 net/ipv6/mcast.c int ip6_mc_source(int add, int omode, struct sock *sk,
sk                334 net/ipv6/mcast.c 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
sk                336 net/ipv6/mcast.c 	struct net *net = sock_net(sk);
sk                423 net/ipv6/mcast.c 		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
sk                433 net/ipv6/mcast.c 			sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
sk                456 net/ipv6/mcast.c 		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
sk                460 net/ipv6/mcast.c int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
sk                465 net/ipv6/mcast.c 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
sk                467 net/ipv6/mcast.c 	struct net *net = sock_net(sk);
sk                505 net/ipv6/mcast.c 		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
sk                521 net/ipv6/mcast.c 			sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
sk                534 net/ipv6/mcast.c 		sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
sk                545 net/ipv6/mcast.c 		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
sk                549 net/ipv6/mcast.c int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
sk                556 net/ipv6/mcast.c 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
sk                558 net/ipv6/mcast.c 	struct net *net = sock_net(sk);
sk                620 net/ipv6/mcast.c bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
sk                623 net/ipv6/mcast.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk               1564 net/ipv6/mcast.c static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
sk               1583 net/ipv6/mcast.c 	hdr->hop_limit = inet6_sk(sk)->hop_limit;
sk               1593 net/ipv6/mcast.c 	struct sock *sk = net->ipv6.igmp_sk;
sk               1609 net/ipv6/mcast.c 	skb = sock_alloc_send_skb(sk, size, 1, &err);
sk               1627 net/ipv6/mcast.c 	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
sk               1984 net/ipv6/mcast.c 	struct sock *sk = net->ipv6.igmp_sk;
sk               2013 net/ipv6/mcast.c 	skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
sk               2034 net/ipv6/mcast.c 	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
sk               2049 net/ipv6/mcast.c 	icmpv6_flow_init(sk, &fl6, type,
sk               2060 net/ipv6/mcast.c 		      net, sk, skb, NULL, skb->dev,
sk               2433 net/ipv6/mcast.c static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
sk               2445 net/ipv6/mcast.c 		sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
sk                 74 net/ipv6/mip6.c static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
sk                414 net/ipv6/ndisc.c 	struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
sk                433 net/ipv6/ndisc.c 	skb_set_owner_w(skb, sk);
sk                472 net/ipv6/ndisc.c 	struct sock *sk = net->ipv6.ndisc_sk;
sk                484 net/ipv6/ndisc.c 		icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif);
sk                499 net/ipv6/ndisc.c 	ip6_nd_hdr(skb, saddr, daddr, inet6_sk(sk)->hop_limit, skb->len);
sk                506 net/ipv6/ndisc.c 		      net, sk, skb, NULL, dst->dev,
sk               1580 net/ipv6/ndisc.c 	struct sock *sk = net->ipv6.ndisc_sk;
sk               1613 net/ipv6/ndisc.c 	icmpv6_flow_init(sk, &fl6, NDISC_REDIRECT,
sk               1878 net/ipv6/ndisc.c 	struct sock *sk;
sk               1881 net/ipv6/ndisc.c 	err = inet_ctl_sock_create(&sk, PF_INET6,
sk               1890 net/ipv6/ndisc.c 	net->ipv6.ndisc_sk = sk;
sk               1892 net/ipv6/ndisc.c 	np = inet6_sk(sk);
sk                 26 net/ipv6/netfilter.c 	struct sock *sk = sk_to_full_sk(skb->sk);
sk                 32 net/ipv6/netfilter.c 		.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
sk                 35 net/ipv6/netfilter.c 		.flowi6_uid = sock_net_uid(net, sk),
sk                 41 net/ipv6/netfilter.c 	dst = ip6_route_output(net, sk, &fl6);
sk                 59 net/ipv6/netfilter.c 		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
sk                 98 net/ipv6/netfilter.c 		.sk.sk_bound_dev_if = 1,
sk                101 net/ipv6/netfilter.c 	const void *sk = strict ? &fake_sk : NULL;
sk                105 net/ipv6/netfilter.c 	result = ip6_route_output(net, sk, &fl->u.ip6);
sk                115 net/ipv6/netfilter.c int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
sk                117 net/ipv6/netfilter.c 		    int (*output)(struct net *, struct sock *sk,
sk                188 net/ipv6/netfilter.c 			err = output(net, sk, data, skb);
sk                221 net/ipv6/netfilter.c 		err = output(net, sk, data, skb2);
sk               1547 net/ipv6/netfilter/ip6_tables.c compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
sk               1552 net/ipv6/netfilter/ip6_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1557 net/ipv6/netfilter/ip6_tables.c 		ret = compat_do_replace(sock_net(sk), user, len);
sk               1561 net/ipv6/netfilter/ip6_tables.c 		ret = do_add_counters(sock_net(sk), user, len, 1);
sk               1650 net/ipv6/netfilter/ip6_tables.c compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
sk               1654 net/ipv6/netfilter/ip6_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1659 net/ipv6/netfilter/ip6_tables.c 		ret = get_info(sock_net(sk), user, len, 1);
sk               1662 net/ipv6/netfilter/ip6_tables.c 		ret = compat_get_entries(sock_net(sk), user, len);
sk               1665 net/ipv6/netfilter/ip6_tables.c 		ret = do_ip6t_get_ctl(sk, cmd, user, len);
sk               1672 net/ipv6/netfilter/ip6_tables.c do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
sk               1676 net/ipv6/netfilter/ip6_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1681 net/ipv6/netfilter/ip6_tables.c 		ret = do_replace(sock_net(sk), user, len);
sk               1685 net/ipv6/netfilter/ip6_tables.c 		ret = do_add_counters(sock_net(sk), user, len, 0);
sk               1696 net/ipv6/netfilter/ip6_tables.c do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
sk               1700 net/ipv6/netfilter/ip6_tables.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               1705 net/ipv6/netfilter/ip6_tables.c 		ret = get_info(sock_net(sk), user, len, 0);
sk               1709 net/ipv6/netfilter/ip6_tables.c 		ret = get_entries(sock_net(sk), user, len);
sk                 67 net/ipv6/netfilter/nf_dup_ipv6.c 		ip6_local_out(net, skb->sk, skb);
sk                278 net/ipv6/netfilter/nf_log_ipv6.c 		nf_log_dump_sk_uid_gid(net, m, skb->sk);
sk                213 net/ipv6/netfilter/nf_reject_ipv6.c 		ip6_local_out(net, nskb->sk, nskb);
sk                 43 net/ipv6/netfilter/nf_tproxy_ipv6.c 			 struct sock *sk)
sk                 50 net/ipv6/netfilter/nf_tproxy_ipv6.c 		inet_twsk_put(inet_twsk(sk));
sk                 66 net/ipv6/netfilter/nf_tproxy_ipv6.c 			inet_twsk_deschedule_put(inet_twsk(sk));
sk                 67 net/ipv6/netfilter/nf_tproxy_ipv6.c 			sk = sk2;
sk                 71 net/ipv6/netfilter/nf_tproxy_ipv6.c 	return sk;
sk                 83 net/ipv6/netfilter/nf_tproxy_ipv6.c 	struct sock *sk;
sk                 96 net/ipv6/netfilter/nf_tproxy_ipv6.c 			sk = inet6_lookup_listener(net, &tcp_hashinfo, skb,
sk                102 net/ipv6/netfilter/nf_tproxy_ipv6.c 			if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk                103 net/ipv6/netfilter/nf_tproxy_ipv6.c 				sk = NULL;
sk                111 net/ipv6/netfilter/nf_tproxy_ipv6.c 			sk = __inet6_lookup_established(net, &tcp_hashinfo,
sk                121 net/ipv6/netfilter/nf_tproxy_ipv6.c 		sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
sk                123 net/ipv6/netfilter/nf_tproxy_ipv6.c 		if (sk) {
sk                124 net/ipv6/netfilter/nf_tproxy_ipv6.c 			int connected = (sk->sk_state == TCP_ESTABLISHED);
sk                125 net/ipv6/netfilter/nf_tproxy_ipv6.c 			int wildcard = ipv6_addr_any(&sk->sk_v6_rcv_saddr);
sk                134 net/ipv6/netfilter/nf_tproxy_ipv6.c 				sock_put(sk);
sk                135 net/ipv6/netfilter/nf_tproxy_ipv6.c 				sk = NULL;
sk                141 net/ipv6/netfilter/nf_tproxy_ipv6.c 		sk = NULL;
sk                145 net/ipv6/netfilter/nf_tproxy_ipv6.c 		 protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
sk                147 net/ipv6/netfilter/nf_tproxy_ipv6.c 	return sk;
sk                148 net/ipv6/output_core.c int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                161 net/ipv6/output_core.c 	skb = l3mdev_ip6_out(sk, skb);
sk                168 net/ipv6/output_core.c 		       net, sk, skb, NULL, skb_dst(skb)->dev,
sk                173 net/ipv6/output_core.c int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                177 net/ipv6/output_core.c 	err = __ip6_local_out(net, sk, skb);
sk                179 net/ipv6/output_core.c 		err = dst_output(net, sk, skb);
sk                 26 net/ipv6/ping.c static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
sk                 31 net/ipv6/ping.c static void dummy_ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
sk                 39 net/ipv6/ping.c static void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
sk                 47 net/ipv6/ping.c static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk                 49 net/ipv6/ping.c 	struct inet_sock *inet = inet_sk(sk);
sk                 50 net/ipv6/ping.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                 80 net/ipv6/ping.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk                 82 net/ipv6/ping.c 		daddr = &sk->sk_v6_daddr;
sk                 86 net/ipv6/ping.c 		oif = sk->sk_bound_dev_if;
sk                 99 net/ipv6/ping.c 	    (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if))
sk                110 net/ipv6/ping.c 	fl6.flowi6_mark = sk->sk_mark;
sk                111 net/ipv6/ping.c 	fl6.flowi6_uid = sk->sk_uid;
sk                114 net/ipv6/ping.c 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
sk                119 net/ipv6/ping.c 	dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false);
sk                140 net/ipv6/ping.c 	lock_sock(sk);
sk                141 net/ipv6/ping.c 	err = ip6_append_data(sk, ping_getfrag, &pfh, len,
sk                146 net/ipv6/ping.c 		ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev,
sk                148 net/ipv6/ping.c 		ip6_flush_pending_frames(sk);
sk                150 net/ipv6/ping.c 		icmpv6_push_pending_frames(sk, &fl6,
sk                153 net/ipv6/ping.c 	release_sock(sk);
sk                 69 net/ipv6/raw.c struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
sk                 75 net/ipv6/raw.c 	sk_for_each_from(sk)
sk                 76 net/ipv6/raw.c 		if (inet_sk(sk)->inet_num == num) {
sk                 78 net/ipv6/raw.c 			if (!net_eq(sock_net(sk), net))
sk                 81 net/ipv6/raw.c 			if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
sk                 82 net/ipv6/raw.c 			    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
sk                 85 net/ipv6/raw.c 			if (!raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
sk                 89 net/ipv6/raw.c 			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
sk                 90 net/ipv6/raw.c 				if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
sk                 93 net/ipv6/raw.c 				    inet6_mc_check(sk, loc_addr, rmt_addr))
sk                 99 net/ipv6/raw.c 	sk = NULL;
sk                101 net/ipv6/raw.c 	return sk;
sk                109 net/ipv6/raw.c static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
sk                120 net/ipv6/raw.c 		const __u32 *data = &raw6_sk(sk)->filter.data[0];
sk                161 net/ipv6/raw.c 	struct sock *sk;
sk                172 net/ipv6/raw.c 	sk = sk_head(&raw_v6_hashinfo.ht[hash]);
sk                174 net/ipv6/raw.c 	if (!sk)
sk                178 net/ipv6/raw.c 	sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr,
sk                181 net/ipv6/raw.c 	while (sk) {
sk                187 net/ipv6/raw.c 			filtered = icmpv6_filter(sk, skb);
sk                202 net/ipv6/raw.c 			filtered = filter ? (*filter)(sk, skb) : 0;
sk                219 net/ipv6/raw.c 				rawv6_rcv(sk, clone);
sk                222 net/ipv6/raw.c 		sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
sk                242 net/ipv6/raw.c static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                244 net/ipv6/raw.c 	struct inet_sock *inet = inet_sk(sk);
sk                245 net/ipv6/raw.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                263 net/ipv6/raw.c 	lock_sock(sk);
sk                266 net/ipv6/raw.c 	if (sk->sk_state != TCP_CLOSE)
sk                280 net/ipv6/raw.c 				sk->sk_bound_dev_if = addr->sin6_scope_id;
sk                284 net/ipv6/raw.c 			if (!sk->sk_bound_dev_if)
sk                288 net/ipv6/raw.c 		if (sk->sk_bound_dev_if) {
sk                290 net/ipv6/raw.c 			dev = dev_get_by_index_rcu(sock_net(sk),
sk                291 net/ipv6/raw.c 						   sk->sk_bound_dev_if);
sk                301 net/ipv6/raw.c 		    !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
sk                303 net/ipv6/raw.c 			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
sk                311 net/ipv6/raw.c 	sk->sk_v6_rcv_saddr = addr->sin6_addr;
sk                318 net/ipv6/raw.c 	release_sock(sk);
sk                322 net/ipv6/raw.c static void rawv6_err(struct sock *sk, struct sk_buff *skb,
sk                326 net/ipv6/raw.c 	struct inet_sock *inet = inet_sk(sk);
sk                327 net/ipv6/raw.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                336 net/ipv6/raw.c 	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
sk                341 net/ipv6/raw.c 		ip6_sk_update_pmtu(skb, sk, info);
sk                345 net/ipv6/raw.c 		ip6_sk_redirect(skb, sk);
sk                352 net/ipv6/raw.c 		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
sk                356 net/ipv6/raw.c 		sk->sk_err = err;
sk                357 net/ipv6/raw.c 		sk->sk_error_report(sk);
sk                364 net/ipv6/raw.c 	struct sock *sk;
sk                372 net/ipv6/raw.c 	sk = sk_head(&raw_v6_hashinfo.ht[hash]);
sk                373 net/ipv6/raw.c 	if (sk) {
sk                380 net/ipv6/raw.c 		while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
sk                382 net/ipv6/raw.c 			rawv6_err(sk, skb, NULL, type, code,
sk                384 net/ipv6/raw.c 			sk = sk_next(sk);
sk                390 net/ipv6/raw.c static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                392 net/ipv6/raw.c 	if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
sk                394 net/ipv6/raw.c 		atomic_inc(&sk->sk_drops);
sk                401 net/ipv6/raw.c 	if (sock_queue_rcv_skb(sk, skb) < 0) {
sk                416 net/ipv6/raw.c int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
sk                418 net/ipv6/raw.c 	struct inet_sock *inet = inet_sk(sk);
sk                419 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
sk                421 net/ipv6/raw.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
sk                422 net/ipv6/raw.c 		atomic_inc(&sk->sk_drops);
sk                446 net/ipv6/raw.c 			atomic_inc(&sk->sk_drops);
sk                452 net/ipv6/raw.c 	rawv6_rcv_skb(sk, skb);
sk                462 net/ipv6/raw.c static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk                465 net/ipv6/raw.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                475 net/ipv6/raw.c 		return ipv6_recv_error(sk, msg, len, addr_len);
sk                478 net/ipv6/raw.c 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
sk                480 net/ipv6/raw.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                515 net/ipv6/raw.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk                518 net/ipv6/raw.c 		ip6_datagram_recv_ctl(sk, msg, skb);
sk                525 net/ipv6/raw.c 	skb_free_datagram(sk, skb);
sk                530 net/ipv6/raw.c 	skb_kill_datagram(sk, skb, flags);
sk                539 net/ipv6/raw.c static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
sk                553 net/ipv6/raw.c 	skb = skb_peek(&sk->sk_write_queue);
sk                558 net/ipv6/raw.c 	total_len = inet_sk(sk)->cork.base.length;
sk                561 net/ipv6/raw.c 		ip6_flush_pending_frames(sk);
sk                566 net/ipv6/raw.c 	if (skb_queue_len(&sk->sk_write_queue) == 1) {
sk                575 net/ipv6/raw.c 		skb_queue_walk(&sk->sk_write_queue, skb) {
sk                596 net/ipv6/raw.c 		ip6_flush_pending_frames(sk);
sk                613 net/ipv6/raw.c 	err = ip6_push_pending_frames(sk);
sk                618 net/ipv6/raw.c static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
sk                622 net/ipv6/raw.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                623 net/ipv6/raw.c 	struct net *net = sock_net(sk);
sk                632 net/ipv6/raw.c 		ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
sk                640 net/ipv6/raw.c 	skb = sock_alloc_send_skb(sk,
sk                648 net/ipv6/raw.c 	skb->priority = sk->sk_priority;
sk                677 net/ipv6/raw.c 	skb = l3mdev_ip6_out(sk, skb);
sk                687 net/ipv6/raw.c 	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
sk                766 net/ipv6/raw.c static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk                772 net/ipv6/raw.c 	struct inet_sock *inet = inet_sk(sk);
sk                773 net/ipv6/raw.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                774 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
sk                808 net/ipv6/raw.c 	fl6.flowi6_mark = sk->sk_mark;
sk                809 net/ipv6/raw.c 	fl6.flowi6_uid = sk->sk_uid;
sk                812 net/ipv6/raw.c 	ipc6.sockc.tsflags = sk->sk_tsflags;
sk                813 net/ipv6/raw.c 	ipc6.sockc.mark = sk->sk_mark;
sk                837 net/ipv6/raw.c 				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
sk                847 net/ipv6/raw.c 		if (sk->sk_state == TCP_ESTABLISHED &&
sk                848 net/ipv6/raw.c 		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
sk                849 net/ipv6/raw.c 			daddr = &sk->sk_v6_daddr;
sk                856 net/ipv6/raw.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk                860 net/ipv6/raw.c 		daddr = &sk->sk_v6_daddr;
sk                865 net/ipv6/raw.c 		fl6.flowi6_oif = sk->sk_bound_dev_if;
sk                873 net/ipv6/raw.c 		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
sk                879 net/ipv6/raw.c 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
sk                918 net/ipv6/raw.c 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
sk                928 net/ipv6/raw.c 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
sk                944 net/ipv6/raw.c 		err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
sk                948 net/ipv6/raw.c 		lock_sock(sk);
sk                949 net/ipv6/raw.c 		err = ip6_append_data(sk, raw6_getfrag, &rfv,
sk                954 net/ipv6/raw.c 			ip6_flush_pending_frames(sk);
sk                956 net/ipv6/raw.c 			err = rawv6_push_pending_frames(sk, &fl6, rp);
sk                957 net/ipv6/raw.c 		release_sock(sk);
sk                974 net/ipv6/raw.c static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
sk                981 net/ipv6/raw.c 		if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
sk                991 net/ipv6/raw.c static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
sk               1006 net/ipv6/raw.c 		if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
sk               1017 net/ipv6/raw.c static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
sk               1020 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
sk               1028 net/ipv6/raw.c 		if (sk->sk_type != SOCK_RAW)
sk               1030 net/ipv6/raw.c 		inet_sk(sk)->hdrincl = !!val;
sk               1033 net/ipv6/raw.c 		if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
sk               1064 net/ipv6/raw.c static int rawv6_setsockopt(struct sock *sk, int level, int optname,
sk               1072 net/ipv6/raw.c 		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
sk               1074 net/ipv6/raw.c 		return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
sk               1081 net/ipv6/raw.c 		return ipv6_setsockopt(sk, level, optname, optval, optlen);
sk               1084 net/ipv6/raw.c 	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
sk               1088 net/ipv6/raw.c static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
sk               1095 net/ipv6/raw.c 		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
sk               1097 net/ipv6/raw.c 		return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
sk               1104 net/ipv6/raw.c 		return compat_ipv6_setsockopt(sk, level, optname,
sk               1107 net/ipv6/raw.c 	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
sk               1111 net/ipv6/raw.c static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
sk               1114 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
sk               1122 net/ipv6/raw.c 		val = inet_sk(sk)->hdrincl;
sk               1149 net/ipv6/raw.c static int rawv6_getsockopt(struct sock *sk, int level, int optname,
sk               1157 net/ipv6/raw.c 		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
sk               1159 net/ipv6/raw.c 		return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
sk               1166 net/ipv6/raw.c 		return ipv6_getsockopt(sk, level, optname, optval, optlen);
sk               1169 net/ipv6/raw.c 	return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
sk               1173 net/ipv6/raw.c static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
sk               1180 net/ipv6/raw.c 		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
sk               1182 net/ipv6/raw.c 		return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
sk               1189 net/ipv6/raw.c 		return compat_ipv6_getsockopt(sk, level, optname,
sk               1192 net/ipv6/raw.c 	return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
sk               1196 net/ipv6/raw.c static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk               1200 net/ipv6/raw.c 		int amount = sk_wmem_alloc_get(sk);
sk               1208 net/ipv6/raw.c 		spin_lock_bh(&sk->sk_receive_queue.lock);
sk               1209 net/ipv6/raw.c 		skb = skb_peek(&sk->sk_receive_queue);
sk               1212 net/ipv6/raw.c 		spin_unlock_bh(&sk->sk_receive_queue.lock);
sk               1218 net/ipv6/raw.c 		return ip6mr_ioctl(sk, cmd, (void __user *)arg);
sk               1226 net/ipv6/raw.c static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
sk               1234 net/ipv6/raw.c 		return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
sk               1242 net/ipv6/raw.c static void rawv6_close(struct sock *sk, long timeout)
sk               1244 net/ipv6/raw.c 	if (inet_sk(sk)->inet_num == IPPROTO_RAW)
sk               1245 net/ipv6/raw.c 		ip6_ra_control(sk, -1);
sk               1246 net/ipv6/raw.c 	ip6mr_sk_done(sk);
sk               1247 net/ipv6/raw.c 	sk_common_release(sk);
sk               1250 net/ipv6/raw.c static void raw6_destroy(struct sock *sk)
sk               1252 net/ipv6/raw.c 	lock_sock(sk);
sk               1253 net/ipv6/raw.c 	ip6_flush_pending_frames(sk);
sk               1254 net/ipv6/raw.c 	release_sock(sk);
sk               1256 net/ipv6/raw.c 	inet6_destroy_sock(sk);
sk               1259 net/ipv6/raw.c static int rawv6_init_sk(struct sock *sk)
sk               1261 net/ipv6/raw.c 	struct raw6_sock *rp = raw6_sk(sk);
sk               1263 net/ipv6/raw.c 	switch (inet_sk(sk)->inet_num) {
sk                 93 net/ipv6/route.c static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                 95 net/ipv6/route.c static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                 97 net/ipv6/route.c static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk                100 net/ipv6/route.c static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
sk                267 net/ipv6/route.c static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk                273 net/ipv6/route.c static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
sk               2484 net/ipv6/route.c 					       const struct sock *sk,
sk               2503 net/ipv6/route.c 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
sk               2509 net/ipv6/route.c 	else if (sk)
sk               2510 net/ipv6/route.c 		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
sk               2517 net/ipv6/route.c 					 const struct sock *sk,
sk               2525 net/ipv6/route.c         dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
sk               2724 net/ipv6/route.c static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
sk               2739 net/ipv6/route.c 	} else if (sk) {
sk               2740 net/ipv6/route.c 		daddr = &sk->sk_v6_daddr;
sk               2741 net/ipv6/route.c 		saddr = &inet6_sk(sk)->saddr;
sk               2802 net/ipv6/route.c static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk               2806 net/ipv6/route.c 	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
sk               2831 net/ipv6/route.c void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
sk               2833 net/ipv6/route.c 	int oif = sk->sk_bound_dev_if;
sk               2839 net/ipv6/route.c 	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
sk               2841 net/ipv6/route.c 	dst = __sk_dst_get(sk);
sk               2843 net/ipv6/route.c 	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
sk               2846 net/ipv6/route.c 	bh_lock_sock(sk);
sk               2847 net/ipv6/route.c 	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
sk               2848 net/ipv6/route.c 		ip6_datagram_dst_update(sk, false);
sk               2849 net/ipv6/route.c 	bh_unlock_sock(sk);
sk               2853 net/ipv6/route.c void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
sk               2857 net/ipv6/route.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk               2860 net/ipv6/route.c 	ip6_dst_store(sk, dst,
sk               2861 net/ipv6/route.c 		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
sk               2862 net/ipv6/route.c 		      &sk->sk_v6_daddr : NULL,
sk               3064 net/ipv6/route.c void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
sk               3066 net/ipv6/route.c 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
sk               3067 net/ipv6/route.c 		     sk->sk_uid);
sk               3976 net/ipv6/route.c static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
sk               4402 net/ipv6/route.c static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
sk               4413 net/ipv6/route.c static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
sk               4893 net/ipv6/route.c 		.fc_nlinfo.nl_net = sock_net(skb->sk),
sk               5277 net/ipv6/route.c 	    !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
sk               5799 net/ipv6/route.c 	struct net *net = sock_net(in_skb->sk);
sk                265 net/ipv6/seg6.c 	struct net *net = sock_net(cb->skb->sk);
sk                326 net/ipv6/seg6_iptunnel.c static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                373 net/ipv6/seg6_iptunnel.c 	return dst_output(net, sk, skb);
sk                979 net/ipv6/sit.c 		if (skb->sk)
sk                980 net/ipv6/sit.c 			skb_set_owner_w(new_skb, skb->sk);
sk                128 net/ipv6/syncookies.c struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
sk                133 net/ipv6/syncookies.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                134 net/ipv6/syncookies.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                137 net/ipv6/syncookies.c 	struct sock *ret = sk;
sk                144 net/ipv6/syncookies.c 	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
sk                147 net/ipv6/syncookies.c 	if (tcp_synq_no_recent_overflow(sk))
sk                152 net/ipv6/syncookies.c 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
sk                156 net/ipv6/syncookies.c 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
sk                160 net/ipv6/syncookies.c 	tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);
sk                163 net/ipv6/syncookies.c 		tsoff = secure_tcpv6_ts_off(sock_net(sk),
sk                169 net/ipv6/syncookies.c 	if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
sk                173 net/ipv6/syncookies.c 	req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false);
sk                181 net/ipv6/syncookies.c 	if (security_inet_conn_request(sk, skb, req))
sk                189 net/ipv6/syncookies.c 	if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
sk                196 net/ipv6/syncookies.c 	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
sk                198 net/ipv6/syncookies.c 	if (!sk->sk_bound_dev_if &&
sk                202 net/ipv6/syncookies.c 	ireq->ir_mark = inet_request_mark(sk, skb);
sk                234 net/ipv6/syncookies.c 		fl6.fl6_sport = inet_sk(sk)->inet_sport;
sk                235 net/ipv6/syncookies.c 		fl6.flowi6_uid = sk->sk_uid;
sk                238 net/ipv6/syncookies.c 		dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
sk                244 net/ipv6/syncookies.c 	tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
sk                250 net/ipv6/syncookies.c 	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst);
sk                252 net/ipv6/syncookies.c 	ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff);
sk                 71 net/ipv6/tcp_ipv6.c static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
sk                 72 net/ipv6/tcp_ipv6.c static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
sk                 75 net/ipv6/tcp_ipv6.c static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
sk                 83 net/ipv6/tcp_ipv6.c static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
sk                 95 net/ipv6/tcp_ipv6.c static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
sk                 99 net/ipv6/tcp_ipv6.c 	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
sk                102 net/ipv6/tcp_ipv6.c static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
sk                109 net/ipv6/tcp_ipv6.c 		sk->sk_rx_dst = dst;
sk                110 net/ipv6/tcp_ipv6.c 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
sk                111 net/ipv6/tcp_ipv6.c 		tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
sk                129 net/ipv6/tcp_ipv6.c static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
sk                139 net/ipv6/tcp_ipv6.c 	sock_owned_by_me(sk);
sk                141 net/ipv6/tcp_ipv6.c 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
sk                144 net/ipv6/tcp_ipv6.c static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sk                148 net/ipv6/tcp_ipv6.c 	struct inet_sock *inet = inet_sk(sk);
sk                149 net/ipv6/tcp_ipv6.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                150 net/ipv6/tcp_ipv6.c 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
sk                151 net/ipv6/tcp_ipv6.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                158 net/ipv6/tcp_ipv6.c 	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
sk                173 net/ipv6/tcp_ipv6.c 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
sk                185 net/ipv6/tcp_ipv6.c 		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
sk                203 net/ipv6/tcp_ipv6.c 			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
sk                206 net/ipv6/tcp_ipv6.c 			sk->sk_bound_dev_if = usin->sin6_scope_id;
sk                210 net/ipv6/tcp_ipv6.c 		if (!sk->sk_bound_dev_if)
sk                215 net/ipv6/tcp_ipv6.c 	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
sk                221 net/ipv6/tcp_ipv6.c 	sk->sk_v6_daddr = usin->sin6_addr;
sk                232 net/ipv6/tcp_ipv6.c 		if (__ipv6_only_sock(sk))
sk                240 net/ipv6/tcp_ipv6.c 		sk->sk_backlog_rcv = tcp_v4_do_rcv;
sk                245 net/ipv6/tcp_ipv6.c 		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
sk                250 net/ipv6/tcp_ipv6.c 			sk->sk_backlog_rcv = tcp_v6_do_rcv;
sk                256 net/ipv6/tcp_ipv6.c 		np->saddr = sk->sk_v6_rcv_saddr;
sk                261 net/ipv6/tcp_ipv6.c 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
sk                262 net/ipv6/tcp_ipv6.c 		saddr = &sk->sk_v6_rcv_saddr;
sk                265 net/ipv6/tcp_ipv6.c 	fl6.daddr = sk->sk_v6_daddr;
sk                267 net/ipv6/tcp_ipv6.c 	fl6.flowi6_oif = sk->sk_bound_dev_if;
sk                268 net/ipv6/tcp_ipv6.c 	fl6.flowi6_mark = sk->sk_mark;
sk                271 net/ipv6/tcp_ipv6.c 	fl6.flowi6_uid = sk->sk_uid;
sk                273 net/ipv6/tcp_ipv6.c 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
sk                276 net/ipv6/tcp_ipv6.c 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
sk                278 net/ipv6/tcp_ipv6.c 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
sk                286 net/ipv6/tcp_ipv6.c 		sk->sk_v6_rcv_saddr = *saddr;
sk                293 net/ipv6/tcp_ipv6.c 	sk->sk_gso_type = SKB_GSO_TCPV6;
sk                294 net/ipv6/tcp_ipv6.c 	ip6_dst_store(sk, dst, NULL, NULL);
sk                305 net/ipv6/tcp_ipv6.c 	tcp_set_state(sk, TCP_SYN_SENT);
sk                306 net/ipv6/tcp_ipv6.c 	err = inet6_hash_connect(tcp_death_row, sk);
sk                310 net/ipv6/tcp_ipv6.c 	sk_set_txhash(sk);
sk                316 net/ipv6/tcp_ipv6.c 						    sk->sk_v6_daddr.s6_addr32,
sk                319 net/ipv6/tcp_ipv6.c 		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
sk                321 net/ipv6/tcp_ipv6.c 						   sk->sk_v6_daddr.s6_addr32);
sk                324 net/ipv6/tcp_ipv6.c 	if (tcp_fastopen_defer_connect(sk, &err))
sk                329 net/ipv6/tcp_ipv6.c 	err = tcp_connect(sk);
sk                336 net/ipv6/tcp_ipv6.c 	tcp_set_state(sk, TCP_CLOSE);
sk                339 net/ipv6/tcp_ipv6.c 	sk->sk_route_caps = 0;
sk                343 net/ipv6/tcp_ipv6.c static void tcp_v6_mtu_reduced(struct sock *sk)
sk                347 net/ipv6/tcp_ipv6.c 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
sk                350 net/ipv6/tcp_ipv6.c 	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
sk                354 net/ipv6/tcp_ipv6.c 	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
sk                355 net/ipv6/tcp_ipv6.c 		tcp_sync_mss(sk, dst_mtu(dst));
sk                356 net/ipv6/tcp_ipv6.c 		tcp_simple_retransmit(sk);
sk                370 net/ipv6/tcp_ipv6.c 	struct sock *sk;
sk                374 net/ipv6/tcp_ipv6.c 	sk = __inet6_lookup_established(net, &tcp_hashinfo,
sk                379 net/ipv6/tcp_ipv6.c 	if (!sk) {
sk                385 net/ipv6/tcp_ipv6.c 	if (sk->sk_state == TCP_TIME_WAIT) {
sk                386 net/ipv6/tcp_ipv6.c 		inet_twsk_put(inet_twsk(sk));
sk                391 net/ipv6/tcp_ipv6.c 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
sk                392 net/ipv6/tcp_ipv6.c 		tcp_req_err(sk, seq, fatal);
sk                396 net/ipv6/tcp_ipv6.c 	bh_lock_sock(sk);
sk                397 net/ipv6/tcp_ipv6.c 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
sk                400 net/ipv6/tcp_ipv6.c 	if (sk->sk_state == TCP_CLOSE)
sk                403 net/ipv6/tcp_ipv6.c 	if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
sk                408 net/ipv6/tcp_ipv6.c 	tp = tcp_sk(sk);
sk                412 net/ipv6/tcp_ipv6.c 	if (sk->sk_state != TCP_LISTEN &&
sk                418 net/ipv6/tcp_ipv6.c 	np = tcp_inet6_sk(sk);
sk                421 net/ipv6/tcp_ipv6.c 		if (!sock_owned_by_user(sk)) {
sk                422 net/ipv6/tcp_ipv6.c 			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
sk                425 net/ipv6/tcp_ipv6.c 				dst->ops->redirect(dst, sk, skb);
sk                435 net/ipv6/tcp_ipv6.c 		if (sk->sk_state == TCP_LISTEN)
sk                438 net/ipv6/tcp_ipv6.c 		if (!ip6_sk_accept_pmtu(sk))
sk                442 net/ipv6/tcp_ipv6.c 		if (!sock_owned_by_user(sk))
sk                443 net/ipv6/tcp_ipv6.c 			tcp_v6_mtu_reduced(sk);
sk                445 net/ipv6/tcp_ipv6.c 					   &sk->sk_tsq_flags))
sk                446 net/ipv6/tcp_ipv6.c 			sock_hold(sk);
sk                452 net/ipv6/tcp_ipv6.c 	switch (sk->sk_state) {
sk                458 net/ipv6/tcp_ipv6.c 		if (fastopen && !fastopen->sk)
sk                461 net/ipv6/tcp_ipv6.c 		if (!sock_owned_by_user(sk)) {
sk                462 net/ipv6/tcp_ipv6.c 			sk->sk_err = err;
sk                463 net/ipv6/tcp_ipv6.c 			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */
sk                465 net/ipv6/tcp_ipv6.c 			tcp_done(sk);
sk                467 net/ipv6/tcp_ipv6.c 			sk->sk_err_soft = err;
sk                471 net/ipv6/tcp_ipv6.c 	if (!sock_owned_by_user(sk) && np->recverr) {
sk                472 net/ipv6/tcp_ipv6.c 		sk->sk_err = err;
sk                473 net/ipv6/tcp_ipv6.c 		sk->sk_error_report(sk);
sk                475 net/ipv6/tcp_ipv6.c 		sk->sk_err_soft = err;
sk                478 net/ipv6/tcp_ipv6.c 	bh_unlock_sock(sk);
sk                479 net/ipv6/tcp_ipv6.c 	sock_put(sk);
sk                484 net/ipv6/tcp_ipv6.c static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
sk                491 net/ipv6/tcp_ipv6.c 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
sk                498 net/ipv6/tcp_ipv6.c 	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
sk                502 net/ipv6/tcp_ipv6.c 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
sk                516 net/ipv6/tcp_ipv6.c 		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass,
sk                517 net/ipv6/tcp_ipv6.c 			       sk->sk_priority);
sk                534 net/ipv6/tcp_ipv6.c static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
sk                537 net/ipv6/tcp_ipv6.c 	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
sk                540 net/ipv6/tcp_ipv6.c static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
sk                543 net/ipv6/tcp_ipv6.c 	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
sk                546 net/ipv6/tcp_ipv6.c static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
sk                574 net/ipv6/tcp_ipv6.c 			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
sk                576 net/ipv6/tcp_ipv6.c 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
sk                584 net/ipv6/tcp_ipv6.c 		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
sk                588 net/ipv6/tcp_ipv6.c 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
sk                653 net/ipv6/tcp_ipv6.c 			       const struct sock *sk,
sk                661 net/ipv6/tcp_ipv6.c 	if (sk) { /* valid for establish/request sockets */
sk                662 net/ipv6/tcp_ipv6.c 		saddr = &sk->sk_v6_rcv_saddr;
sk                663 net/ipv6/tcp_ipv6.c 		daddr = &sk->sk_v6_daddr;
sk                700 net/ipv6/tcp_ipv6.c static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
sk                711 net/ipv6/tcp_ipv6.c 	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
sk                719 net/ipv6/tcp_ipv6.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
sk                724 net/ipv6/tcp_ipv6.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
sk                734 net/ipv6/tcp_ipv6.c 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
sk                771 net/ipv6/tcp_ipv6.c static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
sk                775 net/ipv6/tcp_ipv6.c 	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
sk                805 net/ipv6/tcp_ipv6.c static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
sk                814 net/ipv6/tcp_ipv6.c 	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
sk                888 net/ipv6/tcp_ipv6.c 	if (sk) {
sk                889 net/ipv6/tcp_ipv6.c 		if (sk->sk_state == TCP_TIME_WAIT) {
sk                890 net/ipv6/tcp_ipv6.c 			mark = inet_twsk(sk)->tw_mark;
sk                892 net/ipv6/tcp_ipv6.c 			skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
sk                895 net/ipv6/tcp_ipv6.c 			mark = sk->sk_mark;
sk                897 net/ipv6/tcp_ipv6.c 		buff->tstamp = tcp_transmit_time(sk);
sk                902 net/ipv6/tcp_ipv6.c 	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
sk                923 net/ipv6/tcp_ipv6.c static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
sk                946 net/ipv6/tcp_ipv6.c 	if (!sk && !ipv6_unicast_destination(skb))
sk                949 net/ipv6/tcp_ipv6.c 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
sk                953 net/ipv6/tcp_ipv6.c 	if (sk && sk_fullsock(sk)) {
sk                954 net/ipv6/tcp_ipv6.c 		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
sk                989 net/ipv6/tcp_ipv6.c 	if (sk) {
sk                990 net/ipv6/tcp_ipv6.c 		oif = sk->sk_bound_dev_if;
sk                991 net/ipv6/tcp_ipv6.c 		if (sk_fullsock(sk)) {
sk                992 net/ipv6/tcp_ipv6.c 			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
sk                994 net/ipv6/tcp_ipv6.c 			trace_tcp_send_reset(sk, skb);
sk                997 net/ipv6/tcp_ipv6.c 			priority = sk->sk_priority;
sk                999 net/ipv6/tcp_ipv6.c 		if (sk->sk_state == TCP_TIME_WAIT) {
sk               1000 net/ipv6/tcp_ipv6.c 			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
sk               1001 net/ipv6/tcp_ipv6.c 			priority = inet_twsk(sk)->tw_priority;
sk               1008 net/ipv6/tcp_ipv6.c 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0,
sk               1017 net/ipv6/tcp_ipv6.c static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
sk               1022 net/ipv6/tcp_ipv6.c 	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
sk               1026 net/ipv6/tcp_ipv6.c static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
sk               1028 net/ipv6/tcp_ipv6.c 	struct inet_timewait_sock *tw = inet_twsk(sk);
sk               1029 net/ipv6/tcp_ipv6.c 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
sk               1031 net/ipv6/tcp_ipv6.c 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
sk               1040 net/ipv6/tcp_ipv6.c static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
sk               1051 net/ipv6/tcp_ipv6.c 	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
sk               1052 net/ipv6/tcp_ipv6.c 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
sk               1056 net/ipv6/tcp_ipv6.c 			req->ts_recent, sk->sk_bound_dev_if,
sk               1057 net/ipv6/tcp_ipv6.c 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
sk               1058 net/ipv6/tcp_ipv6.c 			0, 0, sk->sk_priority);
sk               1062 net/ipv6/tcp_ipv6.c static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
sk               1068 net/ipv6/tcp_ipv6.c 		sk = cookie_v6_check(sk, skb);
sk               1070 net/ipv6/tcp_ipv6.c 	return sk;
sk               1073 net/ipv6/tcp_ipv6.c u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
sk               1079 net/ipv6/tcp_ipv6.c 				    &tcp_request_sock_ipv6_ops, sk, th);
sk               1082 net/ipv6/tcp_ipv6.c 		tcp_synq_overflow(sk);
sk               1088 net/ipv6/tcp_ipv6.c static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
sk               1091 net/ipv6/tcp_ipv6.c 		return tcp_v4_conn_request(sk, skb);
sk               1097 net/ipv6/tcp_ipv6.c 				&tcp_request_sock_ipv6_ops, sk, skb);
sk               1100 net/ipv6/tcp_ipv6.c 	tcp_listendrop(sk);
sk               1114 net/ipv6/tcp_ipv6.c static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
sk               1122 net/ipv6/tcp_ipv6.c 	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
sk               1137 net/ipv6/tcp_ipv6.c 		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
sk               1187 net/ipv6/tcp_ipv6.c 	if (sk_acceptq_is_full(sk))
sk               1191 net/ipv6/tcp_ipv6.c 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
sk               1196 net/ipv6/tcp_ipv6.c 	newsk = tcp_create_openreq_child(sk, req, skb);
sk               1264 net/ipv6/tcp_ipv6.c 	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
sk               1273 net/ipv6/tcp_ipv6.c 	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
sk               1282 net/ipv6/tcp_ipv6.c 			       sk_gfp_mask(sk, GFP_ATOMIC));
sk               1286 net/ipv6/tcp_ipv6.c 	if (__inet_inherit_port(sk, newsk) < 0) {
sk               1298 net/ipv6/tcp_ipv6.c 						      sk_gfp_mask(sk, GFP_ATOMIC));
sk               1311 net/ipv6/tcp_ipv6.c 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
sk               1315 net/ipv6/tcp_ipv6.c 	tcp_listendrop(sk);
sk               1327 net/ipv6/tcp_ipv6.c static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
sk               1329 net/ipv6/tcp_ipv6.c 	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
sk               1342 net/ipv6/tcp_ipv6.c 		return tcp_v4_do_rcv(sk, skb);
sk               1363 net/ipv6/tcp_ipv6.c 		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
sk               1365 net/ipv6/tcp_ipv6.c 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
sk               1366 net/ipv6/tcp_ipv6.c 		struct dst_entry *dst = sk->sk_rx_dst;
sk               1368 net/ipv6/tcp_ipv6.c 		sock_rps_save_rxhash(sk, skb);
sk               1369 net/ipv6/tcp_ipv6.c 		sk_mark_napi_id(sk, skb);
sk               1371 net/ipv6/tcp_ipv6.c 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
sk               1374 net/ipv6/tcp_ipv6.c 				sk->sk_rx_dst = NULL;
sk               1378 net/ipv6/tcp_ipv6.c 		tcp_rcv_established(sk, skb);
sk               1387 net/ipv6/tcp_ipv6.c 	if (sk->sk_state == TCP_LISTEN) {
sk               1388 net/ipv6/tcp_ipv6.c 		struct sock *nsk = tcp_v6_cookie_check(sk, skb);
sk               1393 net/ipv6/tcp_ipv6.c 		if (nsk != sk) {
sk               1394 net/ipv6/tcp_ipv6.c 			if (tcp_child_process(sk, nsk, skb))
sk               1401 net/ipv6/tcp_ipv6.c 		sock_rps_save_rxhash(sk, skb);
sk               1403 net/ipv6/tcp_ipv6.c 	if (tcp_rcv_state_process(sk, skb))
sk               1410 net/ipv6/tcp_ipv6.c 	tcp_v6_send_reset(sk, skb);
sk               1417 net/ipv6/tcp_ipv6.c 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
sk               1418 net/ipv6/tcp_ipv6.c 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
sk               1430 net/ipv6/tcp_ipv6.c 	tp = tcp_sk(sk);
sk               1432 net/ipv6/tcp_ipv6.c 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
sk               1441 net/ipv6/tcp_ipv6.c 		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
sk               1442 net/ipv6/tcp_ipv6.c 			skb_set_owner_r(opt_skb, sk);
sk               1486 net/ipv6/tcp_ipv6.c 	struct sock *sk;
sk               1515 net/ipv6/tcp_ipv6.c 	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
sk               1518 net/ipv6/tcp_ipv6.c 	if (!sk)
sk               1522 net/ipv6/tcp_ipv6.c 	if (sk->sk_state == TCP_TIME_WAIT)
sk               1525 net/ipv6/tcp_ipv6.c 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
sk               1526 net/ipv6/tcp_ipv6.c 		struct request_sock *req = inet_reqsk(sk);
sk               1530 net/ipv6/tcp_ipv6.c 		sk = req->rsk_listener;
sk               1531 net/ipv6/tcp_ipv6.c 		if (tcp_v6_inbound_md5_hash(sk, skb)) {
sk               1532 net/ipv6/tcp_ipv6.c 			sk_drops_add(sk, skb);
sk               1540 net/ipv6/tcp_ipv6.c 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
sk               1541 net/ipv6/tcp_ipv6.c 			inet_csk_reqsk_queue_drop_and_put(sk, req);
sk               1544 net/ipv6/tcp_ipv6.c 		sock_hold(sk);
sk               1547 net/ipv6/tcp_ipv6.c 		if (!tcp_filter(sk, skb)) {
sk               1551 net/ipv6/tcp_ipv6.c 			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
sk               1562 net/ipv6/tcp_ipv6.c 				sock_put(sk);
sk               1567 net/ipv6/tcp_ipv6.c 		if (nsk == sk) {
sk               1570 net/ipv6/tcp_ipv6.c 		} else if (tcp_child_process(sk, nsk, skb)) {
sk               1574 net/ipv6/tcp_ipv6.c 			sock_put(sk);
sk               1578 net/ipv6/tcp_ipv6.c 	if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
sk               1583 net/ipv6/tcp_ipv6.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
sk               1586 net/ipv6/tcp_ipv6.c 	if (tcp_v6_inbound_md5_hash(sk, skb))
sk               1589 net/ipv6/tcp_ipv6.c 	if (tcp_filter(sk, skb))
sk               1597 net/ipv6/tcp_ipv6.c 	if (sk->sk_state == TCP_LISTEN) {
sk               1598 net/ipv6/tcp_ipv6.c 		ret = tcp_v6_do_rcv(sk, skb);
sk               1602 net/ipv6/tcp_ipv6.c 	sk_incoming_cpu_update(sk);
sk               1604 net/ipv6/tcp_ipv6.c 	bh_lock_sock_nested(sk);
sk               1605 net/ipv6/tcp_ipv6.c 	tcp_segs_in(tcp_sk(sk), skb);
sk               1607 net/ipv6/tcp_ipv6.c 	if (!sock_owned_by_user(sk)) {
sk               1608 net/ipv6/tcp_ipv6.c 		skb_to_free = sk->sk_rx_skb_cache;
sk               1609 net/ipv6/tcp_ipv6.c 		sk->sk_rx_skb_cache = NULL;
sk               1610 net/ipv6/tcp_ipv6.c 		ret = tcp_v6_do_rcv(sk, skb);
sk               1612 net/ipv6/tcp_ipv6.c 		if (tcp_add_backlog(sk, skb))
sk               1616 net/ipv6/tcp_ipv6.c 	bh_unlock_sock(sk);
sk               1621 net/ipv6/tcp_ipv6.c 		sock_put(sk);
sk               1644 net/ipv6/tcp_ipv6.c 	sk_drops_add(sk, skb);
sk               1646 net/ipv6/tcp_ipv6.c 		sock_put(sk);
sk               1651 net/ipv6/tcp_ipv6.c 		inet_twsk_put(inet_twsk(sk));
sk               1658 net/ipv6/tcp_ipv6.c 		inet_twsk_put(inet_twsk(sk));
sk               1662 net/ipv6/tcp_ipv6.c 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
sk               1675 net/ipv6/tcp_ipv6.c 			struct inet_timewait_sock *tw = inet_twsk(sk);
sk               1677 net/ipv6/tcp_ipv6.c 			sk = sk2;
sk               1686 net/ipv6/tcp_ipv6.c 		tcp_v6_timewait_ack(sk, skb);
sk               1689 net/ipv6/tcp_ipv6.c 		tcp_v6_send_reset(sk, skb);
sk               1690 net/ipv6/tcp_ipv6.c 		inet_twsk_deschedule_put(inet_twsk(sk));
sk               1702 net/ipv6/tcp_ipv6.c 	struct sock *sk;
sk               1717 net/ipv6/tcp_ipv6.c 	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
sk               1721 net/ipv6/tcp_ipv6.c 	if (sk) {
sk               1722 net/ipv6/tcp_ipv6.c 		skb->sk = sk;
sk               1724 net/ipv6/tcp_ipv6.c 		if (sk_fullsock(sk)) {
sk               1725 net/ipv6/tcp_ipv6.c 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
sk               1728 net/ipv6/tcp_ipv6.c 				dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
sk               1730 net/ipv6/tcp_ipv6.c 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
sk               1803 net/ipv6/tcp_ipv6.c static int tcp_v6_init_sock(struct sock *sk)
sk               1805 net/ipv6/tcp_ipv6.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk               1807 net/ipv6/tcp_ipv6.c 	tcp_init_sock(sk);
sk               1812 net/ipv6/tcp_ipv6.c 	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
sk               1818 net/ipv6/tcp_ipv6.c static void tcp_v6_destroy_sock(struct sock *sk)
sk               1820 net/ipv6/tcp_ipv6.c 	tcp_v4_destroy_sock(sk);
sk               1821 net/ipv6/tcp_ipv6.c 	inet6_destroy_sock(sk);
sk               1958 net/ipv6/tcp_ipv6.c 	struct sock *sk = v;
sk               1971 net/ipv6/tcp_ipv6.c 	if (sk->sk_state == TCP_TIME_WAIT)
sk               1973 net/ipv6/tcp_ipv6.c 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
sk                 80 net/ipv6/udp.c int udp_v6_get_port(struct sock *sk, unsigned short snum)
sk                 83 net/ipv6/udp.c 		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
sk                 85 net/ipv6/udp.c 		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
sk                 88 net/ipv6/udp.c 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
sk                 89 net/ipv6/udp.c 	return udp_lib_get_port(sk, snum, hash2_nulladdr);
sk                 92 net/ipv6/udp.c void udp_v6_rehash(struct sock *sk)
sk                 94 net/ipv6/udp.c 	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
sk                 95 net/ipv6/udp.c 					  &sk->sk_v6_rcv_saddr,
sk                 96 net/ipv6/udp.c 					  inet_sk(sk)->inet_num);
sk                 98 net/ipv6/udp.c 	udp_lib_rehash(sk, new_hash);
sk                101 net/ipv6/udp.c static int compute_score(struct sock *sk, struct net *net,
sk                110 net/ipv6/udp.c 	if (!net_eq(sock_net(sk), net) ||
sk                111 net/ipv6/udp.c 	    udp_sk(sk)->udp_port_hash != hnum ||
sk                112 net/ipv6/udp.c 	    sk->sk_family != PF_INET6)
sk                115 net/ipv6/udp.c 	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
sk                119 net/ipv6/udp.c 	inet = inet_sk(sk);
sk                127 net/ipv6/udp.c 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
sk                128 net/ipv6/udp.c 		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
sk                133 net/ipv6/udp.c 	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
sk                138 net/ipv6/udp.c 	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
sk                151 net/ipv6/udp.c 	struct sock *sk, *result;
sk                157 net/ipv6/udp.c 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
sk                158 net/ipv6/udp.c 		score = compute_score(sk, net, saddr, sport,
sk                161 net/ipv6/udp.c 			if (sk->sk_reuseport &&
sk                162 net/ipv6/udp.c 			    sk->sk_state != TCP_ESTABLISHED) {
sk                166 net/ipv6/udp.c 				result = reuseport_select_sock(sk, hash, skb,
sk                168 net/ipv6/udp.c 				if (result && !reuseport_has_conns(sk, false))
sk                171 net/ipv6/udp.c 			result = sk;
sk                242 net/ipv6/udp.c 	struct sock *sk;
sk                244 net/ipv6/udp.c 	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
sk                246 net/ipv6/udp.c 	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk                247 net/ipv6/udp.c 		sk = NULL;
sk                248 net/ipv6/udp.c 	return sk;
sk                267 net/ipv6/udp.c int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk                270 net/ipv6/udp.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                271 net/ipv6/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk                275 net/ipv6/udp.c 	int is_udplite = IS_UDPLITE(sk);
sk                281 net/ipv6/udp.c 		return ipv6_recv_error(sk, msg, len, addr_len);
sk                284 net/ipv6/udp.c 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
sk                287 net/ipv6/udp.c 	off = sk_peek_offset(sk, flags);
sk                288 net/ipv6/udp.c 	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
sk                300 net/ipv6/udp.c 	mib = __UDPX_MIB(sk, is_udp4);
sk                328 net/ipv6/udp.c 			atomic_inc(&sk->sk_drops);
sk                337 net/ipv6/udp.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk                359 net/ipv6/udp.c 			BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
sk                363 net/ipv6/udp.c 	if (udp_sk(sk)->gro_enabled)
sk                364 net/ipv6/udp.c 		udp_cmsg_recv(msg, sk, skb);
sk                367 net/ipv6/udp.c 		ip6_datagram_recv_common_ctl(sk, msg, skb);
sk                371 net/ipv6/udp.c 			ip_cmsg_recv_offset(msg, sk, skb,
sk                375 net/ipv6/udp.c 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
sk                382 net/ipv6/udp.c 	skb_consume_udp(sk, skb, peeking ? -err : err);
sk                386 net/ipv6/udp.c 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
sk                457 net/ipv6/udp.c 	struct sock *sk;
sk                468 net/ipv6/udp.c 	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
sk                471 net/ipv6/udp.c 	if (sk) {
sk                472 net/ipv6/udp.c 		int (*lookup)(struct sock *sk, struct sk_buff *skb);
sk                473 net/ipv6/udp.c 		struct udp_sock *up = udp_sk(sk);
sk                476 net/ipv6/udp.c 		if (!lookup || lookup(sk, skb))
sk                477 net/ipv6/udp.c 			sk = NULL;
sk                480 net/ipv6/udp.c 	if (!sk) {
sk                481 net/ipv6/udp.c 		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
sk                488 net/ipv6/udp.c 	return sk;
sk                501 net/ipv6/udp.c 	struct sock *sk;
sk                506 net/ipv6/udp.c 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
sk                508 net/ipv6/udp.c 	if (!sk) {
sk                510 net/ipv6/udp.c 		sk = ERR_PTR(-ENOENT);
sk                512 net/ipv6/udp.c 			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
sk                515 net/ipv6/udp.c 			if (!sk)
sk                519 net/ipv6/udp.c 		if (IS_ERR(sk)) {
sk                522 net/ipv6/udp.c 			return PTR_ERR(sk);
sk                529 net/ipv6/udp.c 	np = inet6_sk(sk);
sk                532 net/ipv6/udp.c 		if (!ip6_sk_accept_pmtu(sk))
sk                534 net/ipv6/udp.c 		ip6_sk_update_pmtu(skb, sk, info);
sk                540 net/ipv6/udp.c 			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
sk                541 net/ipv6/udp.c 				     sk->sk_mark, sk->sk_uid);
sk                543 net/ipv6/udp.c 			ip6_sk_redirect(skb, sk);
sk                553 net/ipv6/udp.c 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
sk                556 net/ipv6/udp.c 		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
sk                559 net/ipv6/udp.c 	sk->sk_err = err;
sk                560 net/ipv6/udp.c 	sk->sk_error_report(sk);
sk                565 net/ipv6/udp.c static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                569 net/ipv6/udp.c 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
sk                570 net/ipv6/udp.c 		sock_rps_save_rxhash(sk, skb);
sk                571 net/ipv6/udp.c 		sk_mark_napi_id(sk, skb);
sk                572 net/ipv6/udp.c 		sk_incoming_cpu_update(sk);
sk                574 net/ipv6/udp.c 		sk_mark_napi_id_once(sk, skb);
sk                577 net/ipv6/udp.c 	rc = __udp_enqueue_schedule_skb(sk, skb);
sk                579 net/ipv6/udp.c 		int is_udplite = IS_UDPLITE(sk);
sk                583 net/ipv6/udp.c 			UDP6_INC_STATS(sock_net(sk),
sk                585 net/ipv6/udp.c 		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
sk                600 net/ipv6/udp.c static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
sk                602 net/ipv6/udp.c 	struct udp_sock *up = udp_sk(sk);
sk                603 net/ipv6/udp.c 	int is_udplite = IS_UDPLITE(sk);
sk                605 net/ipv6/udp.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
sk                609 net/ipv6/udp.c 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
sk                631 net/ipv6/udp.c 			ret = encap_rcv(sk, skb);
sk                633 net/ipv6/udp.c 				__UDP_INC_STATS(sock_net(sk),
sk                660 net/ipv6/udp.c 	prefetch(&sk->sk_rmem_alloc);
sk                661 net/ipv6/udp.c 	if (rcu_access_pointer(sk->sk_filter) &&
sk                665 net/ipv6/udp.c 	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
sk                672 net/ipv6/udp.c 	return __udpv6_queue_rcv_skb(sk, skb);
sk                675 net/ipv6/udp.c 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
sk                677 net/ipv6/udp.c 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
sk                678 net/ipv6/udp.c 	atomic_inc(&sk->sk_drops);
sk                683 net/ipv6/udp.c static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                688 net/ipv6/udp.c 	if (likely(!udp_unexpected_gso(sk, skb)))
sk                689 net/ipv6/udp.c 		return udpv6_queue_rcv_one_skb(sk, skb);
sk                692 net/ipv6/udp.c 	segs = udp_rcv_segment(sk, skb, false);
sk                697 net/ipv6/udp.c 		ret = udpv6_queue_rcv_one_skb(sk, skb);
sk                705 net/ipv6/udp.c static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
sk                710 net/ipv6/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk                712 net/ipv6/udp.c 	if (!net_eq(sock_net(sk), net))
sk                715 net/ipv6/udp.c 	if (udp_sk(sk)->udp_port_hash != hnum ||
sk                716 net/ipv6/udp.c 	    sk->sk_family != PF_INET6 ||
sk                718 net/ipv6/udp.c 	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
sk                719 net/ipv6/udp.c 		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
sk                720 net/ipv6/udp.c 	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
sk                721 net/ipv6/udp.c 	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
sk                722 net/ipv6/udp.c 		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
sk                724 net/ipv6/udp.c 	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
sk                747 net/ipv6/udp.c 	struct sock *sk, *first = NULL;
sk                751 net/ipv6/udp.c 	unsigned int offset = offsetof(typeof(*sk), sk_node);
sk                764 net/ipv6/udp.c 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
sk                767 net/ipv6/udp.c 	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
sk                768 net/ipv6/udp.c 		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
sk                775 net/ipv6/udp.c 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
sk                778 net/ipv6/udp.c 			first = sk;
sk                783 net/ipv6/udp.c 			atomic_inc(&sk->sk_drops);
sk                785 net/ipv6/udp.c 					 IS_UDPLITE(sk));
sk                787 net/ipv6/udp.c 					 IS_UDPLITE(sk));
sk                791 net/ipv6/udp.c 		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
sk                812 net/ipv6/udp.c static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
sk                814 net/ipv6/udp.c 	if (udp_sk_rx_dst_set(sk, dst)) {
sk                817 net/ipv6/udp.c 		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
sk                824 net/ipv6/udp.c static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
sk                829 net/ipv6/udp.c 	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
sk                832 net/ipv6/udp.c 	ret = udpv6_queue_rcv_skb(sk, skb);
sk                846 net/ipv6/udp.c 	struct sock *sk;
sk                883 net/ipv6/udp.c 	sk = skb_steal_sock(skb);
sk                884 net/ipv6/udp.c 	if (sk) {
sk                888 net/ipv6/udp.c 		if (unlikely(sk->sk_rx_dst != dst))
sk                889 net/ipv6/udp.c 			udp6_sk_rx_dst_set(sk, dst);
sk                891 net/ipv6/udp.c 		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
sk                892 net/ipv6/udp.c 			sock_put(sk);
sk                896 net/ipv6/udp.c 		ret = udp6_unicast_rcv_skb(sk, skb, uh);
sk                897 net/ipv6/udp.c 		sock_put(sk);
sk                909 net/ipv6/udp.c 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
sk                910 net/ipv6/udp.c 	if (sk) {
sk                911 net/ipv6/udp.c 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
sk                913 net/ipv6/udp.c 		return udp6_unicast_rcv_skb(sk, skb, uh);
sk                960 net/ipv6/udp.c 	struct sock *sk;
sk                962 net/ipv6/udp.c 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
sk                963 net/ipv6/udp.c 		if (sk->sk_state == TCP_ESTABLISHED &&
sk                964 net/ipv6/udp.c 		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
sk                965 net/ipv6/udp.c 			return sk;
sk                976 net/ipv6/udp.c 	struct sock *sk;
sk                988 net/ipv6/udp.c 		sk = __udp6_lib_demux_lookup(net, uh->dest,
sk                995 net/ipv6/udp.c 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
sk                998 net/ipv6/udp.c 	skb->sk = sk;
sk               1000 net/ipv6/udp.c 	dst = READ_ONCE(sk->sk_rx_dst);
sk               1003 net/ipv6/udp.c 		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
sk               1021 net/ipv6/udp.c static void udp_v6_flush_pending_frames(struct sock *sk)
sk               1023 net/ipv6/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               1026 net/ipv6/udp.c 		udp_flush_pending_frames(sk);
sk               1030 net/ipv6/udp.c 		ip6_flush_pending_frames(sk);
sk               1034 net/ipv6/udp.c static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
sk               1044 net/ipv6/udp.c 		if (__ipv6_only_sock(sk))
sk               1046 net/ipv6/udp.c 		return udp_pre_connect(sk, uaddr, addr_len);
sk               1052 net/ipv6/udp.c 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
sk               1061 net/ipv6/udp.c static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
sk               1105 net/ipv6/udp.c 	struct sock *sk = skb->sk;
sk               1108 net/ipv6/udp.c 	int is_udplite = IS_UDPLITE(sk);
sk               1135 net/ipv6/udp.c 		if (udp_sk(sk)->no_check6_tx) {
sk               1156 net/ipv6/udp.c 	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
sk               1161 net/ipv6/udp.c 		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
sk               1175 net/ipv6/udp.c 		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
sk               1176 net/ipv6/udp.c 			UDP6_INC_STATS(sock_net(sk),
sk               1181 net/ipv6/udp.c 		UDP6_INC_STATS(sock_net(sk),
sk               1187 net/ipv6/udp.c static int udp_v6_push_pending_frames(struct sock *sk)
sk               1190 net/ipv6/udp.c 	struct udp_sock  *up = udp_sk(sk);
sk               1195 net/ipv6/udp.c 		return udp_push_pending_frames(sk);
sk               1200 net/ipv6/udp.c 	fl6 = inet_sk(sk)->cork.fl.u.ip6;
sk               1202 net/ipv6/udp.c 	skb = ip6_finish_skb(sk);
sk               1206 net/ipv6/udp.c 	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);
sk               1214 net/ipv6/udp.c int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk               1217 net/ipv6/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               1218 net/ipv6/udp.c 	struct inet_sock *inet = inet_sk(sk);
sk               1219 net/ipv6/udp.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk               1233 net/ipv6/udp.c 	int is_udplite = IS_UDPLITE(sk);
sk               1238 net/ipv6/udp.c 	ipc6.sockc.tsflags = sk->sk_tsflags;
sk               1239 net/ipv6/udp.c 	ipc6.sockc.mark = sk->sk_mark;
sk               1267 net/ipv6/udp.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1269 net/ipv6/udp.c 		daddr = &sk->sk_v6_daddr;
sk               1282 net/ipv6/udp.c 			if (__ipv6_only_sock(sk))
sk               1284 net/ipv6/udp.c 			return udp_sendmsg(sk, msg, len);
sk               1289 net/ipv6/udp.c 		return udp_sendmsg(sk, msg, len);
sk               1303 net/ipv6/udp.c 		lock_sock(sk);
sk               1306 net/ipv6/udp.c 				release_sock(sk);
sk               1312 net/ipv6/udp.c 		release_sock(sk);
sk               1328 net/ipv6/udp.c 				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
sk               1338 net/ipv6/udp.c 		if (sk->sk_state == TCP_ESTABLISHED &&
sk               1339 net/ipv6/udp.c 		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
sk               1340 net/ipv6/udp.c 			daddr = &sk->sk_v6_daddr;
sk               1347 net/ipv6/udp.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1351 net/ipv6/udp.c 		daddr = &sk->sk_v6_daddr;
sk               1357 net/ipv6/udp.c 		fl6.flowi6_oif = sk->sk_bound_dev_if;
sk               1363 net/ipv6/udp.c 	fl6.flowi6_uid = sk->sk_uid;
sk               1371 net/ipv6/udp.c 		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
sk               1373 net/ipv6/udp.c 			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
sk               1380 net/ipv6/udp.c 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
sk               1397 net/ipv6/udp.c 	fl6.flowi6_proto = sk->sk_protocol;
sk               1404 net/ipv6/udp.c 		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
sk               1439 net/ipv6/udp.c 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
sk               1446 net/ipv6/udp.c 	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
sk               1465 net/ipv6/udp.c 		skb = ip6_make_skb(sk, getfrag, msg, ulen,
sk               1475 net/ipv6/udp.c 	lock_sock(sk);
sk               1479 net/ipv6/udp.c 		release_sock(sk);
sk               1492 net/ipv6/udp.c 	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
sk               1496 net/ipv6/udp.c 		udp_v6_flush_pending_frames(sk);
sk               1498 net/ipv6/udp.c 		err = udp_v6_push_pending_frames(sk);
sk               1499 net/ipv6/udp.c 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
sk               1504 net/ipv6/udp.c 	release_sock(sk);
sk               1520 net/ipv6/udp.c 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
sk               1521 net/ipv6/udp.c 		UDP6_INC_STATS(sock_net(sk),
sk               1535 net/ipv6/udp.c void udpv6_destroy_sock(struct sock *sk)
sk               1537 net/ipv6/udp.c 	struct udp_sock *up = udp_sk(sk);
sk               1538 net/ipv6/udp.c 	lock_sock(sk);
sk               1539 net/ipv6/udp.c 	udp_v6_flush_pending_frames(sk);
sk               1540 net/ipv6/udp.c 	release_sock(sk);
sk               1544 net/ipv6/udp.c 			void (*encap_destroy)(struct sock *sk);
sk               1547 net/ipv6/udp.c 				encap_destroy(sk);
sk               1553 net/ipv6/udp.c 	inet6_destroy_sock(sk);
sk               1559 net/ipv6/udp.c int udpv6_setsockopt(struct sock *sk, int level, int optname,
sk               1563 net/ipv6/udp.c 		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
sk               1565 net/ipv6/udp.c 	return ipv6_setsockopt(sk, level, optname, optval, optlen);
sk               1569 net/ipv6/udp.c int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
sk               1573 net/ipv6/udp.c 		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
sk               1575 net/ipv6/udp.c 	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
sk               1579 net/ipv6/udp.c int udpv6_getsockopt(struct sock *sk, int level, int optname,
sk               1583 net/ipv6/udp.c 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
sk               1584 net/ipv6/udp.c 	return ipv6_getsockopt(sk, level, optname, optval, optlen);
sk               1588 net/ipv6/udp.c int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
sk               1592 net/ipv6/udp.c 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
sk               1593 net/ipv6/udp.c 	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
sk                 15 net/ipv6/udp_impl.h int udp_v6_get_port(struct sock *sk, unsigned short snum);
sk                 16 net/ipv6/udp_impl.h void udp_v6_rehash(struct sock *sk);
sk                 18 net/ipv6/udp_impl.h int udpv6_getsockopt(struct sock *sk, int level, int optname,
sk                 20 net/ipv6/udp_impl.h int udpv6_setsockopt(struct sock *sk, int level, int optname,
sk                 23 net/ipv6/udp_impl.h int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
sk                 25 net/ipv6/udp_impl.h int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
sk                 28 net/ipv6/udp_impl.h int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
sk                 29 net/ipv6/udp_impl.h int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
sk                 31 net/ipv6/udp_impl.h void udpv6_destroy_sock(struct sock *sk);
sk                 35 net/ipv6/xfrm6_input.c static int xfrm6_transport_finish2(struct net *net, struct sock *sk,
sk                 29 net/ipv6/xfrm6_output.c 	struct sock *sk = skb->sk;
sk                 31 net/ipv6/xfrm6_output.c 	if (sk) {
sk                 32 net/ipv6/xfrm6_output.c 		if (sk->sk_family != AF_INET6)
sk                 35 net/ipv6/xfrm6_output.c 		proto = sk->sk_protocol;
sk                 37 net/ipv6/xfrm6_output.c 			return inet6_sk(sk)->dontfrag;
sk                 46 net/ipv6/xfrm6_output.c 	struct sock *sk = skb->sk;
sk                 48 net/ipv6/xfrm6_output.c 	fl6.flowi6_oif = sk->sk_bound_dev_if;
sk                 51 net/ipv6/xfrm6_output.c 	ipv6_local_rxpmtu(sk, &fl6, mtu);
sk                 58 net/ipv6/xfrm6_output.c 	struct sock *sk = skb->sk;
sk                 61 net/ipv6/xfrm6_output.c 	fl6.fl6_dport = inet_sk(sk)->inet_dport;
sk                 64 net/ipv6/xfrm6_output.c 	ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
sk                 87 net/ipv6/xfrm6_output.c 		else if (skb->sk)
sk                110 net/ipv6/xfrm6_output.c int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb)
sk                116 net/ipv6/xfrm6_output.c 	return xfrm_output(sk, skb);
sk                119 net/ipv6/xfrm6_output.c static int __xfrm6_output_state_finish(struct xfrm_state *x, struct sock *sk,
sk                128 net/ipv6/xfrm6_output.c 		ret = afinfo->output_finish(sk, skb);
sk                136 net/ipv6/xfrm6_output.c static int __xfrm6_output_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                140 net/ipv6/xfrm6_output.c 	return __xfrm6_output_state_finish(x, sk, skb);
sk                143 net/ipv6/xfrm6_output.c static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                153 net/ipv6/xfrm6_output.c 		return dst_output(net, sk, skb);
sk                171 net/ipv6/xfrm6_output.c 	} else if (!skb->ignore_df && toobig && skb->sk) {
sk                178 net/ipv6/xfrm6_output.c 		return ip6_fragment(net, sk, skb,
sk                182 net/ipv6/xfrm6_output.c 	return __xfrm6_output_state_finish(x, sk, skb);
sk                185 net/ipv6/xfrm6_output.c int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                188 net/ipv6/xfrm6_output.c 			    net, sk, skb,  NULL, skb_dst(skb)->dev,
sk                100 net/ipv6/xfrm6_policy.c static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
sk                107 net/ipv6/xfrm6_policy.c 	path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
sk                110 net/ipv6/xfrm6_policy.c static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
sk                116 net/ipv6/xfrm6_policy.c 	path->ops->redirect(path, sk, skb);
sk                 55 net/iucv/af_iucv.c #define __iucv_sock_wait(sk, condition, timeo, ret)			\
sk                 60 net/iucv/af_iucv.c 	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
sk                 70 net/iucv/af_iucv.c 		release_sock(sk);					\
sk                 72 net/iucv/af_iucv.c 		lock_sock(sk);						\
sk                 73 net/iucv/af_iucv.c 		ret = sock_error(sk);					\
sk                 77 net/iucv/af_iucv.c 	finish_wait(sk_sleep(sk), &__wait);				\
sk                 80 net/iucv/af_iucv.c #define iucv_sock_wait(sk, condition, timeo)				\
sk                 84 net/iucv/af_iucv.c 		__iucv_sock_wait(sk, condition, timeo, __ret);		\
sk                 88 net/iucv/af_iucv.c static void iucv_sock_kill(struct sock *sk);
sk                 89 net/iucv/af_iucv.c static void iucv_sock_close(struct sock *sk);
sk                154 net/iucv/af_iucv.c 	struct sock *sk;
sk                160 net/iucv/af_iucv.c 	sk_for_each(sk, &iucv_sk_list.head) {
sk                161 net/iucv/af_iucv.c 		iucv = iucv_sk(sk);
sk                162 net/iucv/af_iucv.c 		switch (sk->sk_state) {
sk                166 net/iucv/af_iucv.c 			iucv_sever_path(sk, 0);
sk                190 net/iucv/af_iucv.c 	struct sock *sk;
sk                196 net/iucv/af_iucv.c 	sk_for_each(sk, &iucv_sk_list.head) {
sk                197 net/iucv/af_iucv.c 		switch (sk->sk_state) {
sk                199 net/iucv/af_iucv.c 			sk->sk_err = EPIPE;
sk                200 net/iucv/af_iucv.c 			sk->sk_state = IUCV_DISCONN;
sk                201 net/iucv/af_iucv.c 			sk->sk_state_change(sk);
sk                273 net/iucv/af_iucv.c static int iucv_sock_in_state(struct sock *sk, int state, int state2)
sk                275 net/iucv/af_iucv.c 	return (sk->sk_state == state || sk->sk_state == state2);
sk                286 net/iucv/af_iucv.c static inline int iucv_below_msglim(struct sock *sk)
sk                288 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk                290 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_CONNECTED)
sk                302 net/iucv/af_iucv.c static void iucv_sock_wake_msglim(struct sock *sk)
sk                307 net/iucv/af_iucv.c 	wq = rcu_dereference(sk->sk_wq);
sk                310 net/iucv/af_iucv.c 	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
sk                397 net/iucv/af_iucv.c 	struct sock *sk;
sk                399 net/iucv/af_iucv.c 	sk_for_each(sk, &iucv_sk_list.head)
sk                400 net/iucv/af_iucv.c 		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
sk                401 net/iucv/af_iucv.c 			return sk;
sk                406 net/iucv/af_iucv.c static void iucv_sock_destruct(struct sock *sk)
sk                408 net/iucv/af_iucv.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                409 net/iucv/af_iucv.c 	skb_queue_purge(&sk->sk_error_queue);
sk                411 net/iucv/af_iucv.c 	sk_mem_reclaim(sk);
sk                413 net/iucv/af_iucv.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                414 net/iucv/af_iucv.c 		pr_err("Attempt to release alive iucv socket %p\n", sk);
sk                418 net/iucv/af_iucv.c 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
sk                419 net/iucv/af_iucv.c 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
sk                420 net/iucv/af_iucv.c 	WARN_ON(sk->sk_wmem_queued);
sk                421 net/iucv/af_iucv.c 	WARN_ON(sk->sk_forward_alloc);
sk                427 net/iucv/af_iucv.c 	struct sock *sk;
sk                430 net/iucv/af_iucv.c 	while ((sk = iucv_accept_dequeue(parent, NULL))) {
sk                431 net/iucv/af_iucv.c 		iucv_sock_close(sk);
sk                432 net/iucv/af_iucv.c 		iucv_sock_kill(sk);
sk                439 net/iucv/af_iucv.c static void iucv_sock_kill(struct sock *sk)
sk                441 net/iucv/af_iucv.c 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
sk                444 net/iucv/af_iucv.c 	iucv_sock_unlink(&iucv_sk_list, sk);
sk                445 net/iucv/af_iucv.c 	sock_set_flag(sk, SOCK_DEAD);
sk                446 net/iucv/af_iucv.c 	sock_put(sk);
sk                450 net/iucv/af_iucv.c static void iucv_sever_path(struct sock *sk, int with_user_data)
sk                453 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk                470 net/iucv/af_iucv.c static int iucv_send_ctrl(struct sock *sk, u8 flags)
sk                472 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk                480 net/iucv/af_iucv.c 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
sk                482 net/iucv/af_iucv.c 		shutdown = sk->sk_shutdown;
sk                483 net/iucv/af_iucv.c 		sk->sk_shutdown &= RCV_SHUTDOWN;
sk                485 net/iucv/af_iucv.c 	skb = sock_alloc_send_skb(sk, blen, 1, &err);
sk                488 net/iucv/af_iucv.c 		err = afiucv_hs_send(NULL, sk, skb, flags);
sk                491 net/iucv/af_iucv.c 		sk->sk_shutdown = shutdown;
sk                496 net/iucv/af_iucv.c static void iucv_sock_close(struct sock *sk)
sk                498 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk                502 net/iucv/af_iucv.c 	lock_sock(sk);
sk                504 net/iucv/af_iucv.c 	switch (sk->sk_state) {
sk                506 net/iucv/af_iucv.c 		iucv_sock_cleanup_listen(sk);
sk                511 net/iucv/af_iucv.c 			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
sk                512 net/iucv/af_iucv.c 			sk->sk_state = IUCV_DISCONN;
sk                513 net/iucv/af_iucv.c 			sk->sk_state_change(sk);
sk                518 net/iucv/af_iucv.c 		sk->sk_state = IUCV_CLOSING;
sk                519 net/iucv/af_iucv.c 		sk->sk_state_change(sk);
sk                522 net/iucv/af_iucv.c 			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
sk                523 net/iucv/af_iucv.c 				timeo = sk->sk_lingertime;
sk                526 net/iucv/af_iucv.c 			iucv_sock_wait(sk,
sk                527 net/iucv/af_iucv.c 					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
sk                533 net/iucv/af_iucv.c 		sk->sk_state = IUCV_CLOSED;
sk                534 net/iucv/af_iucv.c 		sk->sk_state_change(sk);
sk                536 net/iucv/af_iucv.c 		sk->sk_err = ECONNRESET;
sk                537 net/iucv/af_iucv.c 		sk->sk_state_change(sk);
sk                544 net/iucv/af_iucv.c 		iucv_sever_path(sk, 1);
sk                550 net/iucv/af_iucv.c 		sk->sk_bound_dev_if = 0;
sk                554 net/iucv/af_iucv.c 	sock_set_flag(sk, SOCK_ZAPPED);
sk                556 net/iucv/af_iucv.c 	release_sock(sk);
sk                559 net/iucv/af_iucv.c static void iucv_sock_init(struct sock *sk, struct sock *parent)
sk                562 net/iucv/af_iucv.c 		sk->sk_type = parent->sk_type;
sk                563 net/iucv/af_iucv.c 		security_sk_clone(parent, sk);
sk                569 net/iucv/af_iucv.c 	struct sock *sk;
sk                572 net/iucv/af_iucv.c 	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
sk                573 net/iucv/af_iucv.c 	if (!sk)
sk                575 net/iucv/af_iucv.c 	iucv = iucv_sk(sk);
sk                577 net/iucv/af_iucv.c 	sock_init_data(sock, sk);
sk                598 net/iucv/af_iucv.c 	sk->sk_destruct = iucv_sock_destruct;
sk                599 net/iucv/af_iucv.c 	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
sk                601 net/iucv/af_iucv.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                603 net/iucv/af_iucv.c 	sk->sk_protocol = proto;
sk                604 net/iucv/af_iucv.c 	sk->sk_state	= IUCV_OPEN;
sk                606 net/iucv/af_iucv.c 	iucv_sock_link(&iucv_sk_list, sk);
sk                607 net/iucv/af_iucv.c 	return sk;
sk                614 net/iucv/af_iucv.c 	struct sock *sk;
sk                633 net/iucv/af_iucv.c 	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
sk                634 net/iucv/af_iucv.c 	if (!sk)
sk                637 net/iucv/af_iucv.c 	iucv_sock_init(sk, NULL);
sk                642 net/iucv/af_iucv.c void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
sk                645 net/iucv/af_iucv.c 	sk_add_node(sk, &l->head);
sk                649 net/iucv/af_iucv.c void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
sk                652 net/iucv/af_iucv.c 	sk_del_node_init(sk);
sk                656 net/iucv/af_iucv.c void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
sk                661 net/iucv/af_iucv.c 	sock_hold(sk);
sk                663 net/iucv/af_iucv.c 	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
sk                665 net/iucv/af_iucv.c 	iucv_sk(sk)->parent = parent;
sk                669 net/iucv/af_iucv.c void iucv_accept_unlink(struct sock *sk)
sk                672 net/iucv/af_iucv.c 	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
sk                675 net/iucv/af_iucv.c 	list_del_init(&iucv_sk(sk)->accept_q);
sk                677 net/iucv/af_iucv.c 	sk_acceptq_removed(iucv_sk(sk)->parent);
sk                678 net/iucv/af_iucv.c 	iucv_sk(sk)->parent = NULL;
sk                679 net/iucv/af_iucv.c 	sock_put(sk);
sk                685 net/iucv/af_iucv.c 	struct sock *sk;
sk                688 net/iucv/af_iucv.c 		sk = (struct sock *) isk;
sk                689 net/iucv/af_iucv.c 		lock_sock(sk);
sk                691 net/iucv/af_iucv.c 		if (sk->sk_state == IUCV_CLOSED) {
sk                692 net/iucv/af_iucv.c 			iucv_accept_unlink(sk);
sk                693 net/iucv/af_iucv.c 			release_sock(sk);
sk                697 net/iucv/af_iucv.c 		if (sk->sk_state == IUCV_CONNECTED ||
sk                698 net/iucv/af_iucv.c 		    sk->sk_state == IUCV_DISCONN ||
sk                700 net/iucv/af_iucv.c 			iucv_accept_unlink(sk);
sk                702 net/iucv/af_iucv.c 				sock_graft(sk, newsock);
sk                704 net/iucv/af_iucv.c 			release_sock(sk);
sk                705 net/iucv/af_iucv.c 			return sk;
sk                708 net/iucv/af_iucv.c 		release_sock(sk);
sk                730 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk                741 net/iucv/af_iucv.c 	lock_sock(sk);
sk                742 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_OPEN) {
sk                749 net/iucv/af_iucv.c 	iucv = iucv_sk(sk);
sk                774 net/iucv/af_iucv.c 			sk->sk_bound_dev_if = dev->ifindex;
sk                777 net/iucv/af_iucv.c 			sk->sk_state = IUCV_BOUND;
sk                791 net/iucv/af_iucv.c 		sk->sk_state = IUCV_BOUND;
sk                793 net/iucv/af_iucv.c 		sk->sk_allocation |= GFP_DMA;
sk                804 net/iucv/af_iucv.c 	release_sock(sk);
sk                809 net/iucv/af_iucv.c static int iucv_sock_autobind(struct sock *sk)
sk                811 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk                819 net/iucv/af_iucv.c 	sk->sk_allocation |= GFP_DMA;
sk                834 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk                835 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk                852 net/iucv/af_iucv.c 				    sk);
sk                881 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk                882 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk                888 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
sk                891 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_OPEN &&
sk                895 net/iucv/af_iucv.c 	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
sk                898 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_OPEN) {
sk                899 net/iucv/af_iucv.c 		err = iucv_sock_autobind(sk);
sk                904 net/iucv/af_iucv.c 	lock_sock(sk);
sk                911 net/iucv/af_iucv.c 		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
sk                917 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_CONNECTED)
sk                918 net/iucv/af_iucv.c 		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
sk                920 net/iucv/af_iucv.c 				     sock_sndtimeo(sk, flags & O_NONBLOCK));
sk                922 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
sk                926 net/iucv/af_iucv.c 		iucv_sever_path(sk, 0);
sk                929 net/iucv/af_iucv.c 	release_sock(sk);
sk                936 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk                939 net/iucv/af_iucv.c 	lock_sock(sk);
sk                942 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_BOUND)
sk                948 net/iucv/af_iucv.c 	sk->sk_max_ack_backlog = backlog;
sk                949 net/iucv/af_iucv.c 	sk->sk_ack_backlog = 0;
sk                950 net/iucv/af_iucv.c 	sk->sk_state = IUCV_LISTEN;
sk                954 net/iucv/af_iucv.c 	release_sock(sk);
sk                963 net/iucv/af_iucv.c 	struct sock *sk = sock->sk, *nsk;
sk                967 net/iucv/af_iucv.c 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                969 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_LISTEN) {
sk                974 net/iucv/af_iucv.c 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk                977 net/iucv/af_iucv.c 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
sk                978 net/iucv/af_iucv.c 	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
sk                985 net/iucv/af_iucv.c 		release_sock(sk);
sk                987 net/iucv/af_iucv.c 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                989 net/iucv/af_iucv.c 		if (sk->sk_state != IUCV_LISTEN) {
sk               1001 net/iucv/af_iucv.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk               1009 net/iucv/af_iucv.c 	release_sock(sk);
sk               1017 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk               1018 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               1063 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk               1064 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               1077 net/iucv/af_iucv.c 	err = sock_error(sk);
sk               1085 net/iucv/af_iucv.c 	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
sk               1088 net/iucv/af_iucv.c 	lock_sock(sk);
sk               1090 net/iucv/af_iucv.c 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
sk               1096 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_CONNECTED) {
sk               1160 net/iucv/af_iucv.c 	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
sk               1174 net/iucv/af_iucv.c 	timeo = sock_sndtimeo(sk, noblock);
sk               1175 net/iucv/af_iucv.c 	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
sk               1180 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_CONNECTED) {
sk               1191 net/iucv/af_iucv.c 		err = afiucv_hs_send(&txmsg, sk, skb, 0);
sk               1257 net/iucv/af_iucv.c 	release_sock(sk);
sk               1263 net/iucv/af_iucv.c 	release_sock(sk);
sk               1299 net/iucv/af_iucv.c static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
sk               1348 net/iucv/af_iucv.c 	if (sk_filter(sk, skb)) {
sk               1349 net/iucv/af_iucv.c 		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
sk               1353 net/iucv/af_iucv.c 	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
sk               1354 net/iucv/af_iucv.c 		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
sk               1361 net/iucv/af_iucv.c static void iucv_process_message_q(struct sock *sk)
sk               1363 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               1371 net/iucv/af_iucv.c 		iucv_process_message(sk, skb, p->path, &p->msg);
sk               1383 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk               1384 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               1390 net/iucv/af_iucv.c 	if ((sk->sk_state == IUCV_DISCONN) &&
sk               1392 net/iucv/af_iucv.c 	    skb_queue_empty(&sk->sk_receive_queue) &&
sk               1401 net/iucv/af_iucv.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk               1403 net/iucv/af_iucv.c 		if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               1412 net/iucv/af_iucv.c 		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
sk               1417 net/iucv/af_iucv.c 			skb_queue_head(&sk->sk_receive_queue, skb);
sk               1422 net/iucv/af_iucv.c 	if (sk->sk_type == SOCK_SEQPACKET) {
sk               1437 net/iucv/af_iucv.c 			skb_queue_head(&sk->sk_receive_queue, skb);
sk               1445 net/iucv/af_iucv.c 		if (sk->sk_type == SOCK_STREAM) {
sk               1448 net/iucv/af_iucv.c 				skb_queue_head(&sk->sk_receive_queue, skb);
sk               1458 net/iucv/af_iucv.c 				iucv_sock_close(sk);
sk               1468 net/iucv/af_iucv.c 			if (__sock_queue_rcv_skb(sk, rskb)) {
sk               1478 net/iucv/af_iucv.c 				iucv_process_message_q(sk);
sk               1481 net/iucv/af_iucv.c 				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
sk               1483 net/iucv/af_iucv.c 					sk->sk_state = IUCV_DISCONN;
sk               1484 net/iucv/af_iucv.c 					sk->sk_state_change(sk);
sk               1493 net/iucv/af_iucv.c 	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
sk               1502 net/iucv/af_iucv.c 	struct sock *sk;
sk               1505 net/iucv/af_iucv.c 		sk = (struct sock *) isk;
sk               1507 net/iucv/af_iucv.c 		if (sk->sk_state == IUCV_CONNECTED)
sk               1517 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk               1522 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_LISTEN)
sk               1523 net/iucv/af_iucv.c 		return iucv_accept_poll(sk);
sk               1525 net/iucv/af_iucv.c 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
sk               1527 net/iucv/af_iucv.c 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
sk               1529 net/iucv/af_iucv.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               1532 net/iucv/af_iucv.c 	if (sk->sk_shutdown == SHUTDOWN_MASK)
sk               1535 net/iucv/af_iucv.c 	if (!skb_queue_empty(&sk->sk_receive_queue) ||
sk               1536 net/iucv/af_iucv.c 	    (sk->sk_shutdown & RCV_SHUTDOWN))
sk               1539 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_CLOSED)
sk               1542 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_DISCONN)
sk               1545 net/iucv/af_iucv.c 	if (sock_writeable(sk) && iucv_below_msglim(sk))
sk               1548 net/iucv/af_iucv.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk               1555 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk               1556 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               1565 net/iucv/af_iucv.c 	lock_sock(sk);
sk               1566 net/iucv/af_iucv.c 	switch (sk->sk_state) {
sk               1597 net/iucv/af_iucv.c 			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
sk               1600 net/iucv/af_iucv.c 	sk->sk_shutdown |= how;
sk               1609 net/iucv/af_iucv.c 		skb_queue_purge(&sk->sk_receive_queue);
sk               1613 net/iucv/af_iucv.c 	sk->sk_state_change(sk);
sk               1616 net/iucv/af_iucv.c 	release_sock(sk);
sk               1622 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk               1625 net/iucv/af_iucv.c 	if (!sk)
sk               1628 net/iucv/af_iucv.c 	iucv_sock_close(sk);
sk               1630 net/iucv/af_iucv.c 	sock_orphan(sk);
sk               1631 net/iucv/af_iucv.c 	iucv_sock_kill(sk);
sk               1639 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk               1640 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               1655 net/iucv/af_iucv.c 	lock_sock(sk);
sk               1664 net/iucv/af_iucv.c 		switch (sk->sk_state) {
sk               1681 net/iucv/af_iucv.c 	release_sock(sk);
sk               1689 net/iucv/af_iucv.c 	struct sock *sk = sock->sk;
sk               1690 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               1710 net/iucv/af_iucv.c 		lock_sock(sk);
sk               1713 net/iucv/af_iucv.c 		release_sock(sk);
sk               1716 net/iucv/af_iucv.c 		if (sk->sk_state == IUCV_OPEN)
sk               1742 net/iucv/af_iucv.c 	struct sock *sk, *nsk;
sk               1751 net/iucv/af_iucv.c 	sk = NULL;
sk               1752 net/iucv/af_iucv.c 	sk_for_each(sk, &iucv_sk_list.head)
sk               1753 net/iucv/af_iucv.c 		if (sk->sk_state == IUCV_LISTEN &&
sk               1754 net/iucv/af_iucv.c 		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
sk               1759 net/iucv/af_iucv.c 			iucv = iucv_sk(sk);
sk               1767 net/iucv/af_iucv.c 	bh_lock_sock(sk);
sk               1773 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_LISTEN) {
sk               1780 net/iucv/af_iucv.c 	if (sk_acceptq_is_full(sk)) {
sk               1787 net/iucv/af_iucv.c 	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
sk               1795 net/iucv/af_iucv.c 	iucv_sock_init(nsk, sk);
sk               1822 net/iucv/af_iucv.c 	iucv_accept_enqueue(sk, nsk);
sk               1826 net/iucv/af_iucv.c 	sk->sk_data_ready(sk);
sk               1829 net/iucv/af_iucv.c 	bh_unlock_sock(sk);
sk               1835 net/iucv/af_iucv.c 	struct sock *sk = path->private;
sk               1837 net/iucv/af_iucv.c 	sk->sk_state = IUCV_CONNECTED;
sk               1838 net/iucv/af_iucv.c 	sk->sk_state_change(sk);
sk               1843 net/iucv/af_iucv.c 	struct sock *sk = path->private;
sk               1844 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               1849 net/iucv/af_iucv.c 	if (sk->sk_shutdown & RCV_SHUTDOWN) {
sk               1860 net/iucv/af_iucv.c 	len = atomic_read(&sk->sk_rmem_alloc);
sk               1862 net/iucv/af_iucv.c 	if (len > sk->sk_rcvbuf)
sk               1869 net/iucv/af_iucv.c 	iucv_process_message(sk, skb, path, msg);
sk               1888 net/iucv/af_iucv.c 	struct sock *sk = path->private;
sk               1890 net/iucv/af_iucv.c 	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
sk               1894 net/iucv/af_iucv.c 	bh_lock_sock(sk);
sk               1910 net/iucv/af_iucv.c 		iucv_sock_wake_msglim(sk);
sk               1913 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_CLOSING) {
sk               1914 net/iucv/af_iucv.c 		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
sk               1915 net/iucv/af_iucv.c 			sk->sk_state = IUCV_CLOSED;
sk               1916 net/iucv/af_iucv.c 			sk->sk_state_change(sk);
sk               1919 net/iucv/af_iucv.c 	bh_unlock_sock(sk);
sk               1925 net/iucv/af_iucv.c 	struct sock *sk = path->private;
sk               1927 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_CLOSED)
sk               1930 net/iucv/af_iucv.c 	bh_lock_sock(sk);
sk               1931 net/iucv/af_iucv.c 	iucv_sever_path(sk, 1);
sk               1932 net/iucv/af_iucv.c 	sk->sk_state = IUCV_DISCONN;
sk               1934 net/iucv/af_iucv.c 	sk->sk_state_change(sk);
sk               1935 net/iucv/af_iucv.c 	bh_unlock_sock(sk);
sk               1943 net/iucv/af_iucv.c 	struct sock *sk = path->private;
sk               1945 net/iucv/af_iucv.c 	bh_lock_sock(sk);
sk               1946 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_CLOSED) {
sk               1947 net/iucv/af_iucv.c 		sk->sk_shutdown |= SEND_SHUTDOWN;
sk               1948 net/iucv/af_iucv.c 		sk->sk_state_change(sk);
sk               1950 net/iucv/af_iucv.c 	bh_unlock_sock(sk);
sk               1977 net/iucv/af_iucv.c static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
sk               1984 net/iucv/af_iucv.c 	iucv = iucv_sk(sk);
sk               1993 net/iucv/af_iucv.c 	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
sk               1994 net/iucv/af_iucv.c 	bh_lock_sock(sk);
sk               1995 net/iucv/af_iucv.c 	if ((sk->sk_state != IUCV_LISTEN) ||
sk               1996 net/iucv/af_iucv.c 	    sk_acceptq_is_full(sk) ||
sk               2003 net/iucv/af_iucv.c 		bh_unlock_sock(sk);
sk               2008 net/iucv/af_iucv.c 	iucv_sock_init(nsk, sk);
sk               2019 net/iucv/af_iucv.c 	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
sk               2028 net/iucv/af_iucv.c 		iucv_accept_enqueue(sk, nsk);
sk               2030 net/iucv/af_iucv.c 		sk->sk_data_ready(sk);
sk               2033 net/iucv/af_iucv.c 	bh_unlock_sock(sk);
sk               2042 net/iucv/af_iucv.c static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
sk               2044 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               2048 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_BOUND)
sk               2050 net/iucv/af_iucv.c 	bh_lock_sock(sk);
sk               2052 net/iucv/af_iucv.c 	sk->sk_state = IUCV_CONNECTED;
sk               2053 net/iucv/af_iucv.c 	sk->sk_state_change(sk);
sk               2054 net/iucv/af_iucv.c 	bh_unlock_sock(sk);
sk               2063 net/iucv/af_iucv.c static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
sk               2065 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               2069 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_BOUND)
sk               2071 net/iucv/af_iucv.c 	bh_lock_sock(sk);
sk               2072 net/iucv/af_iucv.c 	sk->sk_state = IUCV_DISCONN;
sk               2073 net/iucv/af_iucv.c 	sk->sk_state_change(sk);
sk               2074 net/iucv/af_iucv.c 	bh_unlock_sock(sk);
sk               2083 net/iucv/af_iucv.c static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
sk               2085 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               2090 net/iucv/af_iucv.c 	bh_lock_sock(sk);
sk               2091 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_CONNECTED) {
sk               2092 net/iucv/af_iucv.c 		sk->sk_state = IUCV_DISCONN;
sk               2093 net/iucv/af_iucv.c 		sk->sk_state_change(sk);
sk               2095 net/iucv/af_iucv.c 	bh_unlock_sock(sk);
sk               2104 net/iucv/af_iucv.c static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
sk               2106 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               2111 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_CONNECTED)
sk               2115 net/iucv/af_iucv.c 	iucv_sock_wake_msglim(sk);
sk               2122 net/iucv/af_iucv.c static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
sk               2124 net/iucv/af_iucv.c 	struct iucv_sock *iucv = iucv_sk(sk);
sk               2131 net/iucv/af_iucv.c 	if (sk->sk_state != IUCV_CONNECTED) {
sk               2136 net/iucv/af_iucv.c 	if (sk->sk_shutdown & RCV_SHUTDOWN) {
sk               2146 net/iucv/af_iucv.c 	if (sk_filter(sk, skb)) {
sk               2147 net/iucv/af_iucv.c 		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
sk               2154 net/iucv/af_iucv.c 		if (__sock_queue_rcv_skb(sk, skb))
sk               2158 net/iucv/af_iucv.c 		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
sk               2171 net/iucv/af_iucv.c 	struct sock *sk;
sk               2190 net/iucv/af_iucv.c 	sk = NULL;
sk               2192 net/iucv/af_iucv.c 	sk_for_each(sk, &iucv_sk_list.head) {
sk               2194 net/iucv/af_iucv.c 			if ((!memcmp(&iucv_sk(sk)->src_name,
sk               2196 net/iucv/af_iucv.c 			    (!memcmp(&iucv_sk(sk)->src_user_id,
sk               2198 net/iucv/af_iucv.c 			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
sk               2199 net/iucv/af_iucv.c 			    (!memcmp(&iucv_sk(sk)->dst_user_id,
sk               2201 net/iucv/af_iucv.c 				iucv = iucv_sk(sk);
sk               2205 net/iucv/af_iucv.c 			if ((!memcmp(&iucv_sk(sk)->src_name,
sk               2207 net/iucv/af_iucv.c 			    (!memcmp(&iucv_sk(sk)->src_user_id,
sk               2209 net/iucv/af_iucv.c 			    (!memcmp(&iucv_sk(sk)->dst_name,
sk               2211 net/iucv/af_iucv.c 			    (!memcmp(&iucv_sk(sk)->dst_user_id,
sk               2213 net/iucv/af_iucv.c 				iucv = iucv_sk(sk);
sk               2220 net/iucv/af_iucv.c 		sk = NULL;
sk               2234 net/iucv/af_iucv.c 		err = afiucv_hs_callback_syn(sk, skb);
sk               2238 net/iucv/af_iucv.c 		err = afiucv_hs_callback_synack(sk, skb);
sk               2242 net/iucv/af_iucv.c 		err = afiucv_hs_callback_synfin(sk, skb);
sk               2246 net/iucv/af_iucv.c 		err = afiucv_hs_callback_fin(sk, skb);
sk               2249 net/iucv/af_iucv.c 		err = afiucv_hs_callback_win(sk, skb);
sk               2261 net/iucv/af_iucv.c 		err = afiucv_hs_callback_rx(sk, skb);
sk               2277 net/iucv/af_iucv.c 	struct sock *isk = skb->sk;
sk               2278 net/iucv/af_iucv.c 	struct sock *sk = NULL;
sk               2286 net/iucv/af_iucv.c 	sk_for_each(sk, &iucv_sk_list.head)
sk               2287 net/iucv/af_iucv.c 		if (sk == isk) {
sk               2288 net/iucv/af_iucv.c 			iucv = iucv_sk(sk);
sk               2293 net/iucv/af_iucv.c 	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
sk               2304 net/iucv/af_iucv.c 				iucv_sock_wake_msglim(sk);
sk               2313 net/iucv/af_iucv.c 					iucv_sock_wake_msglim(sk);
sk               2323 net/iucv/af_iucv.c 				if (sk->sk_state == IUCV_CONNECTED) {
sk               2324 net/iucv/af_iucv.c 					sk->sk_state = IUCV_DISCONN;
sk               2325 net/iucv/af_iucv.c 					sk->sk_state_change(sk);
sk               2334 net/iucv/af_iucv.c 	if (sk->sk_state == IUCV_CLOSING) {
sk               2335 net/iucv/af_iucv.c 		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
sk               2336 net/iucv/af_iucv.c 			sk->sk_state = IUCV_CLOSED;
sk               2337 net/iucv/af_iucv.c 			sk->sk_state_change(sk);
sk               2350 net/iucv/af_iucv.c 	struct sock *sk;
sk               2356 net/iucv/af_iucv.c 		sk_for_each(sk, &iucv_sk_list.head) {
sk               2357 net/iucv/af_iucv.c 			iucv = iucv_sk(sk);
sk               2359 net/iucv/af_iucv.c 			    (sk->sk_state == IUCV_CONNECTED)) {
sk               2361 net/iucv/af_iucv.c 					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
sk               2362 net/iucv/af_iucv.c 				sk->sk_state = IUCV_DISCONN;
sk               2363 net/iucv/af_iucv.c 				sk->sk_state_change(sk);
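[annotation] The af_iucv.c entries above repeatedly show the same event-callback pattern: take the socket's backlog lock, move the socket to a new state, and wake sleepers through sk_state_change(). A minimal sketch of that pattern follows; the function name is hypothetical and the body is simplified, but bh_lock_sock(), sk_state_change() and the IUCV_* states are the real helpers quoted in the entries.

#include <net/sock.h>
#include <net/iucv/af_iucv.h>	/* IUCV_DISCONN, IUCV_CLOSED */

/* Hypothetical transport callback following the pattern shown above. */
static void example_iucv_path_severed(struct sock *sk)
{
	bh_lock_sock(sk);			/* serialize against softirq users */
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_state = IUCV_DISCONN;	/* peer path has gone away */
		sk->sk_state_change(sk);	/* wake tasks blocked in wait/poll */
	}
	bh_unlock_sock(sk);
}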
sk                119 net/kcm/kcmproc.c 		   kcm->sk.sk_receive_queue.qlen,
sk                120 net/kcm/kcmproc.c 		   sk_rmem_alloc_get(&kcm->sk),
sk                121 net/kcm/kcmproc.c 		   kcm->sk.sk_write_queue.qlen,
sk                149 net/kcm/kcmproc.c 		   psock->sk->sk_receive_queue.qlen,
sk                150 net/kcm/kcmproc.c 		   atomic_read(&psock->sk->sk_rmem_alloc),
sk                151 net/kcm/kcmproc.c 		   psock->sk->sk_write_queue.qlen,
sk                152 net/kcm/kcmproc.c 		   refcount_read(&psock->sk->sk_wmem_alloc));
sk                167 net/kcm/kcmproc.c 		if (psock->sk->sk_receive_queue.qlen) {
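[annotation] The kcmproc.c entries read per-socket queue depths and memory accounting for a /proc display. A small sketch of that read-only reporting, assuming a hypothetical helper name; only the accessors quoted above (sk_receive_queue.qlen, sk_rmem_alloc_get(), refcount_read(&sk->sk_wmem_alloc)) are real kernel APIs.

#include <linux/printk.h>
#include <net/sock.h>

/* Hypothetical diagnostic helper; reports the counters shown above. */
static void example_report_sock_usage(struct sock *sk)
{
	pr_info("rxq=%u rmem=%d txq=%u wmem=%u\n",
		sk->sk_receive_queue.qlen,		/* queued receive skbs */
		sk_rmem_alloc_get(sk),			/* bytes charged to rmem */
		sk->sk_write_queue.qlen,		/* queued transmit skbs */
		refcount_read(&sk->sk_wmem_alloc));	/* write-memory refcount */
}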
sk                 37 net/kcm/kcmsock.c static inline struct kcm_sock *kcm_sk(const struct sock *sk)
sk                 39 net/kcm/kcmsock.c 	return (struct kcm_sock *)sk;
sk                 56 net/kcm/kcmsock.c 	struct sock *csk = psock->sk;
sk                115 net/kcm/kcmsock.c static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
sk                131 net/kcm/kcmsock.c 		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
sk                134 net/kcm/kcmsock.c 			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
sk                143 net/kcm/kcmsock.c 		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
sk                145 net/kcm/kcmsock.c 			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
sk                169 net/kcm/kcmsock.c 	struct sock *sk = skb->sk;
sk                170 net/kcm/kcmsock.c 	struct kcm_sock *kcm = kcm_sk(sk);
sk                174 net/kcm/kcmsock.c 	sk_mem_uncharge(sk, len);
sk                175 net/kcm/kcmsock.c 	atomic_sub(len, &sk->sk_rmem_alloc);
sk                181 net/kcm/kcmsock.c 	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
sk                188 net/kcm/kcmsock.c static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                190 net/kcm/kcmsock.c 	struct sk_buff_head *list = &sk->sk_receive_queue;
sk                192 net/kcm/kcmsock.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk                195 net/kcm/kcmsock.c 	if (!sk_rmem_schedule(sk, skb, skb->truesize))
sk                201 net/kcm/kcmsock.c 	skb->sk = sk;
sk                203 net/kcm/kcmsock.c 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk                204 net/kcm/kcmsock.c 	sk_mem_charge(sk, skb->truesize);
sk                208 net/kcm/kcmsock.c 	if (!sock_flag(sk, SOCK_DEAD))
sk                209 net/kcm/kcmsock.c 		sk->sk_data_ready(sk);
sk                236 net/kcm/kcmsock.c 		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
sk                331 net/kcm/kcmsock.c 		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
sk                332 net/kcm/kcmsock.c 	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
sk                342 net/kcm/kcmsock.c static void psock_data_ready(struct sock *sk)
sk                346 net/kcm/kcmsock.c 	read_lock_bh(&sk->sk_callback_lock);
sk                348 net/kcm/kcmsock.c 	psock = (struct kcm_psock *)sk->sk_user_data;
sk                352 net/kcm/kcmsock.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                370 net/kcm/kcmsock.c 	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
sk                398 net/kcm/kcmsock.c static void psock_state_change(struct sock *sk)
sk                405 net/kcm/kcmsock.c 	report_csk_error(sk, EPIPE);
sk                408 net/kcm/kcmsock.c static void psock_write_space(struct sock *sk)
sk                414 net/kcm/kcmsock.c 	read_lock_bh(&sk->sk_callback_lock);
sk                416 net/kcm/kcmsock.c 	psock = (struct kcm_psock *)sk->sk_user_data;
sk                430 net/kcm/kcmsock.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                546 net/kcm/kcmsock.c 			sock_put(psock->sk);
sk                547 net/kcm/kcmsock.c 			fput(psock->sk->sk_socket->file);
sk                577 net/kcm/kcmsock.c 	struct sock *sk = &kcm->sk;
sk                593 net/kcm/kcmsock.c 		if (skb_queue_empty(&sk->sk_write_queue))
sk                596 net/kcm/kcmsock.c 		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;
sk                598 net/kcm/kcmsock.c 	} else if (skb_queue_empty(&sk->sk_write_queue)) {
sk                602 net/kcm/kcmsock.c 	head = skb_peek(&sk->sk_write_queue);
sk                647 net/kcm/kcmsock.c 			ret = kernel_sendpage(psock->sk->sk_socket,
sk                702 net/kcm/kcmsock.c 		skb_dequeue(&sk->sk_write_queue);
sk                704 net/kcm/kcmsock.c 		sk->sk_wmem_queued -= sent;
sk                707 net/kcm/kcmsock.c 	} while ((head = skb_peek(&sk->sk_write_queue)));
sk                711 net/kcm/kcmsock.c 		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
sk                716 net/kcm/kcmsock.c 	sk->sk_write_space(sk);
sk                724 net/kcm/kcmsock.c 	struct sock *sk = &kcm->sk;
sk                727 net/kcm/kcmsock.c 	lock_sock(sk);
sk                736 net/kcm/kcmsock.c 		report_csk_error(&kcm->sk, -err);
sk                741 net/kcm/kcmsock.c 	if (likely(sk->sk_socket) &&
sk                742 net/kcm/kcmsock.c 	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
sk                743 net/kcm/kcmsock.c 		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                744 net/kcm/kcmsock.c 		sk->sk_write_space(sk);
sk                748 net/kcm/kcmsock.c 	release_sock(sk);
sk                761 net/kcm/kcmsock.c 	struct sock *sk = sock->sk;
sk                762 net/kcm/kcmsock.c 	struct kcm_sock *kcm = kcm_sk(sk);
sk                764 net/kcm/kcmsock.c 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk                775 net/kcm/kcmsock.c 	lock_sock(sk);
sk                777 net/kcm/kcmsock.c 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                780 net/kcm/kcmsock.c 	if (sk->sk_err)
sk                798 net/kcm/kcmsock.c 			tskb = alloc_skb(0, sk->sk_allocation);
sk                801 net/kcm/kcmsock.c 				err = sk_stream_wait_memory(sk, &timeo);
sk                817 net/kcm/kcmsock.c 		if (!sk_stream_memory_free(sk)) {
sk                819 net/kcm/kcmsock.c 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                820 net/kcm/kcmsock.c 			err = sk_stream_wait_memory(sk, &timeo);
sk                825 net/kcm/kcmsock.c 		head = alloc_skb(0, sk->sk_allocation);
sk                828 net/kcm/kcmsock.c 			err = sk_stream_wait_memory(sk, &timeo);
sk                845 net/kcm/kcmsock.c 	sk->sk_wmem_queued += size;
sk                846 net/kcm/kcmsock.c 	sk_mem_charge(sk, size);
sk                855 net/kcm/kcmsock.c 		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
sk                858 net/kcm/kcmsock.c 		__skb_queue_tail(&sk->sk_write_queue, head);
sk                873 net/kcm/kcmsock.c 				report_csk_error(&kcm->sk, -err);
sk                884 net/kcm/kcmsock.c 	release_sock(sk);
sk                890 net/kcm/kcmsock.c 	err = sk_stream_error(sk, flags, err);
sk                893 net/kcm/kcmsock.c 	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
sk                894 net/kcm/kcmsock.c 		sk->sk_write_space(sk);
sk                896 net/kcm/kcmsock.c 	release_sock(sk);
sk                902 net/kcm/kcmsock.c 	struct sock *sk = sock->sk;
sk                903 net/kcm/kcmsock.c 	struct kcm_sock *kcm = kcm_sk(sk);
sk                906 net/kcm/kcmsock.c 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
sk                911 net/kcm/kcmsock.c 	lock_sock(sk);
sk                914 net/kcm/kcmsock.c 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                916 net/kcm/kcmsock.c 	if (sk->sk_err)
sk                927 net/kcm/kcmsock.c 	if (!sk_stream_memory_free(sk)) {
sk                929 net/kcm/kcmsock.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                930 net/kcm/kcmsock.c 		err = sk_stream_wait_memory(sk, &timeo);
sk                937 net/kcm/kcmsock.c 		head = alloc_skb(0, sk->sk_allocation);
sk                940 net/kcm/kcmsock.c 			err = sk_stream_wait_memory(sk, &timeo);
sk                944 net/kcm/kcmsock.c 			head = alloc_skb(0, sk->sk_allocation);
sk                959 net/kcm/kcmsock.c 		struct page_frag *pfrag = sk_page_frag(sk);
sk                961 net/kcm/kcmsock.c 		if (!sk_page_frag_refill(sk, pfrag))
sk                969 net/kcm/kcmsock.c 				tskb = alloc_skb(0, sk->sk_allocation);
sk                988 net/kcm/kcmsock.c 		if (!sk_wmem_schedule(sk, copy))
sk                991 net/kcm/kcmsock.c 		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
sk               1018 net/kcm/kcmsock.c 		err = sk_stream_wait_memory(sk, &timeo);
sk               1024 net/kcm/kcmsock.c 		bool not_busy = skb_queue_empty(&sk->sk_write_queue);
sk               1028 net/kcm/kcmsock.c 			__skb_queue_tail(&sk->sk_write_queue, head);
sk               1044 net/kcm/kcmsock.c 				report_csk_error(&kcm->sk, -err);
sk               1058 net/kcm/kcmsock.c 	release_sock(sk);
sk               1074 net/kcm/kcmsock.c 	err = sk_stream_error(sk, msg->msg_flags, err);
sk               1077 net/kcm/kcmsock.c 	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
sk               1078 net/kcm/kcmsock.c 		sk->sk_write_space(sk);
sk               1080 net/kcm/kcmsock.c 	release_sock(sk);
sk               1084 net/kcm/kcmsock.c static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
sk               1089 net/kcm/kcmsock.c 	while (!(skb = skb_peek(&sk->sk_receive_queue))) {
sk               1090 net/kcm/kcmsock.c 		if (sk->sk_err) {
sk               1091 net/kcm/kcmsock.c 			*err = sock_error(sk);
sk               1095 net/kcm/kcmsock.c 		if (sock_flag(sk, SOCK_DONE))
sk               1103 net/kcm/kcmsock.c 		sk_wait_data(sk, &timeo, NULL);
sk               1118 net/kcm/kcmsock.c 	struct sock *sk = sock->sk;
sk               1119 net/kcm/kcmsock.c 	struct kcm_sock *kcm = kcm_sk(sk);
sk               1126 net/kcm/kcmsock.c 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               1128 net/kcm/kcmsock.c 	lock_sock(sk);
sk               1130 net/kcm/kcmsock.c 	skb = kcm_wait_data(sk, flags, timeo, &err);
sk               1161 net/kcm/kcmsock.c 			skb_unlink(skb, &sk->sk_receive_queue);
sk               1167 net/kcm/kcmsock.c 	release_sock(sk);
sk               1176 net/kcm/kcmsock.c 	struct sock *sk = sock->sk;
sk               1177 net/kcm/kcmsock.c 	struct kcm_sock *kcm = kcm_sk(sk);
sk               1186 net/kcm/kcmsock.c 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               1188 net/kcm/kcmsock.c 	lock_sock(sk);
sk               1190 net/kcm/kcmsock.c 	skb = kcm_wait_data(sk, flags, timeo, &err);
sk               1201 net/kcm/kcmsock.c 	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
sk               1218 net/kcm/kcmsock.c 	release_sock(sk);
sk               1223 net/kcm/kcmsock.c 	release_sock(sk);
sk               1247 net/kcm/kcmsock.c 		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
sk               1272 net/kcm/kcmsock.c 	struct kcm_sock *kcm = kcm_sk(sock->sk);
sk               1289 net/kcm/kcmsock.c 		lock_sock(&kcm->sk);
sk               1294 net/kcm/kcmsock.c 		release_sock(&kcm->sk);
sk               1306 net/kcm/kcmsock.c 	struct kcm_sock *kcm = kcm_sk(sock->sk);
sk               1344 net/kcm/kcmsock.c 	kcm->sk.sk_state = TCP_ESTABLISHED;
sk               1374 net/kcm/kcmsock.c 	struct kcm_sock *kcm = kcm_sk(sock->sk);
sk               1387 net/kcm/kcmsock.c 	csk = csock->sk;
sk               1413 net/kcm/kcmsock.c 	psock->sk = csk;
sk               1507 net/kcm/kcmsock.c 	struct sock *csk = psock->sk;
sk               1603 net/kcm/kcmsock.c 	struct kcm_sock *kcm = kcm_sk(sock->sk);
sk               1614 net/kcm/kcmsock.c 	csk = csock->sk;
sk               1625 net/kcm/kcmsock.c 		if (psock->sk != csk)
sk               1674 net/kcm/kcmsock.c 	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
sk               1681 net/kcm/kcmsock.c 	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);
sk               1683 net/kcm/kcmsock.c 	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
sk               1783 net/kcm/kcmsock.c 	struct sock *sk = &kcm->sk;
sk               1801 net/kcm/kcmsock.c 	requeue_rx_msgs(mux, &sk->sk_receive_queue);
sk               1805 net/kcm/kcmsock.c 	if (WARN_ON(sk_rmem_alloc_get(sk)))
sk               1824 net/kcm/kcmsock.c 	sock_put(&kcm->sk);
sk               1832 net/kcm/kcmsock.c 	struct sock *sk = sock->sk;
sk               1837 net/kcm/kcmsock.c 	if (!sk)
sk               1840 net/kcm/kcmsock.c 	kcm = kcm_sk(sk);
sk               1843 net/kcm/kcmsock.c 	sock_orphan(sk);
sk               1846 net/kcm/kcmsock.c 	lock_sock(sk);
sk               1851 net/kcm/kcmsock.c 	__skb_queue_purge(&sk->sk_write_queue);
sk               1859 net/kcm/kcmsock.c 	release_sock(sk);
sk               1876 net/kcm/kcmsock.c 	lock_sock(sk);
sk               1886 net/kcm/kcmsock.c 	release_sock(sk);
sk               1891 net/kcm/kcmsock.c 	sock->sk = NULL;
sk               1946 net/kcm/kcmsock.c 	struct sock *sk;
sk               1963 net/kcm/kcmsock.c 	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
sk               1964 net/kcm/kcmsock.c 	if (!sk)
sk               1970 net/kcm/kcmsock.c 		sk_free(sk);
sk               1995 net/kcm/kcmsock.c 	sock_init_data(sock, sk);
sk               1996 net/kcm/kcmsock.c 	init_kcm_sock(kcm_sk(sk), mux);
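[annotation] The kcm_queue_rcv_skb() entries above outline the usual receive-side admission pattern: reject the skb when the receive buffer is full, charge its truesize to the socket, queue it, then notify readers. A minimal sketch of that pattern, with a hypothetical function name and the destructor wiring omitted; every call used is one that appears in the listing.

#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static int example_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;			/* receive buffer already full */

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;		/* no memory quota available */

	skb->sk = sk;				/* account the skb to this socket */
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(&sk->sk_receive_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);		/* wake readers / poll waiters */
	return 0;
}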
sk                 47 net/key/af_key.c 	struct sock	sk;
sk                 54 net/key/af_key.c 		int		(*dump)(struct pfkey_sock *sk);
sk                 55 net/key/af_key.c 		void		(*done)(struct pfkey_sock *sk);
sk                 69 net/key/af_key.c static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
sk                 71 net/key/af_key.c 	return (struct pfkey_sock *)sk;
sk                 74 net/key/af_key.c static int pfkey_can_dump(const struct sock *sk)
sk                 76 net/key/af_key.c 	if (3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf)
sk                 94 net/key/af_key.c static void pfkey_sock_destruct(struct sock *sk)
sk                 96 net/key/af_key.c 	struct net *net = sock_net(sk);
sk                 99 net/key/af_key.c 	pfkey_terminate_dump(pfkey_sk(sk));
sk                100 net/key/af_key.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                102 net/key/af_key.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                103 net/key/af_key.c 		pr_err("Attempt to release alive pfkey socket: %p\n", sk);
sk                107 net/key/af_key.c 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
sk                108 net/key/af_key.c 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
sk                115 net/key/af_key.c static void pfkey_insert(struct sock *sk)
sk                117 net/key/af_key.c 	struct net *net = sock_net(sk);
sk                121 net/key/af_key.c 	sk_add_node_rcu(sk, &net_pfkey->table);
sk                125 net/key/af_key.c static void pfkey_remove(struct sock *sk)
sk                128 net/key/af_key.c 	sk_del_node_init_rcu(sk);
sk                142 net/key/af_key.c 	struct sock *sk;
sk                154 net/key/af_key.c 	sk = sk_alloc(net, PF_KEY, GFP_KERNEL, &key_proto, kern);
sk                155 net/key/af_key.c 	if (sk == NULL)
sk                158 net/key/af_key.c 	pfk = pfkey_sk(sk);
sk                162 net/key/af_key.c 	sock_init_data(sock, sk);
sk                164 net/key/af_key.c 	sk->sk_family = PF_KEY;
sk                165 net/key/af_key.c 	sk->sk_destruct = pfkey_sock_destruct;
sk                169 net/key/af_key.c 	pfkey_insert(sk);
sk                178 net/key/af_key.c 	struct sock *sk = sock->sk;
sk                180 net/key/af_key.c 	if (!sk)
sk                183 net/key/af_key.c 	pfkey_remove(sk);
sk                185 net/key/af_key.c 	sock_orphan(sk);
sk                186 net/key/af_key.c 	sock->sk = NULL;
sk                187 net/key/af_key.c 	skb_queue_purge(&sk->sk_write_queue);
sk                190 net/key/af_key.c 	sock_put(sk);
sk                196 net/key/af_key.c 			       struct sock *sk)
sk                200 net/key/af_key.c 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
sk                206 net/key/af_key.c 		skb_set_owner_r(skb, sk);
sk                207 net/key/af_key.c 		skb_queue_tail(&sk->sk_receive_queue, skb);
sk                208 net/key/af_key.c 		sk->sk_data_ready(sk);
sk                224 net/key/af_key.c 	struct sock *sk;
sk                234 net/key/af_key.c 	sk_for_each_rcu(sk, &net_pfkey->table) {
sk                235 net/key/af_key.c 		struct pfkey_sock *pfk = pfkey_sk(sk);
sk                243 net/key/af_key.c 			pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
sk                246 net/key/af_key.c 		if (sk == one_sk)
sk                258 net/key/af_key.c 		err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk);
sk                292 net/key/af_key.c 		if (!pfkey_can_dump(&pfk->sk)) {
sk                301 net/key/af_key.c 				&pfk->sk, sock_net(&pfk->sk));
sk                318 net/key/af_key.c static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
sk                344 net/key/af_key.c 	pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
sk               1304 net/key/af_key.c static int pfkey_reserved(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1309 net/key/af_key.c static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1311 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               1409 net/key/af_key.c 	pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
sk               1414 net/key/af_key.c static int pfkey_acquire(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1416 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               1501 net/key/af_key.c static int pfkey_add(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1503 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               1538 net/key/af_key.c static int pfkey_delete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1540 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               1578 net/key/af_key.c static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1580 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               1609 net/key/af_key.c 	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
sk               1690 net/key/af_key.c static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1692 net/key/af_key.c 	struct pfkey_sock *pfk = pfkey_sk(sk);
sk               1714 net/key/af_key.c 	pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
sk               1715 net/key/af_key.c 			sock_net(sk));
sk               1719 net/key/af_key.c static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
sk               1732 net/key/af_key.c 	return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
sk               1733 net/key/af_key.c 			       sock_net(sk));
sk               1759 net/key/af_key.c static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1761 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               1771 net/key/af_key.c 	err2 = unicast_flush_resp(sk, hdr);
sk               1794 net/key/af_key.c 	if (!pfkey_can_dump(&pfk->sk))
sk               1812 net/key/af_key.c 				&pfk->sk, sock_net(&pfk->sk));
sk               1820 net/key/af_key.c 	struct net *net = sock_net(&pfk->sk);
sk               1826 net/key/af_key.c 	struct net *net = sock_net(&pfk->sk);
sk               1831 net/key/af_key.c static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1835 net/key/af_key.c 	struct pfkey_sock *pfk = pfkey_sk(sk);
sk               1877 net/key/af_key.c static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               1879 net/key/af_key.c 	struct pfkey_sock *pfk = pfkey_sk(sk);
sk               1899 net/key/af_key.c 	pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
sk               2236 net/key/af_key.c static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               2238 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               2351 net/key/af_key.c static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               2353 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               2426 net/key/af_key.c static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struct sadb_msg *hdr, int dir)
sk               2451 net/key/af_key.c 	pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
sk               2537 net/key/af_key.c static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
sk               2549 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               2629 net/key/af_key.c static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
sk               2637 net/key/af_key.c static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               2639 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               2670 net/key/af_key.c 		err = key_pol_get_resp(sk, xp, hdr, dir);
sk               2685 net/key/af_key.c 	if (!pfkey_can_dump(&pfk->sk))
sk               2708 net/key/af_key.c 				&pfk->sk, sock_net(&pfk->sk));
sk               2716 net/key/af_key.c 	struct net *net = sock_net(&pfk->sk);
sk               2727 net/key/af_key.c static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               2729 net/key/af_key.c 	struct pfkey_sock *pfk = pfkey_sk(sk);
sk               2769 net/key/af_key.c static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
sk               2771 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               2776 net/key/af_key.c 	err2 = unicast_flush_resp(sk, hdr);
sk               2793 net/key/af_key.c typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
sk               2821 net/key/af_key.c static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr)
sk               2827 net/key/af_key.c 			BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
sk               2834 net/key/af_key.c 			err = pfkey_funcs[hdr->sadb_msg_type](sk, skb, hdr, ext_hdrs);
sk               3117 net/key/af_key.c 	struct sock *sk;
sk               3121 net/key/af_key.c 	sk_for_each_rcu(sk, &net_pfkey->table) {
sk               3122 net/key/af_key.c 		if (pfkey_sk(sk)->registered) {
sk               3240 net/key/af_key.c static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
sk               3243 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               3248 net/key/af_key.c 	switch (sk->sk_family) {
sk               3289 net/key/af_key.c 	xp->family = sk->sk_family;
sk               3645 net/key/af_key.c 	struct sock *sk = sock->sk;
sk               3649 net/key/af_key.c 	struct net *net = sock_net(sk);
sk               3656 net/key/af_key.c 	if ((unsigned int)len > sk->sk_sndbuf - 32)
sk               3673 net/key/af_key.c 	err = pfkey_process(sk, skb, hdr);
sk               3677 net/key/af_key.c 	if (err && hdr && pfkey_error(hdr, err, sk) == 0)
sk               3687 net/key/af_key.c 	struct sock *sk = sock->sk;
sk               3688 net/key/af_key.c 	struct pfkey_sock *pfk = pfkey_sk(sk);
sk               3696 net/key/af_key.c 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
sk               3711 net/key/af_key.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk               3716 net/key/af_key.c 	    3 * atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
sk               3720 net/key/af_key.c 	skb_free_datagram(sk, skb);
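[annotation] The pfkey_can_dump() and pfkey_recvmsg() entries above both throttle SADB dumps against the receiver's buffer: the dump only advances while roughly no more than two-thirds of sk_rcvbuf is in use. A sketch of that check under a hypothetical wrapper name; the arithmetic mirrors the quoted lines.

#include <net/sock.h>

static bool example_can_continue_dump(const struct sock *sk)
{
	/* 3 * rmem <= 2 * rcvbuf  <=>  rmem is at most 2/3 of rcvbuf */
	return 3 * atomic_read(&sk->sk_rmem_alloc) <= 2 * sk->sk_rcvbuf;
}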
sk                111 net/l2tp/l2tp_core.c static bool l2tp_sk_is_v6(struct sock *sk)
sk                113 net/l2tp/l2tp_core.c 	return sk->sk_family == PF_INET6 &&
sk                114 net/l2tp/l2tp_core.c 	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
sk                118 net/l2tp/l2tp_core.c static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
sk                120 net/l2tp/l2tp_core.c 	return sk->sk_user_data;
sk                910 net/l2tp/l2tp_core.c int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
sk                914 net/l2tp/l2tp_core.c 	tunnel = rcu_dereference_sk_user_data(sk);
sk               1059 net/l2tp/l2tp_core.c 	struct sock *sk = tunnel->sock;
sk               1088 net/l2tp/l2tp_core.c 	bh_lock_sock(sk);
sk               1089 net/l2tp/l2tp_core.c 	if (sock_owned_by_user(sk)) {
sk               1098 net/l2tp/l2tp_core.c 	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
sk               1106 net/l2tp/l2tp_core.c 	skb_dst_set(skb, sk_dst_check(sk, 0));
sk               1108 net/l2tp/l2tp_core.c 	inet = inet_sk(sk);
sk               1123 net/l2tp/l2tp_core.c 		if (l2tp_sk_is_v6(sk))
sk               1124 net/l2tp/l2tp_core.c 			udp6_set_csum(udp_get_no_check6_tx(sk),
sk               1125 net/l2tp/l2tp_core.c 				      skb, &inet6_sk(sk)->saddr,
sk               1126 net/l2tp/l2tp_core.c 				      &sk->sk_v6_daddr, udp_len);
sk               1129 net/l2tp/l2tp_core.c 		udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
sk               1139 net/l2tp/l2tp_core.c 	bh_unlock_sock(sk);
sk               1153 net/l2tp/l2tp_core.c static void l2tp_tunnel_destruct(struct sock *sk)
sk               1155 net/l2tp/l2tp_core.c 	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
sk               1166 net/l2tp/l2tp_core.c 		(udp_sk(sk))->encap_type = 0;
sk               1167 net/l2tp/l2tp_core.c 		(udp_sk(sk))->encap_rcv = NULL;
sk               1168 net/l2tp/l2tp_core.c 		(udp_sk(sk))->encap_destroy = NULL;
sk               1175 net/l2tp/l2tp_core.c 	sk->sk_destruct = tunnel->old_sk_destruct;
sk               1176 net/l2tp/l2tp_core.c 	sk->sk_user_data = NULL;
sk               1179 net/l2tp/l2tp_core.c 	if (sk->sk_destruct)
sk               1180 net/l2tp/l2tp_core.c 		(*sk->sk_destruct)(sk);
sk               1240 net/l2tp/l2tp_core.c static void l2tp_udp_encap_destroy(struct sock *sk)
sk               1242 net/l2tp/l2tp_core.c 	struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
sk               1253 net/l2tp/l2tp_core.c 	struct sock *sk = tunnel->sock;
sk               1254 net/l2tp/l2tp_core.c 	struct socket *sock = sk->sk_socket;
sk               1454 net/l2tp/l2tp_core.c static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
sk               1457 net/l2tp/l2tp_core.c 	if (!net_eq(sock_net(sk), net))
sk               1460 net/l2tp/l2tp_core.c 	if (sk->sk_type != SOCK_DGRAM)
sk               1463 net/l2tp/l2tp_core.c 	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
sk               1466 net/l2tp/l2tp_core.c 	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
sk               1467 net/l2tp/l2tp_core.c 	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
sk               1470 net/l2tp/l2tp_core.c 	if (sk->sk_user_data)
sk               1482 net/l2tp/l2tp_core.c 	struct sock *sk;
sk               1496 net/l2tp/l2tp_core.c 		ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
sk               1516 net/l2tp/l2tp_core.c 	sk = sock->sk;
sk               1517 net/l2tp/l2tp_core.c 	sock_hold(sk);
sk               1518 net/l2tp/l2tp_core.c 	tunnel->sock = sk;
sk               1530 net/l2tp/l2tp_core.c 		sk->sk_user_data = tunnel;
sk               1533 net/l2tp/l2tp_core.c 	tunnel->old_sk_destruct = sk->sk_destruct;
sk               1534 net/l2tp/l2tp_core.c 	sk->sk_destruct = &l2tp_tunnel_destruct;
sk               1535 net/l2tp/l2tp_core.c 	lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
sk               1537 net/l2tp/l2tp_core.c 	sk->sk_allocation = GFP_ATOMIC;
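[annotation] The l2tp_validate_socket() entries above enumerate the checks applied to a candidate tunnel socket: same network namespace, datagram type, IPv4/IPv6 family, matching protocol for the encapsulation, and no existing sk_user_data. A condensed sketch under an assumed function name and with assumed error codes, using only the fields and helpers shown in the listing.

#include <linux/errno.h>
#include <net/sock.h>

static int example_check_tunnel_sock(const struct sock *sk, const struct net *net)
{
	if (!net_eq(sock_net(sk), net))
		return -EINVAL;			/* wrong network namespace */
	if (sk->sk_type != SOCK_DGRAM)
		return -EPROTONOSUPPORT;	/* must be a datagram socket */
	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
		return -EPROTONOSUPPORT;	/* IPv4 or IPv6 only */
	if (sk->sk_user_data)
		return -EBUSY;			/* already attached to a tunnel */
	return 0;
}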
sk                224 net/l2tp/l2tp_core.h int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
sk                233 net/l2tp/l2tp_core.h int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
sk                289 net/l2tp/l2tp_core.h 	struct sock *sk = tunnel->sock;
sk                291 net/l2tp/l2tp_core.h 	return sk && (rcu_access_pointer(sk->sk_policy[0]) ||
sk                292 net/l2tp/l2tp_core.h 		      rcu_access_pointer(sk->sk_policy[1]));
sk                 41 net/l2tp/l2tp_ip.c static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
sk                 43 net/l2tp/l2tp_ip.c 	return (struct l2tp_ip_sock *)sk;
sk                 49 net/l2tp/l2tp_ip.c 	struct sock *sk;
sk                 51 net/l2tp/l2tp_ip.c 	sk_for_each_bound(sk, &l2tp_ip_bind_table) {
sk                 52 net/l2tp/l2tp_ip.c 		const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
sk                 53 net/l2tp/l2tp_ip.c 		const struct inet_sock *inet = inet_sk(sk);
sk                 55 net/l2tp/l2tp_ip.c 		if (!net_eq(sock_net(sk), net))
sk                 58 net/l2tp/l2tp_ip.c 		if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
sk                 74 net/l2tp/l2tp_ip.c 	sk = NULL;
sk                 76 net/l2tp/l2tp_ip.c 	return sk;
sk                115 net/l2tp/l2tp_ip.c 	struct sock *sk;
sk                183 net/l2tp/l2tp_ip.c 	sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr, inet_iif(skb),
sk                185 net/l2tp/l2tp_ip.c 	if (!sk) {
sk                189 net/l2tp/l2tp_ip.c 	sock_hold(sk);
sk                192 net/l2tp/l2tp_ip.c 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
sk                197 net/l2tp/l2tp_ip.c 	return sk_receive_skb(sk, skb, 1);
sk                204 net/l2tp/l2tp_ip.c 	sock_put(sk);
sk                211 net/l2tp/l2tp_ip.c static int l2tp_ip_hash(struct sock *sk)
sk                213 net/l2tp/l2tp_ip.c 	if (sk_unhashed(sk)) {
sk                215 net/l2tp/l2tp_ip.c 		sk_add_node(sk, &l2tp_ip_table);
sk                221 net/l2tp/l2tp_ip.c static void l2tp_ip_unhash(struct sock *sk)
sk                223 net/l2tp/l2tp_ip.c 	if (sk_unhashed(sk))
sk                226 net/l2tp/l2tp_ip.c 	sk_del_node_init(sk);
sk                230 net/l2tp/l2tp_ip.c static int l2tp_ip_open(struct sock *sk)
sk                233 net/l2tp/l2tp_ip.c 	inet_sk(sk)->inet_num = IPPROTO_L2TP;
sk                235 net/l2tp/l2tp_ip.c 	l2tp_ip_hash(sk);
sk                239 net/l2tp/l2tp_ip.c static void l2tp_ip_close(struct sock *sk, long timeout)
sk                242 net/l2tp/l2tp_ip.c 	hlist_del_init(&sk->sk_bind_node);
sk                243 net/l2tp/l2tp_ip.c 	sk_del_node_init(sk);
sk                245 net/l2tp/l2tp_ip.c 	sk_common_release(sk);
sk                248 net/l2tp/l2tp_ip.c static void l2tp_ip_destroy_sock(struct sock *sk)
sk                251 net/l2tp/l2tp_ip.c 	struct l2tp_tunnel *tunnel = sk->sk_user_data;
sk                253 net/l2tp/l2tp_ip.c 	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
sk                260 net/l2tp/l2tp_ip.c static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                262 net/l2tp/l2tp_ip.c 	struct inet_sock *inet = inet_sk(sk);
sk                264 net/l2tp/l2tp_ip.c 	struct net *net = sock_net(sk);
sk                273 net/l2tp/l2tp_ip.c 	lock_sock(sk);
sk                276 net/l2tp/l2tp_ip.c 	if (!sock_flag(sk, SOCK_ZAPPED))
sk                279 net/l2tp/l2tp_ip.c 	if (sk->sk_state != TCP_CLOSE)
sk                295 net/l2tp/l2tp_ip.c 				  sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
sk                301 net/l2tp/l2tp_ip.c 	sk_dst_reset(sk);
sk                302 net/l2tp/l2tp_ip.c 	l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
sk                304 net/l2tp/l2tp_ip.c 	sk_add_bind_node(sk, &l2tp_ip_bind_table);
sk                305 net/l2tp/l2tp_ip.c 	sk_del_node_init(sk);
sk                309 net/l2tp/l2tp_ip.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                312 net/l2tp/l2tp_ip.c 	release_sock(sk);
sk                317 net/l2tp/l2tp_ip.c static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                328 net/l2tp/l2tp_ip.c 	lock_sock(sk);
sk                331 net/l2tp/l2tp_ip.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk                336 net/l2tp/l2tp_ip.c 	rc = __ip4_datagram_connect(sk, uaddr, addr_len);
sk                340 net/l2tp/l2tp_ip.c 	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
sk                343 net/l2tp/l2tp_ip.c 	hlist_del_init(&sk->sk_bind_node);
sk                344 net/l2tp/l2tp_ip.c 	sk_add_bind_node(sk, &l2tp_ip_bind_table);
sk                348 net/l2tp/l2tp_ip.c 	release_sock(sk);
sk                353 net/l2tp/l2tp_ip.c static int l2tp_ip_disconnect(struct sock *sk, int flags)
sk                355 net/l2tp/l2tp_ip.c 	if (sock_flag(sk, SOCK_ZAPPED))
sk                358 net/l2tp/l2tp_ip.c 	return __udp_disconnect(sk, flags);
sk                364 net/l2tp/l2tp_ip.c 	struct sock *sk		= sock->sk;
sk                365 net/l2tp/l2tp_ip.c 	struct inet_sock *inet	= inet_sk(sk);
sk                366 net/l2tp/l2tp_ip.c 	struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
sk                386 net/l2tp/l2tp_ip.c static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
sk                391 net/l2tp/l2tp_ip.c 	rc = sock_queue_rcv_skb(sk, skb);
sk                398 net/l2tp/l2tp_ip.c 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
sk                406 net/l2tp/l2tp_ip.c static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk                410 net/l2tp/l2tp_ip.c 	struct inet_sock *inet = inet_sk(sk);
sk                416 net/l2tp/l2tp_ip.c 	lock_sock(sk);
sk                419 net/l2tp/l2tp_ip.c 	if (sock_flag(sk, SOCK_DEAD))
sk                438 net/l2tp/l2tp_ip.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk                447 net/l2tp/l2tp_ip.c 	skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
sk                470 net/l2tp/l2tp_ip.c 		rt = (struct rtable *) __sk_dst_check(sk, 0);
sk                486 net/l2tp/l2tp_ip.c 		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
sk                489 net/l2tp/l2tp_ip.c 					   sk->sk_protocol, RT_CONN_FLAGS(sk),
sk                490 net/l2tp/l2tp_ip.c 					   sk->sk_bound_dev_if);
sk                494 net/l2tp/l2tp_ip.c 			sk_setup_caps(sk, &rt->dst);
sk                508 net/l2tp/l2tp_ip.c 	rc = ip_queue_xmit(sk, skb, &inet->cork.fl);
sk                516 net/l2tp/l2tp_ip.c 	release_sock(sk);
sk                521 net/l2tp/l2tp_ip.c 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
sk                527 net/l2tp/l2tp_ip.c static int l2tp_ip_recvmsg(struct sock *sk, struct msghdr *msg,
sk                530 net/l2tp/l2tp_ip.c 	struct inet_sock *inet = inet_sk(sk);
sk                539 net/l2tp/l2tp_ip.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                553 net/l2tp/l2tp_ip.c 	sock_recv_timestamp(msg, sk, skb);
sk                568 net/l2tp/l2tp_ip.c 	skb_free_datagram(sk, skb);
sk                573 net/l2tp/l2tp_ip.c int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk                580 net/l2tp/l2tp_ip.c 		amount = sk_wmem_alloc_get(sk);
sk                583 net/l2tp/l2tp_ip.c 		spin_lock_bh(&sk->sk_receive_queue.lock);
sk                584 net/l2tp/l2tp_ip.c 		skb = skb_peek(&sk->sk_receive_queue);
sk                586 net/l2tp/l2tp_ip.c 		spin_unlock_bh(&sk->sk_receive_queue.lock);
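
The l2tp_ip.c entries above implement the unmanaged L2TPv3 IP-encapsulation socket family. As a rough illustration of the user-space side they service, the sketch below opens such a socket and binds it to a connection ID; it assumes the kernel UAPI header <linux/l2tp.h> providing IPPROTO_L2TP and struct sockaddr_l2tpip, that the l2tp_ip module is available, and the connection ID 42 is arbitrary.

/*
 * Hedged sketch: user-space creation of an unmanaged L2TPv3 IP-encap
 * socket.  Verify struct sockaddr_l2tpip against your installed headers.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/l2tp.h>

int main(void)
{
	struct sockaddr_l2tpip local;
	int fd;

	/* SOCK_DGRAM + IPPROTO_L2TP selects the l2tp_ip protocol ops. */
	fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
	if (fd < 0) {
		perror("socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP)");
		return 1;
	}

	memset(&local, 0, sizeof(local));
	local.l2tp_family      = AF_INET;
	local.l2tp_addr.s_addr = htonl(INADDR_ANY);
	local.l2tp_conn_id     = 42;   /* tunnel ID this socket receives for */

	/* Ends up in l2tp_ip_bind(), which hashes the socket into
	 * l2tp_ip_bind_table by address and connection ID (see above). */
	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0)
		perror("bind");

	close(fd);
	return 0;
}
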
sk                 49 net/l2tp/l2tp_ip6.c static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
sk                 51 net/l2tp/l2tp_ip6.c 	return (struct l2tp_ip6_sock *)sk;
sk                 59 net/l2tp/l2tp_ip6.c 	struct sock *sk;
sk                 61 net/l2tp/l2tp_ip6.c 	sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
sk                 62 net/l2tp/l2tp_ip6.c 		const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
sk                 63 net/l2tp/l2tp_ip6.c 		const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
sk                 64 net/l2tp/l2tp_ip6.c 		const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
sk                 66 net/l2tp/l2tp_ip6.c 		if (!net_eq(sock_net(sk), net))
sk                 69 net/l2tp/l2tp_ip6.c 		if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
sk                 86 net/l2tp/l2tp_ip6.c 	sk = NULL;
sk                 88 net/l2tp/l2tp_ip6.c 	return sk;
sk                127 net/l2tp/l2tp_ip6.c 	struct sock *sk;
sk                195 net/l2tp/l2tp_ip6.c 	sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
sk                197 net/l2tp/l2tp_ip6.c 	if (!sk) {
sk                201 net/l2tp/l2tp_ip6.c 	sock_hold(sk);
sk                204 net/l2tp/l2tp_ip6.c 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
sk                209 net/l2tp/l2tp_ip6.c 	return sk_receive_skb(sk, skb, 1);
sk                216 net/l2tp/l2tp_ip6.c 	sock_put(sk);
sk                223 net/l2tp/l2tp_ip6.c static int l2tp_ip6_hash(struct sock *sk)
sk                225 net/l2tp/l2tp_ip6.c 	if (sk_unhashed(sk)) {
sk                227 net/l2tp/l2tp_ip6.c 		sk_add_node(sk, &l2tp_ip6_table);
sk                233 net/l2tp/l2tp_ip6.c static void l2tp_ip6_unhash(struct sock *sk)
sk                235 net/l2tp/l2tp_ip6.c 	if (sk_unhashed(sk))
sk                238 net/l2tp/l2tp_ip6.c 	sk_del_node_init(sk);
sk                242 net/l2tp/l2tp_ip6.c static int l2tp_ip6_open(struct sock *sk)
sk                245 net/l2tp/l2tp_ip6.c 	inet_sk(sk)->inet_num = IPPROTO_L2TP;
sk                247 net/l2tp/l2tp_ip6.c 	l2tp_ip6_hash(sk);
sk                251 net/l2tp/l2tp_ip6.c static void l2tp_ip6_close(struct sock *sk, long timeout)
sk                254 net/l2tp/l2tp_ip6.c 	hlist_del_init(&sk->sk_bind_node);
sk                255 net/l2tp/l2tp_ip6.c 	sk_del_node_init(sk);
sk                258 net/l2tp/l2tp_ip6.c 	sk_common_release(sk);
sk                261 net/l2tp/l2tp_ip6.c static void l2tp_ip6_destroy_sock(struct sock *sk)
sk                263 net/l2tp/l2tp_ip6.c 	struct l2tp_tunnel *tunnel = sk->sk_user_data;
sk                265 net/l2tp/l2tp_ip6.c 	lock_sock(sk);
sk                266 net/l2tp/l2tp_ip6.c 	ip6_flush_pending_frames(sk);
sk                267 net/l2tp/l2tp_ip6.c 	release_sock(sk);
sk                272 net/l2tp/l2tp_ip6.c 	inet6_destroy_sock(sk);
sk                275 net/l2tp/l2tp_ip6.c static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk                277 net/l2tp/l2tp_ip6.c 	struct inet_sock *inet = inet_sk(sk);
sk                278 net/l2tp/l2tp_ip6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                280 net/l2tp/l2tp_ip6.c 	struct net *net = sock_net(sk);
sk                301 net/l2tp/l2tp_ip6.c 	lock_sock(sk);
sk                304 net/l2tp/l2tp_ip6.c 	if (!sock_flag(sk, SOCK_ZAPPED))
sk                307 net/l2tp/l2tp_ip6.c 	if (sk->sk_state != TCP_CLOSE)
sk                310 net/l2tp/l2tp_ip6.c 	bound_dev_if = sk->sk_bound_dev_if;
sk                328 net/l2tp/l2tp_ip6.c 			dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if);
sk                338 net/l2tp/l2tp_ip6.c 		if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
sk                353 net/l2tp/l2tp_ip6.c 	sk->sk_bound_dev_if = bound_dev_if;
sk                354 net/l2tp/l2tp_ip6.c 	sk->sk_v6_rcv_saddr = addr->l2tp_addr;
sk                357 net/l2tp/l2tp_ip6.c 	l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
sk                359 net/l2tp/l2tp_ip6.c 	sk_add_bind_node(sk, &l2tp_ip6_bind_table);
sk                360 net/l2tp/l2tp_ip6.c 	sk_del_node_init(sk);
sk                363 net/l2tp/l2tp_ip6.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                364 net/l2tp/l2tp_ip6.c 	release_sock(sk);
sk                370 net/l2tp/l2tp_ip6.c 	release_sock(sk);
sk                375 net/l2tp/l2tp_ip6.c static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
sk                400 net/l2tp/l2tp_ip6.c 	lock_sock(sk);
sk                403 net/l2tp/l2tp_ip6.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk                408 net/l2tp/l2tp_ip6.c 	rc = __ip6_datagram_connect(sk, uaddr, addr_len);
sk                412 net/l2tp/l2tp_ip6.c 	l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
sk                415 net/l2tp/l2tp_ip6.c 	hlist_del_init(&sk->sk_bind_node);
sk                416 net/l2tp/l2tp_ip6.c 	sk_add_bind_node(sk, &l2tp_ip6_bind_table);
sk                420 net/l2tp/l2tp_ip6.c 	release_sock(sk);
sk                425 net/l2tp/l2tp_ip6.c static int l2tp_ip6_disconnect(struct sock *sk, int flags)
sk                427 net/l2tp/l2tp_ip6.c 	if (sock_flag(sk, SOCK_ZAPPED))
sk                430 net/l2tp/l2tp_ip6.c 	return __udp_disconnect(sk, flags);
sk                437 net/l2tp/l2tp_ip6.c 	struct sock *sk = sock->sk;
sk                438 net/l2tp/l2tp_ip6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                439 net/l2tp/l2tp_ip6.c 	struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
sk                449 net/l2tp/l2tp_ip6.c 		lsa->l2tp_addr = sk->sk_v6_daddr;
sk                453 net/l2tp/l2tp_ip6.c 		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
sk                456 net/l2tp/l2tp_ip6.c 			lsa->l2tp_addr = sk->sk_v6_rcv_saddr;
sk                461 net/l2tp/l2tp_ip6.c 		lsa->l2tp_scope_id = sk->sk_bound_dev_if;
sk                465 net/l2tp/l2tp_ip6.c static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
sk                470 net/l2tp/l2tp_ip6.c 	rc = sock_queue_rcv_skb(sk, skb);
sk                477 net/l2tp/l2tp_ip6.c 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
sk                482 net/l2tp/l2tp_ip6.c static int l2tp_ip6_push_pending_frames(struct sock *sk)
sk                488 net/l2tp/l2tp_ip6.c 	skb = skb_peek(&sk->sk_write_queue);
sk                495 net/l2tp/l2tp_ip6.c 	err = ip6_push_pending_frames(sk);
sk                504 net/l2tp/l2tp_ip6.c static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk                509 net/l2tp/l2tp_ip6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                536 net/l2tp/l2tp_ip6.c 	fl6.flowi6_mark = sk->sk_mark;
sk                537 net/l2tp/l2tp_ip6.c 	fl6.flowi6_uid = sk->sk_uid;
sk                552 net/l2tp/l2tp_ip6.c 				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
sk                562 net/l2tp/l2tp_ip6.c 		if (sk->sk_state == TCP_ESTABLISHED &&
sk                563 net/l2tp/l2tp_ip6.c 		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
sk                564 net/l2tp/l2tp_ip6.c 			daddr = &sk->sk_v6_daddr;
sk                571 net/l2tp/l2tp_ip6.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk                574 net/l2tp/l2tp_ip6.c 		daddr = &sk->sk_v6_daddr;
sk                579 net/l2tp/l2tp_ip6.c 		fl6.flowi6_oif = sk->sk_bound_dev_if;
sk                587 net/l2tp/l2tp_ip6.c 		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
sk                593 net/l2tp/l2tp_ip6.c 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
sk                610 net/l2tp/l2tp_ip6.c 	fl6.flowi6_proto = sk->sk_protocol;
sk                625 net/l2tp/l2tp_ip6.c 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
sk                632 net/l2tp/l2tp_ip6.c 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
sk                648 net/l2tp/l2tp_ip6.c 	lock_sock(sk);
sk                649 net/l2tp/l2tp_ip6.c 	err = ip6_append_data(sk, ip_generic_getfrag, msg,
sk                654 net/l2tp/l2tp_ip6.c 		ip6_flush_pending_frames(sk);
sk                656 net/l2tp/l2tp_ip6.c 		err = l2tp_ip6_push_pending_frames(sk);
sk                657 net/l2tp/l2tp_ip6.c 	release_sock(sk);
sk                675 net/l2tp/l2tp_ip6.c static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk                678 net/l2tp/l2tp_ip6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                688 net/l2tp/l2tp_ip6.c 		return ipv6_recv_error(sk, msg, len, addr_len);
sk                690 net/l2tp/l2tp_ip6.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                704 net/l2tp/l2tp_ip6.c 	sock_recv_timestamp(msg, sk, skb);
sk                720 net/l2tp/l2tp_ip6.c 		ip6_datagram_recv_ctl(sk, msg, skb);
sk                725 net/l2tp/l2tp_ip6.c 	skb_free_datagram(sk, skb);
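
Both __l2tp_ip_bind_lookup() and __l2tp_ip6_bind_lookup() above follow the same shape: walk the bind table and reject a candidate on the first criterion that does not match (network namespace, bound device, addresses, connection ID), returning the first socket that survives every filter. The self-contained C sketch below reproduces that pattern with stand-in types (and without the namespace check), purely for illustration.

/*
 * Simplified "first full match wins" bound-socket lookup, modelled on the
 * __l2tp_ip_bind_lookup() loops above.  Types are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct bound_sock {
	struct bound_sock *next;
	uint32_t laddr;     /* bound local address (0 = wildcard) */
	uint32_t raddr;     /* connected remote address (0 = any) */
	int      ifindex;   /* bound device (0 = any)             */
	uint32_t conn_id;   /* L2TP tunnel/connection ID          */
};

static struct bound_sock *
bind_lookup(struct bound_sock *head, uint32_t daddr, uint32_t saddr,
	    int dif, uint32_t tunnel_id)
{
	for (struct bound_sock *sk = head; sk; sk = sk->next) {
		if (sk->ifindex && dif && sk->ifindex != dif)
			continue;
		if (sk->laddr && sk->laddr != daddr)
			continue;
		if (sk->raddr && sk->raddr != saddr)
			continue;
		if (sk->conn_id != tunnel_id)
			continue;
		return sk;   /* every filter passed */
	}
	return NULL;
}

int main(void)
{
	struct bound_sock b = { .conn_id = 42 };   /* wildcard address/dev */

	printf("conn 42: %s\n",
	       bind_lookup(&b, 0x0a000001, 0x0a000002, 3, 42) ? "hit" : "miss");
	printf("conn 7:  %s\n",
	       bind_lookup(&b, 0x0a000001, 0x0a000002, 3, 7) ? "hit" : "miss");
	return 0;
}
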
sk                328 net/l2tp/l2tp_netlink.c 	struct sock *sk = NULL;
sk                376 net/l2tp/l2tp_netlink.c 	sk = tunnel->sock;
sk                377 net/l2tp/l2tp_netlink.c 	if (!sk)
sk                381 net/l2tp/l2tp_netlink.c 	if (sk->sk_family == AF_INET6)
sk                382 net/l2tp/l2tp_netlink.c 		np = inet6_sk(sk);
sk                385 net/l2tp/l2tp_netlink.c 	inet = inet_sk(sk);
sk                389 net/l2tp/l2tp_netlink.c 		switch (sk->sk_family) {
sk                391 net/l2tp/l2tp_netlink.c 			if (nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx))
sk                396 net/l2tp/l2tp_netlink.c 			if (udp_get_no_check6_tx(sk) &&
sk                399 net/l2tp/l2tp_netlink.c 			if (udp_get_no_check6_rx(sk) &&
sk                415 net/l2tp/l2tp_netlink.c 					     &sk->sk_v6_daddr))
sk                484 net/l2tp/l2tp_netlink.c 	struct net *net = sock_net(skb->sk);
sk                820 net/l2tp/l2tp_netlink.c 	struct net *net = sock_net(skb->sk);
sk                120 net/l2tp/l2tp_ppp.c 	struct sock __rcu	*sk;		/* Pointer to the session
sk                141 net/l2tp/l2tp_ppp.c 	struct sock *sk;
sk                144 net/l2tp/l2tp_ppp.c 	sk = rcu_dereference(ps->sk);
sk                145 net/l2tp/l2tp_ppp.c 	if (sk)
sk                146 net/l2tp/l2tp_ppp.c 		sock_hold(sk);
sk                149 net/l2tp/l2tp_ppp.c 	return sk;
sk                154 net/l2tp/l2tp_ppp.c static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
sk                158 net/l2tp/l2tp_ppp.c 	if (sk == NULL)
sk                161 net/l2tp/l2tp_ppp.c 	sock_hold(sk);
sk                162 net/l2tp/l2tp_ppp.c 	session = (struct l2tp_session *)(sk->sk_user_data);
sk                164 net/l2tp/l2tp_ppp.c 		sock_put(sk);
sk                185 net/l2tp/l2tp_ppp.c 	struct sock *sk = sock->sk;
sk                188 net/l2tp/l2tp_ppp.c 	if (sk->sk_state & PPPOX_BOUND)
sk                192 net/l2tp/l2tp_ppp.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
sk                214 net/l2tp/l2tp_ppp.c 	struct sock *sk = NULL;
sk                220 net/l2tp/l2tp_ppp.c 	sk = rcu_dereference(ps->sk);
sk                221 net/l2tp/l2tp_ppp.c 	if (sk == NULL)
sk                235 net/l2tp/l2tp_ppp.c 	if (sk->sk_state & PPPOX_BOUND) {
sk                242 net/l2tp/l2tp_ppp.c 		po = pppox_sk(sk);
sk                249 net/l2tp/l2tp_ppp.c 		if (sock_queue_rcv_skb(sk, skb) < 0) {
sk                275 net/l2tp/l2tp_ppp.c 	struct sock *sk = sock->sk;
sk                283 net/l2tp/l2tp_ppp.c 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
sk                288 net/l2tp/l2tp_ppp.c 	session = pppol2tp_sock_to_session(sk);
sk                298 net/l2tp/l2tp_ppp.c 	skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
sk                328 net/l2tp/l2tp_ppp.c 	sock_put(sk);
sk                333 net/l2tp/l2tp_ppp.c 	sock_put(sk);
sk                354 net/l2tp/l2tp_ppp.c 	struct sock *sk = (struct sock *) chan->private;
sk                359 net/l2tp/l2tp_ppp.c 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
sk                363 net/l2tp/l2tp_ppp.c 	session = pppol2tp_sock_to_session(sk);
sk                387 net/l2tp/l2tp_ppp.c 	sock_put(sk);
sk                392 net/l2tp/l2tp_ppp.c 	sock_put(sk);
sk                414 net/l2tp/l2tp_ppp.c static void pppol2tp_session_destruct(struct sock *sk)
sk                416 net/l2tp/l2tp_ppp.c 	struct l2tp_session *session = sk->sk_user_data;
sk                418 net/l2tp/l2tp_ppp.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                419 net/l2tp/l2tp_ppp.c 	skb_queue_purge(&sk->sk_write_queue);
sk                422 net/l2tp/l2tp_ppp.c 		sk->sk_user_data = NULL;
sk                432 net/l2tp/l2tp_ppp.c 	struct sock *sk = sock->sk;
sk                436 net/l2tp/l2tp_ppp.c 	if (!sk)
sk                440 net/l2tp/l2tp_ppp.c 	lock_sock(sk);
sk                441 net/l2tp/l2tp_ppp.c 	if (sock_flag(sk, SOCK_DEAD) != 0)
sk                444 net/l2tp/l2tp_ppp.c 	pppox_unbind_sock(sk);
sk                447 net/l2tp/l2tp_ppp.c 	sk->sk_state = PPPOX_DEAD;
sk                448 net/l2tp/l2tp_ppp.c 	sock_orphan(sk);
sk                449 net/l2tp/l2tp_ppp.c 	sock->sk = NULL;
sk                451 net/l2tp/l2tp_ppp.c 	session = pppol2tp_sock_to_session(sk);
sk                459 net/l2tp/l2tp_ppp.c 		ps->__sk = rcu_dereference_protected(ps->sk,
sk                461 net/l2tp/l2tp_ppp.c 		RCU_INIT_POINTER(ps->sk, NULL);
sk                471 net/l2tp/l2tp_ppp.c 	release_sock(sk);
sk                477 net/l2tp/l2tp_ppp.c 	sock_put(sk);
sk                482 net/l2tp/l2tp_ppp.c 	release_sock(sk);
sk                492 net/l2tp/l2tp_ppp.c static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
sk                496 net/l2tp/l2tp_ppp.c 	rc = l2tp_udp_encap_recv(sk, skb);
sk                508 net/l2tp/l2tp_ppp.c 	struct sock *sk;
sk                510 net/l2tp/l2tp_ppp.c 	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto, kern);
sk                511 net/l2tp/l2tp_ppp.c 	if (!sk)
sk                514 net/l2tp/l2tp_ppp.c 	sock_init_data(sock, sk);
sk                519 net/l2tp/l2tp_ppp.c 	sk->sk_backlog_rcv = pppol2tp_backlog_recv;
sk                520 net/l2tp/l2tp_ppp.c 	sk->sk_protocol	   = PX_PROTO_OL2TP;
sk                521 net/l2tp/l2tp_ppp.c 	sk->sk_family	   = PF_PPPOX;
sk                522 net/l2tp/l2tp_ppp.c 	sk->sk_state	   = PPPOX_NONE;
sk                523 net/l2tp/l2tp_ppp.c 	sk->sk_type	   = SOCK_STREAM;
sk                524 net/l2tp/l2tp_ppp.c 	sk->sk_destruct	   = pppol2tp_session_destruct;
sk                535 net/l2tp/l2tp_ppp.c 	struct sock *sk;
sk                537 net/l2tp/l2tp_ppp.c 	sk = pppol2tp_session_get_sock(session);
sk                538 net/l2tp/l2tp_ppp.c 	if (sk) {
sk                539 net/l2tp/l2tp_ppp.c 		struct pppox_sock *po = pppox_sk(sk);
sk                542 net/l2tp/l2tp_ppp.c 		sock_put(sk);
sk                664 net/l2tp/l2tp_ppp.c 	struct sock *sk = sock->sk;
sk                665 net/l2tp/l2tp_ppp.c 	struct pppox_sock *po = pppox_sk(sk);
sk                681 net/l2tp/l2tp_ppp.c 	lock_sock(sk);
sk                685 net/l2tp/l2tp_ppp.c 	if (sk->sk_state & PPPOX_CONNECTED)
sk                690 net/l2tp/l2tp_ppp.c 	if (sk->sk_user_data)
sk                698 net/l2tp/l2tp_ppp.c 	tunnel = l2tp_tunnel_get(sock_net(sk), info.tunnel_id);
sk                721 net/l2tp/l2tp_ppp.c 			error = l2tp_tunnel_create(sock_net(sk), info.fd,
sk                730 net/l2tp/l2tp_ppp.c 			error = l2tp_tunnel_register(tunnel, sock_net(sk),
sk                768 net/l2tp/l2tp_ppp.c 		if (rcu_dereference_protected(ps->sk,
sk                818 net/l2tp/l2tp_ppp.c 	po->chan.private = sk;
sk                822 net/l2tp/l2tp_ppp.c 	error = ppp_register_net_channel(sock_net(sk), &po->chan);
sk                830 net/l2tp/l2tp_ppp.c 	sk->sk_user_data = session;
sk                831 net/l2tp/l2tp_ppp.c 	rcu_assign_pointer(ps->sk, sk);
sk                840 net/l2tp/l2tp_ppp.c 	sk->sk_state = PPPOX_CONNECTED;
sk                855 net/l2tp/l2tp_ppp.c 	release_sock(sk);
sk                910 net/l2tp/l2tp_ppp.c 	struct sock *sk = sock->sk;
sk                915 net/l2tp/l2tp_ppp.c 	if (sk == NULL)
sk                917 net/l2tp/l2tp_ppp.c 	if (!(sk->sk_state & PPPOX_CONNECTED))
sk                921 net/l2tp/l2tp_ppp.c 	session = pppol2tp_sock_to_session(sk);
sk               1005 net/l2tp/l2tp_ppp.c 	sock_put(sk);
sk               1073 net/l2tp/l2tp_ppp.c 		session = sock->sk->sk_user_data;
sk               1087 net/l2tp/l2tp_ppp.c 		session = sock->sk->sk_user_data;
sk               1100 net/l2tp/l2tp_ppp.c 		session = sock->sk->sk_user_data;
sk               1150 net/l2tp/l2tp_ppp.c static int pppol2tp_tunnel_setsockopt(struct sock *sk,
sk               1173 net/l2tp/l2tp_ppp.c static int pppol2tp_session_setsockopt(struct sock *sk,
sk               1198 net/l2tp/l2tp_ppp.c 			struct pppox_sock *po = pppox_sk(sk);
sk               1249 net/l2tp/l2tp_ppp.c 	struct sock *sk = sock->sk;
sk               1265 net/l2tp/l2tp_ppp.c 	if (sk->sk_user_data == NULL)
sk               1270 net/l2tp/l2tp_ppp.c 	session = pppol2tp_sock_to_session(sk);
sk               1279 net/l2tp/l2tp_ppp.c 		err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
sk               1281 net/l2tp/l2tp_ppp.c 		err = pppol2tp_session_setsockopt(sk, session, optname, val);
sk               1284 net/l2tp/l2tp_ppp.c 	sock_put(sk);
sk               1291 net/l2tp/l2tp_ppp.c static int pppol2tp_tunnel_getsockopt(struct sock *sk,
sk               1314 net/l2tp/l2tp_ppp.c static int pppol2tp_session_getsockopt(struct sock *sk,
sk               1366 net/l2tp/l2tp_ppp.c 	struct sock *sk = sock->sk;
sk               1384 net/l2tp/l2tp_ppp.c 	if (sk->sk_user_data == NULL)
sk               1389 net/l2tp/l2tp_ppp.c 	session = pppol2tp_sock_to_session(sk);
sk               1397 net/l2tp/l2tp_ppp.c 		err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
sk               1401 net/l2tp/l2tp_ppp.c 		err = pppol2tp_session_getsockopt(sk, session, optname, &val);
sk               1416 net/l2tp/l2tp_ppp.c 	sock_put(sk);
sk               1548 net/l2tp/l2tp_ppp.c 	struct sock *sk;
sk               1558 net/l2tp/l2tp_ppp.c 	sk = pppol2tp_session_get_sock(session);
sk               1559 net/l2tp/l2tp_ppp.c 	if (sk) {
sk               1560 net/l2tp/l2tp_ppp.c 		state = sk->sk_state;
sk               1561 net/l2tp/l2tp_ppp.c 		user_data_ok = (session == sk->sk_user_data) ? 'Y' : 'N';
sk               1590 net/l2tp/l2tp_ppp.c 	if (sk) {
sk               1591 net/l2tp/l2tp_ppp.c 		struct pppox_sock *po = pppox_sk(sk);
sk               1594 net/l2tp/l2tp_ppp.c 		sock_put(sk);
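
The l2tp_ppp.c entries above are driven from user space by connect()ing an AF_PPPOX / PX_PROTO_OL2TP socket whose address carries the fd of the tunnel's UDP socket plus the tunnel and session IDs (pppol2tp_connect in the listing). A hedged sketch of that call sequence follows; struct and constant names are taken from the kernel UAPI headers (<linux/if_pppox.h>, <linux/if_pppol2tp.h>), but header ordering varies across systems and the IDs and peer address are placeholders, so treat it as illustrative rather than a known-good configuration.

/*
 * Hedged sketch of the user-space side of pppol2tp_connect(): create the
 * UDP tunnel socket, then connect an AF_PPPOX socket referencing its fd.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>

int main(void)
{
	struct sockaddr_in peer = {
		.sin_family = AF_INET,
		.sin_port   = htons(1701),
	};
	struct sockaddr_pppol2tp sax;
	int tunnel_fd, ppp_fd;

	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);   /* placeholder peer */

	/* UDP socket carrying the tunnel; normally bound/connected and
	 * managed by an L2TP control-protocol daemon. */
	tunnel_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
	if (tunnel_fd < 0) {
		perror("tunnel socket");
		return 1;
	}

	ppp_fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
	if (ppp_fd < 0) {
		perror("socket(AF_PPPOX)");
		return 1;
	}

	memset(&sax, 0, sizeof(sax));
	sax.sa_family          = AF_PPPOX;
	sax.sa_protocol        = PX_PROTO_OL2TP;
	sax.pppol2tp.fd        = tunnel_fd;   /* tunnel transport socket  */
	sax.pppol2tp.addr      = peer;
	sax.pppol2tp.s_tunnel  = 1;           /* placeholder local IDs    */
	sax.pppol2tp.s_session = 1;
	sax.pppol2tp.d_tunnel  = 2;           /* placeholder peer IDs     */
	sax.pppol2tp.d_session = 2;

	/* This drives pppol2tp_connect() in l2tp_ppp.c (see listing). */
	if (connect(ppp_fd, (struct sockaddr *)&sax, sizeof(sax)) < 0)
		perror("connect(AF_PPPOX)");

	close(ppp_fd);
	close(tunnel_fd);
	return 0;
}
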
sk                 90 net/lapb/lapb_out.c 			if (skb->sk)
sk                 91 net/lapb/lapb_out.c 				skb_set_owner_w(skbn, skb->sk);
sk                 43 net/llc/af_llc.c static bool llc_ui_wait_for_conn(struct sock *sk, long timeout);
sk                 44 net/llc/af_llc.c static int llc_ui_wait_for_disc(struct sock *sk, long timeout);
sk                 45 net/llc/af_llc.c static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
sk                 97 net/llc/af_llc.c static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
sk                103 net/llc/af_llc.c 	else if (sk->sk_type == SOCK_STREAM)
sk                119 net/llc/af_llc.c static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
sk                121 net/llc/af_llc.c 	struct llc_sock* llc = llc_sk(sk);
sk                126 net/llc/af_llc.c 		long timeout = sock_sndtimeo(sk, noblock);
sk                129 net/llc/af_llc.c 		rc = llc_ui_wait_for_busy_core(sk, timeout);
sk                135 net/llc/af_llc.c 	return llc_build_and_send_pkt(sk, skb);
sk                138 net/llc/af_llc.c static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
sk                140 net/llc/af_llc.c 	sock_graft(sk, sock);
sk                141 net/llc/af_llc.c 	sk->sk_type	= sock->type;
sk                166 net/llc/af_llc.c 	struct sock *sk;
sk                177 net/llc/af_llc.c 		sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto, kern);
sk                178 net/llc/af_llc.c 		if (sk) {
sk                180 net/llc/af_llc.c 			llc_ui_sk_init(sock, sk);
sk                194 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk                197 net/llc/af_llc.c 	if (unlikely(sk == NULL))
sk                199 net/llc/af_llc.c 	sock_hold(sk);
sk                200 net/llc/af_llc.c 	lock_sock(sk);
sk                201 net/llc/af_llc.c 	llc = llc_sk(sk);
sk                204 net/llc/af_llc.c 	if (!llc_send_disc(sk))
sk                205 net/llc/af_llc.c 		llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
sk                206 net/llc/af_llc.c 	if (!sock_flag(sk, SOCK_ZAPPED)) {
sk                213 net/llc/af_llc.c 		llc_sap_remove_socket(llc->sap, sk);
sk                214 net/llc/af_llc.c 		release_sock(sk);
sk                217 net/llc/af_llc.c 		release_sock(sk);
sk                221 net/llc/af_llc.c 	sock_put(sk);
sk                222 net/llc/af_llc.c 	llc_sk_free(sk);
sk                269 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk                270 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(sk);
sk                274 net/llc/af_llc.c 	if (!sock_flag(sk, SOCK_ZAPPED))
sk                277 net/llc/af_llc.c 	if (sk->sk_bound_dev_if) {
sk                278 net/llc/af_llc.c 		llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
sk                298 net/llc/af_llc.c 	llc_sap_add_socket(sap, sk);
sk                299 net/llc/af_llc.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                322 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk                323 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(sk);
sk                327 net/llc/af_llc.c 	lock_sock(sk);
sk                328 net/llc/af_llc.c 	if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
sk                336 net/llc/af_llc.c 	if (sk->sk_bound_dev_if) {
sk                337 net/llc/af_llc.c 		llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
sk                394 net/llc/af_llc.c 	llc_sap_add_socket(sap, sk);
sk                395 net/llc/af_llc.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                400 net/llc/af_llc.c 	release_sock(sk);
sk                417 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk                420 net/llc/af_llc.c 	lock_sock(sk);
sk                421 net/llc/af_llc.c 	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
sk                426 net/llc/af_llc.c 	rc = llc_send_disc(sk);
sk                428 net/llc/af_llc.c 		rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
sk                430 net/llc/af_llc.c 	sk->sk_state_change(sk);
sk                432 net/llc/af_llc.c 	release_sock(sk);
sk                453 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk                454 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(sk);
sk                458 net/llc/af_llc.c 	lock_sock(sk);
sk                464 net/llc/af_llc.c 	if (unlikely(sk->sk_type != SOCK_STREAM))
sk                470 net/llc/af_llc.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk                479 net/llc/af_llc.c 	sk->sk_state   = TCP_SYN_SENT;
sk                481 net/llc/af_llc.c 	rc = llc_establish_connection(sk, llc->dev->dev_addr,
sk                486 net/llc/af_llc.c 		sk->sk_state = TCP_CLOSE;
sk                490 net/llc/af_llc.c 	if (sk->sk_state == TCP_SYN_SENT) {
sk                491 net/llc/af_llc.c 		const long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
sk                493 net/llc/af_llc.c 		if (!timeo || !llc_ui_wait_for_conn(sk, timeo))
sk                501 net/llc/af_llc.c 	if (sk->sk_state == TCP_CLOSE)
sk                507 net/llc/af_llc.c 	release_sock(sk);
sk                510 net/llc/af_llc.c 	rc = sock_error(sk) ? : -ECONNABORTED;
sk                525 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk                528 net/llc/af_llc.c 	lock_sock(sk);
sk                532 net/llc/af_llc.c 	if (unlikely(sk->sk_type != SOCK_STREAM))
sk                535 net/llc/af_llc.c 	if (sock_flag(sk, SOCK_ZAPPED))
sk                540 net/llc/af_llc.c 	sk->sk_max_ack_backlog = backlog;
sk                541 net/llc/af_llc.c 	if (sk->sk_state != TCP_LISTEN) {
sk                542 net/llc/af_llc.c 		sk->sk_ack_backlog = 0;
sk                543 net/llc/af_llc.c 		sk->sk_state	   = TCP_LISTEN;
sk                545 net/llc/af_llc.c 	sk->sk_socket->flags |= __SO_ACCEPTCON;
sk                547 net/llc/af_llc.c 	release_sock(sk);
sk                551 net/llc/af_llc.c static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
sk                556 net/llc/af_llc.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                558 net/llc/af_llc.c 		if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE, &wait))
sk                568 net/llc/af_llc.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                572 net/llc/af_llc.c static bool llc_ui_wait_for_conn(struct sock *sk, long timeout)
sk                576 net/llc/af_llc.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                578 net/llc/af_llc.c 		if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT, &wait))
sk                583 net/llc/af_llc.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                587 net/llc/af_llc.c static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
sk                590 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(sk);
sk                593 net/llc/af_llc.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                596 net/llc/af_llc.c 		if (sk_wait_event(sk, &timeout,
sk                597 net/llc/af_llc.c 				  (sk->sk_shutdown & RCV_SHUTDOWN) ||
sk                609 net/llc/af_llc.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                613 net/llc/af_llc.c static int llc_wait_data(struct sock *sk, long timeo)
sk                621 net/llc/af_llc.c 		rc = sock_error(sk);
sk                625 net/llc/af_llc.c 		if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                634 net/llc/af_llc.c 		if (sk_wait_data(sk, &timeo, NULL))
sk                642 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(skb->sk);
sk                648 net/llc/af_llc.c 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
sk                668 net/llc/af_llc.c 	struct sock *sk = sock->sk, *newsk;
sk                674 net/llc/af_llc.c 		llc_sk(sk)->laddr.lsap);
sk                675 net/llc/af_llc.c 	lock_sock(sk);
sk                676 net/llc/af_llc.c 	if (unlikely(sk->sk_type != SOCK_STREAM))
sk                680 net/llc/af_llc.c 		     sk->sk_state != TCP_LISTEN))
sk                683 net/llc/af_llc.c 	if (skb_queue_empty(&sk->sk_receive_queue)) {
sk                684 net/llc/af_llc.c 		rc = llc_wait_data(sk, sk->sk_rcvtimeo);
sk                689 net/llc/af_llc.c 		llc_sk(sk)->laddr.lsap);
sk                690 net/llc/af_llc.c 	skb = skb_dequeue(&sk->sk_receive_queue);
sk                692 net/llc/af_llc.c 	if (!skb->sk)
sk                695 net/llc/af_llc.c 	newsk = skb->sk;
sk                701 net/llc/af_llc.c 	llc			= llc_sk(sk);
sk                707 net/llc/af_llc.c 	sk->sk_state = TCP_LISTEN;
sk                708 net/llc/af_llc.c 	sk->sk_ack_backlog--;
sk                710 net/llc/af_llc.c 		llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap);
sk                714 net/llc/af_llc.c 	release_sock(sk);
sk                734 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk                735 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(sk);
sk                743 net/llc/af_llc.c 	lock_sock(sk);
sk                745 net/llc/af_llc.c 	if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
sk                748 net/llc/af_llc.c 	timeo = sock_rcvtimeo(sk, nonblock);
sk                756 net/llc/af_llc.c 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
sk                776 net/llc/af_llc.c 		skb = skb_peek(&sk->sk_receive_queue);
sk                783 net/llc/af_llc.c 		if (copied >= target && !sk->sk_backlog.tail)
sk                787 net/llc/af_llc.c 			if (sk->sk_err ||
sk                788 net/llc/af_llc.c 			    sk->sk_state == TCP_CLOSE ||
sk                789 net/llc/af_llc.c 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
sk                794 net/llc/af_llc.c 			if (sock_flag(sk, SOCK_DONE))
sk                797 net/llc/af_llc.c 			if (sk->sk_err) {
sk                798 net/llc/af_llc.c 				copied = sock_error(sk);
sk                801 net/llc/af_llc.c 			if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                804 net/llc/af_llc.c 			if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE) {
sk                805 net/llc/af_llc.c 				if (!sock_flag(sk, SOCK_DONE)) {
sk                822 net/llc/af_llc.c 			release_sock(sk);
sk                823 net/llc/af_llc.c 			lock_sock(sk);
sk                825 net/llc/af_llc.c 			sk_wait_data(sk, &timeo, NULL);
sk                856 net/llc/af_llc.c 		if (sk->sk_type != SOCK_STREAM)
sk                860 net/llc/af_llc.c 			skb_unlink(skb, &sk->sk_receive_queue);
sk                871 net/llc/af_llc.c 	release_sock(sk);
sk                878 net/llc/af_llc.c 	if (llc_sk(sk)->cmsg_flags)
sk                882 net/llc/af_llc.c 		skb_unlink(skb, &sk->sk_receive_queue);
sk                901 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk                902 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(sk);
sk                912 net/llc/af_llc.c 	lock_sock(sk);
sk                922 net/llc/af_llc.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk                928 net/llc/af_llc.c 	hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
sk                936 net/llc/af_llc.c 	release_sock(sk);
sk                937 net/llc/af_llc.c 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
sk                938 net/llc/af_llc.c 	lock_sock(sk);
sk                947 net/llc/af_llc.c 	if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
sk                966 net/llc/af_llc.c 	if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
sk                968 net/llc/af_llc.c 	rc = llc_ui_send_data(sk, skb, noblock);
sk                975 net/llc/af_llc.c 	release_sock(sk);
sk                992 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk                993 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(sk);
sk                997 net/llc/af_llc.c 	lock_sock(sk);
sk                998 net/llc/af_llc.c 	if (sock_flag(sk, SOCK_ZAPPED))
sk               1002 net/llc/af_llc.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1024 net/llc/af_llc.c 	release_sock(sk);
sk               1055 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk               1056 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(sk);
sk               1060 net/llc/af_llc.c 	lock_sock(sk);
sk               1120 net/llc/af_llc.c 	release_sock(sk);
sk               1137 net/llc/af_llc.c 	struct sock *sk = sock->sk;
sk               1138 net/llc/af_llc.c 	struct llc_sock *llc = llc_sk(sk);
sk               1141 net/llc/af_llc.c 	lock_sock(sk);
sk               1178 net/llc/af_llc.c 	release_sock(sk);
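
The af_llc.c entries above handle AF_LLC sockets; llc_ui_bind() resolves the device from the MAC address in the sockaddr and attaches the socket to a SAP. Below is a hedged user-space sketch of such a bind. The MAC address and SAP value are placeholders, and struct sockaddr_llc comes from <linux/llc.h>, so verify field names against your installed headers before relying on it.

/*
 * Hedged sketch: bind an AF_LLC datagram socket to an 802.2 SAP on the
 * interface identified by its MAC address (placeholders used here).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/if_arp.h>    /* ARPHRD_ETHER */
#include <linux/if.h>      /* IFHWADDRLEN  */
#include <linux/llc.h>     /* struct sockaddr_llc */

int main(void)
{
	/* Placeholder: replace with the MAC of the interface to bind to. */
	const unsigned char mac[IFHWADDRLEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct sockaddr_llc me;
	int fd;

	fd = socket(AF_LLC, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket(AF_LLC)");
		return 1;
	}

	memset(&me, 0, sizeof(me));
	me.sllc_family = AF_LLC;
	me.sllc_arphrd = ARPHRD_ETHER;
	me.sllc_sap    = 0x42;                 /* placeholder SAP */
	memcpy(me.sllc_mac, mac, IFHWADDRLEN);

	/* Lands in llc_ui_bind(), which looks the device up by MAC and
	 * calls llc_sap_add_socket(), as the listing shows. */
	if (bind(fd, (struct sockaddr *)&me, sizeof(me)) < 0)
		perror("bind(AF_LLC)");

	close(fd);
	return 0;
}
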
sk                 32 net/llc/llc_c_ac.c static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb);
sk                 33 net/llc/llc_c_ac.c static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb);
sk                 34 net/llc/llc_c_ac.c static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *ev);
sk                 36 net/llc/llc_c_ac.c static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb);
sk                 38 net/llc/llc_c_ac.c static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk,
sk                 41 net/llc/llc_c_ac.c static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb);
sk                 45 net/llc/llc_c_ac.c int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb)
sk                 47 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                 56 net/llc/llc_c_ac.c 		llc_conn_resend_i_pdu_as_cmd(sk, nr, 0);
sk                 61 net/llc/llc_c_ac.c int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb)
sk                 69 net/llc/llc_c_ac.c int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb)
sk                 77 net/llc/llc_c_ac.c static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *skb)
sk                 85 net/llc/llc_c_ac.c int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb)
sk                 87 net/llc/llc_c_ac.c 	llc_conn_rtn_pdu(sk, skb);
sk                 91 net/llc/llc_c_ac.c int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb)
sk                119 net/llc/llc_c_ac.c int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb)
sk                128 net/llc/llc_c_ac.c int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb)
sk                134 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                167 net/llc/llc_c_ac.c int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb)
sk                176 net/llc/llc_c_ac.c int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
sk                183 net/llc/llc_c_ac.c 	    LLC_I_PF_IS_1(pdu) && llc_sk(sk)->ack_pf)
sk                184 net/llc/llc_c_ac.c 		llc_conn_ac_clear_remote_busy(sk, skb);
sk                188 net/llc/llc_c_ac.c int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
sk                191 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                198 net/llc/llc_c_ac.c int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
sk                201 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                202 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
sk                213 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                214 net/llc/llc_c_ac.c 		llc_conn_ac_set_p_flag_1(sk, skb);
sk                223 net/llc/llc_c_ac.c int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
sk                226 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                227 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
sk                240 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                249 net/llc/llc_c_ac.c int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
sk                252 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                253 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
sk                264 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                273 net/llc/llc_c_ac.c int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb)
sk                279 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                286 net/llc/llc_c_ac.c 	nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U,
sk                298 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                307 net/llc/llc_c_ac.c int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb)
sk                310 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                311 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U,
sk                325 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                334 net/llc/llc_c_ac.c int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
sk                339 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                342 net/llc/llc_c_ac.c 	nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U,
sk                355 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                364 net/llc/llc_c_ac.c int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
sk                367 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                376 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, skb);
sk                377 net/llc/llc_c_ac.c 		llc_conn_ac_inc_vs_by_1(sk, skb);
sk                382 net/llc/llc_c_ac.c static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
sk                385 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                394 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, skb);
sk                395 net/llc/llc_c_ac.c 		llc_conn_ac_inc_vs_by_1(sk, skb);
sk                400 net/llc/llc_c_ac.c int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
sk                403 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                412 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, skb);
sk                413 net/llc/llc_c_ac.c 		llc_conn_ac_inc_vs_by_1(sk, skb);
sk                418 net/llc/llc_c_ac.c int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
sk                423 net/llc/llc_c_ac.c 	llc_conn_resend_i_pdu_as_cmd(sk, nr, 0);
sk                427 net/llc/llc_c_ac.c int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk,
sk                433 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                434 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
sk                444 net/llc/llc_c_ac.c 			llc_conn_send_pdu(sk, nskb);
sk                451 net/llc/llc_c_ac.c 		llc_conn_resend_i_pdu_as_cmd(sk, nr, 0);
sk                456 net/llc/llc_c_ac.c int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
sk                461 net/llc/llc_c_ac.c 	llc_conn_resend_i_pdu_as_rsp(sk, nr, 1);
sk                465 net/llc/llc_c_ac.c int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
sk                468 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                469 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                480 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                489 net/llc/llc_c_ac.c int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
sk                492 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                493 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                504 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                513 net/llc/llc_c_ac.c int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
sk                516 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                517 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                528 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                537 net/llc/llc_c_ac.c int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
sk                540 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                541 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                552 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                561 net/llc/llc_c_ac.c int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
sk                564 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                565 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                576 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                585 net/llc/llc_c_ac.c int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
sk                588 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                589 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                600 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                609 net/llc/llc_c_ac.c int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb)
sk                611 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                621 net/llc/llc_c_ac.c int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
sk                624 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                625 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                636 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                645 net/llc/llc_c_ac.c int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
sk                648 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                649 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                660 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                669 net/llc/llc_c_ac.c int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
sk                672 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                673 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                685 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                694 net/llc/llc_c_ac.c int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb)
sk                697 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                698 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                709 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                718 net/llc/llc_c_ac.c int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
sk                721 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                722 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                733 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                742 net/llc/llc_c_ac.c int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
sk                745 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                746 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                757 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                766 net/llc/llc_c_ac.c void llc_conn_set_p_flag(struct sock *sk, u8 value)
sk                768 net/llc/llc_c_ac.c 	int state_changed = llc_sk(sk)->p_flag && !value;
sk                770 net/llc/llc_c_ac.c 	llc_sk(sk)->p_flag = value;
sk                773 net/llc/llc_c_ac.c 		sk->sk_state_change(sk);
sk                776 net/llc/llc_c_ac.c int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb)
sk                779 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                780 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
sk                794 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                795 net/llc/llc_c_ac.c 		llc_conn_set_p_flag(sk, 1);
sk                804 net/llc/llc_c_ac.c int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb)
sk                808 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                809 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0);
sk                822 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk                831 net/llc/llc_c_ac.c int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb)
sk                833 net/llc/llc_c_ac.c 	llc_sk(sk)->s_flag = 0;
sk                837 net/llc/llc_c_ac.c int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb)
sk                839 net/llc/llc_c_ac.c 	llc_sk(sk)->s_flag = 1;
sk                843 net/llc/llc_c_ac.c int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb)
sk                845 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                847 net/llc/llc_c_ac.c 	llc_conn_set_p_flag(sk, 1);
sk                863 net/llc/llc_c_ac.c int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb)
sk                866 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                877 net/llc/llc_c_ac.c 		llc_conn_ac_send_rr_rsp_f_set_ackpf(sk, skb);
sk                880 net/llc/llc_c_ac.c 		llc_conn_ac_inc_npta_value(sk, skb);
sk                894 net/llc/llc_c_ac.c int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb)
sk                896 net/llc/llc_c_ac.c 	llc_sk(sk)->ack_must_be_send = llc_sk(sk)->ack_pf = 0;
sk                910 net/llc/llc_c_ac.c static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
sk                914 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                923 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, skb);
sk                924 net/llc/llc_c_ac.c 		llc_conn_ac_inc_vs_by_1(sk, skb);
sk                939 net/llc/llc_c_ac.c int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb)
sk                941 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                945 net/llc/llc_c_ac.c 		ret = llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb);
sk                949 net/llc/llc_c_ac.c 		ret = llc_conn_ac_send_i_cmd_p_set_0(sk, skb);
sk                965 net/llc/llc_c_ac.c static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk,
sk                969 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk                970 net/llc/llc_c_ac.c 	struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0);
sk                981 net/llc/llc_c_ac.c 		llc_conn_send_pdu(sk, nskb);
sk               1000 net/llc/llc_c_ac.c static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb)
sk               1002 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1023 net/llc/llc_c_ac.c int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb)
sk               1025 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1049 net/llc/llc_c_ac.c int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb)
sk               1051 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1074 net/llc/llc_c_ac.c int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb)
sk               1076 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1094 net/llc/llc_c_ac.c int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
sk               1096 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1104 net/llc/llc_c_ac.c int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
sk               1106 net/llc/llc_c_ac.c 	llc_sk_stop_all_timers(sk, false);
sk               1110 net/llc/llc_c_ac.c int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb)
sk               1112 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1122 net/llc/llc_c_ac.c int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb)
sk               1124 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1130 net/llc/llc_c_ac.c int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb)
sk               1132 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1139 net/llc/llc_c_ac.c int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
sk               1142 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1150 net/llc/llc_c_ac.c int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb)
sk               1152 net/llc/llc_c_ac.c 	del_timer(&llc_sk(sk)->ack_timer.timer);
sk               1156 net/llc/llc_c_ac.c int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb)
sk               1158 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1161 net/llc/llc_c_ac.c 	llc_conn_set_p_flag(sk, 0);
sk               1165 net/llc/llc_c_ac.c int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb)
sk               1167 net/llc/llc_c_ac.c 	del_timer(&llc_sk(sk)->rej_sent_timer.timer);
sk               1171 net/llc/llc_c_ac.c int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb)
sk               1176 net/llc/llc_c_ac.c 	struct llc_sock *llc = llc_sk(sk);
sk               1179 net/llc/llc_c_ac.c 	acked = llc_conn_remove_acked_pdus(sk, llc->last_nr, &unacked);
sk               1190 net/llc/llc_c_ac.c 			llc_conn_ac_data_confirm(sk, skb);
sk               1201 net/llc/llc_c_ac.c 			llc_conn_ac_data_confirm(sk, skb);
sk               1207 net/llc/llc_c_ac.c int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb)
sk               1216 net/llc/llc_c_ac.c 			llc_conn_set_p_flag(sk, 0);
sk               1217 net/llc/llc_c_ac.c 			llc_conn_ac_stop_p_timer(sk, skb);
sk               1223 net/llc/llc_c_ac.c int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb)
sk               1225 net/llc/llc_c_ac.c 	llc_sk(sk)->data_flag = 2;
sk               1229 net/llc/llc_c_ac.c int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb)
sk               1231 net/llc/llc_c_ac.c 	llc_sk(sk)->data_flag = 0;
sk               1235 net/llc/llc_c_ac.c int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb)
sk               1237 net/llc/llc_c_ac.c 	llc_sk(sk)->data_flag = 1;
sk               1241 net/llc/llc_c_ac.c int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk,
sk               1244 net/llc/llc_c_ac.c 	if (!llc_sk(sk)->data_flag)
sk               1245 net/llc/llc_c_ac.c 		llc_sk(sk)->data_flag = 1;
sk               1249 net/llc/llc_c_ac.c int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb)
sk               1251 net/llc/llc_c_ac.c 	llc_conn_set_p_flag(sk, 0);
sk               1255 net/llc/llc_c_ac.c static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb)
sk               1257 net/llc/llc_c_ac.c 	llc_conn_set_p_flag(sk, 1);
sk               1261 net/llc/llc_c_ac.c int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb)
sk               1263 net/llc/llc_c_ac.c 	llc_sk(sk)->remote_busy_flag = 0;
sk               1267 net/llc/llc_c_ac.c int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb)
sk               1269 net/llc/llc_c_ac.c 	llc_sk(sk)->cause_flag = 0;
sk               1273 net/llc/llc_c_ac.c int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb)
sk               1275 net/llc/llc_c_ac.c 	llc_sk(sk)->cause_flag = 1;
sk               1279 net/llc/llc_c_ac.c int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb)
sk               1281 net/llc/llc_c_ac.c 	llc_sk(sk)->retry_count = 0;
sk               1285 net/llc/llc_c_ac.c int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb)
sk               1287 net/llc/llc_c_ac.c 	llc_sk(sk)->retry_count++;
sk               1291 net/llc/llc_c_ac.c int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb)
sk               1293 net/llc/llc_c_ac.c 	llc_sk(sk)->vR = 0;
sk               1297 net/llc/llc_c_ac.c int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb)
sk               1299 net/llc/llc_c_ac.c 	llc_sk(sk)->vR = PDU_GET_NEXT_Vr(llc_sk(sk)->vR);
sk               1303 net/llc/llc_c_ac.c int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb)
sk               1305 net/llc/llc_c_ac.c 	llc_sk(sk)->vS = 0;
sk               1309 net/llc/llc_c_ac.c int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb)
sk               1311 net/llc/llc_c_ac.c 	llc_sk(sk)->vS = llc_sk(sk)->last_nr;
sk               1315 net/llc/llc_c_ac.c static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb)
sk               1317 net/llc/llc_c_ac.c 	llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % LLC_2_SEQ_NBR_MODULO;
sk               1321 net/llc/llc_c_ac.c static void llc_conn_tmr_common_cb(struct sock *sk, u8 type)
sk               1325 net/llc/llc_c_ac.c 	bh_lock_sock(sk);
sk               1329 net/llc/llc_c_ac.c 		skb_set_owner_r(skb, sk);
sk               1331 net/llc/llc_c_ac.c 		llc_process_tmr_ev(sk, skb);
sk               1333 net/llc/llc_c_ac.c 	bh_unlock_sock(sk);
sk               1340 net/llc/llc_c_ac.c 	llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_P_TMR);
sk               1347 net/llc/llc_c_ac.c 	llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_BUSY_TMR);
sk               1354 net/llc/llc_c_ac.c 	llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_ACK_TMR);
sk               1361 net/llc/llc_c_ac.c 	llc_conn_tmr_common_cb(&llc->sk, LLC_CONN_EV_TYPE_REJ_TMR);
sk               1364 net/llc/llc_c_ac.c int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb)
sk               1366 net/llc/llc_c_ac.c 	llc_sk(sk)->X = llc_sk(sk)->vS;
sk               1367 net/llc/llc_c_ac.c 	llc_conn_ac_set_vs_nr(sk, skb);
sk               1371 net/llc/llc_c_ac.c int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb)
sk               1376 net/llc/llc_c_ac.c 	if (llc_circular_between(llc_sk(sk)->vS, nr, llc_sk(sk)->X))
sk               1377 net/llc/llc_c_ac.c 		llc_conn_ac_set_vs_nr(sk, skb);
sk               1390 net/llc/llc_c_ac.c int llc_conn_disc(struct sock *sk, struct sk_buff *skb)
sk               1403 net/llc/llc_c_ac.c int llc_conn_reset(struct sock *sk, struct sk_buff *skb)
sk               1405 net/llc/llc_c_ac.c 	llc_sk_reset(sk);
sk               1437 net/llc/llc_c_ac.c static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
sk               1439 net/llc/llc_c_ac.c 	if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) {
sk               1444 net/llc/llc_c_ac.c 		if (!sock_owned_by_user(sk))
sk               1445 net/llc/llc_c_ac.c 			llc_conn_state_process(sk, skb);
sk               1448 net/llc/llc_c_ac.c 			__sk_add_backlog(sk, skb);
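
The llc_c_ac.c timer callbacks listed above (llc_conn_tmr_common_cb and llc_process_tmr_ev) follow the usual BH-context deferral discipline: lock the socket with bh_lock_sock(), run the connection state machine only if no user-space caller currently owns the socket, and otherwise park the event skb on the backlog. A minimal sketch of that pattern, assuming only the llc_conn_state_process() prototype from include/net/llc_conn.h; this is illustrative, not the exact kernel code:

/* Hedged sketch of the timer-deferral pattern seen in llc_c_ac.c above. */
#include <net/sock.h>
#include <net/llc_conn.h>
#include <linux/skbuff.h>

static void llc_tmr_sketch(struct sock *sk, struct sk_buff *ev_skb)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		llc_conn_state_process(sk, ev_skb);	/* run the state machine now */
	else
		__sk_add_backlog(sk, ev_skb);		/* handled at release_sock() time */
	bh_unlock_sock(sk);
}
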
sk                 75 net/llc/llc_c_ev.c static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr)
sk                 80 net/llc/llc_c_ev.c 	struct llc_sock *llc = llc_sk(sk);
sk                 99 net/llc/llc_c_ev.c int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb)
sk                107 net/llc/llc_c_ev.c int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb)
sk                115 net/llc/llc_c_ev.c int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb)
sk                123 net/llc/llc_c_ev.c int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb)
sk                131 net/llc/llc_c_ev.c int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb)
sk                139 net/llc/llc_c_ev.c int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb)
sk                147 net/llc/llc_c_ev.c int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb)
sk                152 net/llc/llc_c_ev.c int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
sk                160 net/llc/llc_c_ev.c int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
sk                168 net/llc/llc_c_ev.c int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
sk                176 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
sk                180 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
sk                183 net/llc/llc_c_ev.c 	       LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
sk                186 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
sk                190 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
sk                193 net/llc/llc_c_ev.c 	       LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
sk                196 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
sk                200 net/llc/llc_c_ev.c 	const u8 vr = llc_sk(sk)->vR;
sk                205 net/llc/llc_c_ev.c 	       !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
sk                208 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
sk                212 net/llc/llc_c_ev.c 	const u8 vr = llc_sk(sk)->vR;
sk                217 net/llc/llc_c_ev.c 	       !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
sk                220 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
sk                224 net/llc/llc_c_ev.c 	const u8 vr = llc_sk(sk)->vR;
sk                228 net/llc/llc_c_ev.c 		 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
sk                231 net/llc/llc_c_ev.c 			__func__, llc_sk(sk)->state, ns, vr);
sk                235 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
sk                239 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
sk                242 net/llc/llc_c_ev.c 	       LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
sk                245 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
sk                251 net/llc/llc_c_ev.c 	       LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
sk                254 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
sk                258 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
sk                260 net/llc/llc_c_ev.c 	       LLC_I_GET_NS(pdu) == llc_sk(sk)->vR ? 0 : 1;
sk                263 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
sk                267 net/llc/llc_c_ev.c 	const u8 vr = llc_sk(sk)->vR;
sk                272 net/llc/llc_c_ev.c 	       !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
sk                275 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
sk                279 net/llc/llc_c_ev.c 	const u8 vr = llc_sk(sk)->vR;
sk                284 net/llc/llc_c_ev.c 	       !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
sk                287 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
sk                291 net/llc/llc_c_ev.c 	const u8 vr = llc_sk(sk)->vR;
sk                295 net/llc/llc_c_ev.c 	       !llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
sk                298 net/llc/llc_c_ev.c int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
sk                302 net/llc/llc_c_ev.c 	const u8 vr = llc_sk(sk)->vR;
sk                306 net/llc/llc_c_ev.c 		 llc_util_ns_inside_rx_window(ns, vr, llc_sk(sk)->rw) ? 0 : 1;
sk                309 net/llc/llc_c_ev.c 			__func__, llc_sk(sk)->state, ns, vr);
sk                313 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
sk                322 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
sk                331 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
sk                340 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
sk                349 net/llc/llc_c_ev.c int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
sk                357 net/llc/llc_c_ev.c int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
sk                366 net/llc/llc_c_ev.c int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
sk                375 net/llc/llc_c_ev.c int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
sk                384 net/llc/llc_c_ev.c int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
sk                393 net/llc/llc_c_ev.c int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb)
sk                402 net/llc/llc_c_ev.c int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
sk                411 net/llc/llc_c_ev.c int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb)
sk                415 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
sk                421 net/llc/llc_c_ev.c int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb)
sk                425 net/llc/llc_c_ev.c 	return llc_conn_space(sk, skb) &&
sk                431 net/llc/llc_c_ev.c int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
sk                439 net/llc/llc_c_ev.c int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
sk                447 net/llc/llc_c_ev.c int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb)
sk                462 net/llc/llc_c_ev.c int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb)
sk                481 net/llc/llc_c_ev.c int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb)
sk                502 net/llc/llc_c_ev.c int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
sk                507 net/llc/llc_c_ev.c 	const u8 vs = llc_sk(sk)->vS;
sk                512 net/llc/llc_c_ev.c 	    nr != vs && llc_util_nr_inside_tx_window(sk, nr)) {
sk                514 net/llc/llc_c_ev.c 			__func__, llc_sk(sk)->state, vs, nr);
sk                520 net/llc/llc_c_ev.c int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
sk                525 net/llc/llc_c_ev.c 	const u8 vs = llc_sk(sk)->vS;
sk                530 net/llc/llc_c_ev.c 	    nr != vs && llc_util_nr_inside_tx_window(sk, nr)) {
sk                533 net/llc/llc_c_ev.c 			__func__, llc_sk(sk)->state, vs, nr);
sk                538 net/llc/llc_c_ev.c int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb)
sk                543 net/llc/llc_c_ev.c int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb)
sk                550 net/llc/llc_c_ev.c int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb)
sk                557 net/llc/llc_c_ev.c int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb)
sk                564 net/llc/llc_c_ev.c int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb)
sk                571 net/llc/llc_c_ev.c int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb)
sk                576 net/llc/llc_c_ev.c int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb)
sk                590 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb)
sk                592 net/llc/llc_c_ev.c 	return llc_sk(sk)->data_flag != 1;
sk                595 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb)
sk                597 net/llc/llc_c_ev.c 	return llc_sk(sk)->data_flag;
sk                600 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb)
sk                602 net/llc/llc_c_ev.c 	return llc_sk(sk)->data_flag != 2;
sk                605 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb)
sk                607 net/llc/llc_c_ev.c 	return llc_sk(sk)->p_flag != 1;
sk                621 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb)
sk                623 net/llc/llc_c_ev.c 	return !(skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k);
sk                635 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb)
sk                637 net/llc/llc_c_ev.c 	return skb_queue_len(&llc_sk(sk)->pdu_unack_q) + 1 == llc_sk(sk)->k;
sk                640 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb)
sk                642 net/llc/llc_c_ev.c 	return llc_sk(sk)->p_flag;
sk                645 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb)
sk                650 net/llc/llc_c_ev.c 	return llc_sk(sk)->p_flag == f_bit ? 0 : 1;
sk                653 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb)
sk                655 net/llc/llc_c_ev.c 	return llc_sk(sk)->remote_busy_flag;
sk                658 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb)
sk                660 net/llc/llc_c_ev.c 	return !llc_sk(sk)->remote_busy_flag;
sk                663 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb)
sk                665 net/llc/llc_c_ev.c 	return !(llc_sk(sk)->retry_count < llc_sk(sk)->n2);
sk                668 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb)
sk                670 net/llc/llc_c_ev.c 	return !(llc_sk(sk)->retry_count >= llc_sk(sk)->n2);
sk                673 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb)
sk                675 net/llc/llc_c_ev.c 	return !llc_sk(sk)->s_flag;
sk                678 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb)
sk                680 net/llc/llc_c_ev.c 	return llc_sk(sk)->s_flag;
sk                683 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb)
sk                685 net/llc/llc_c_ev.c 	return !llc_sk(sk)->cause_flag;
sk                688 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb)
sk                690 net/llc/llc_c_ev.c 	return llc_sk(sk)->cause_flag;
sk                693 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb)
sk                701 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb)
sk                709 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb)
sk                717 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
sk                726 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb)
sk                734 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb)
sk                742 net/llc/llc_c_ev.c int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb)
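
A convention worth noting in the llc_c_ev.c entries above: event handlers and qualifiers return 0 when the condition they are named after holds and non-zero otherwise, which is why a *_eq_1 qualifier reads as the negation of the flag it tests. A tiny sketch with a hypothetical function name, assuming llc_sk() and the cause_flag field from net/llc_conn.h:

/* Sketch only: 0 == "condition satisfied", non-zero == "not satisfied". */
static int llc_conn_ev_qlfy_example_flag_eq_1(struct sock *sk,
					      struct sk_buff *skb)
{
	return llc_sk(sk)->cause_flag ? 0 : 1;	/* 0 when cause_flag == 1 */
}
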
sk                 33 net/llc/llc_conn.c static void llc_conn_send_pdus(struct sock *sk);
sk                 34 net/llc/llc_conn.c static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
sk                 35 net/llc/llc_conn.c static int llc_exec_conn_trans_actions(struct sock *sk,
sk                 38 net/llc/llc_conn.c static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
sk                 61 net/llc/llc_conn.c int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
sk                 64 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(skb->sk);
sk                 71 net/llc/llc_conn.c 	rc = llc_conn_service(skb->sk, skb);
sk                 80 net/llc/llc_conn.c 		llc_save_primitive(sk, skb, LLC_DATA_PRIM);
sk                 81 net/llc/llc_conn.c 		if (unlikely(sock_queue_rcv_skb(sk, skb))) {
sk                 97 net/llc/llc_conn.c 		skb_queue_tail(&sk->sk_receive_queue, skb);
sk                 98 net/llc/llc_conn.c 		sk->sk_state_change(sk);
sk                101 net/llc/llc_conn.c 		sock_hold(sk);
sk                102 net/llc/llc_conn.c 		if (sk->sk_type == SOCK_STREAM &&
sk                103 net/llc/llc_conn.c 		    sk->sk_state == TCP_ESTABLISHED) {
sk                104 net/llc/llc_conn.c 			sk->sk_shutdown       = SHUTDOWN_MASK;
sk                105 net/llc/llc_conn.c 			sk->sk_socket->state  = SS_UNCONNECTED;
sk                106 net/llc/llc_conn.c 			sk->sk_state          = TCP_CLOSE;
sk                107 net/llc/llc_conn.c 			if (!sock_flag(sk, SOCK_DEAD)) {
sk                108 net/llc/llc_conn.c 				sock_set_flag(sk, SOCK_DEAD);
sk                109 net/llc/llc_conn.c 				sk->sk_state_change(sk);
sk                112 net/llc/llc_conn.c 		sock_put(sk);
sk                132 net/llc/llc_conn.c 			sk->sk_write_space(sk);
sk                137 net/llc/llc_conn.c 		if (sk->sk_type == SOCK_STREAM &&
sk                138 net/llc/llc_conn.c 		    sk->sk_state == TCP_SYN_SENT) {
sk                140 net/llc/llc_conn.c 				sk->sk_socket->state = SS_UNCONNECTED;
sk                141 net/llc/llc_conn.c 				sk->sk_state         = TCP_CLOSE;
sk                143 net/llc/llc_conn.c 				sk->sk_socket->state = SS_CONNECTED;
sk                144 net/llc/llc_conn.c 				sk->sk_state         = TCP_ESTABLISHED;
sk                146 net/llc/llc_conn.c 			sk->sk_state_change(sk);
sk                150 net/llc/llc_conn.c 		sock_hold(sk);
sk                151 net/llc/llc_conn.c 		if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSING) {
sk                152 net/llc/llc_conn.c 			sk->sk_socket->state = SS_UNCONNECTED;
sk                153 net/llc/llc_conn.c 			sk->sk_state         = TCP_CLOSE;
sk                154 net/llc/llc_conn.c 			sk->sk_state_change(sk);
sk                156 net/llc/llc_conn.c 		sock_put(sk);
sk                177 net/llc/llc_conn.c void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
sk                180 net/llc/llc_conn.c 	skb_queue_tail(&sk->sk_write_queue, skb);
sk                181 net/llc/llc_conn.c 	llc_conn_send_pdus(sk);
sk                193 net/llc/llc_conn.c void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb)
sk                210 net/llc/llc_conn.c void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
sk                218 net/llc/llc_conn.c 	llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus);
sk                225 net/llc/llc_conn.c 	llc = llc_sk(sk);
sk                231 net/llc/llc_conn.c 		skb_queue_tail(&sk->sk_write_queue, skb);
sk                239 net/llc/llc_conn.c 	llc_conn_send_pdus(sk);
sk                253 net/llc/llc_conn.c void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
sk                257 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                260 net/llc/llc_conn.c 	llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus);
sk                272 net/llc/llc_conn.c 		skb_queue_tail(&sk->sk_write_queue, skb);
sk                280 net/llc/llc_conn.c 	llc_conn_send_pdus(sk);
sk                293 net/llc/llc_conn.c int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked)
sk                299 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                327 net/llc/llc_conn.c static void llc_conn_send_pdus(struct sock *sk)
sk                331 net/llc/llc_conn.c 	while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
sk                338 net/llc/llc_conn.c 			skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
sk                356 net/llc/llc_conn.c static int llc_conn_service(struct sock *sk, struct sk_buff *skb)
sk                359 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                365 net/llc/llc_conn.c 	trans = llc_qualify_conn_ev(sk, skb);
sk                367 net/llc/llc_conn.c 		rc = llc_exec_conn_trans_actions(sk, trans, skb);
sk                371 net/llc/llc_conn.c 				sk->sk_state_change(sk);
sk                386 net/llc/llc_conn.c static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
sk                392 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                402 net/llc/llc_conn.c 		if (!((*next_trans)->ev)(sk, skb)) {
sk                411 net/llc/llc_conn.c 			     !(*next_qualifier)(sk, skb); next_qualifier++)
sk                433 net/llc/llc_conn.c static int llc_exec_conn_trans_actions(struct sock *sk,
sk                442 net/llc/llc_conn.c 		int rc2 = (*next_action)(sk, skb);
sk                456 net/llc/llc_conn.c 				   const struct sock *sk)
sk                458 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                518 net/llc/llc_conn.c 	struct sock *sk;
sk                521 net/llc/llc_conn.c 	sk = __llc_lookup_established(sap, daddr, laddr);
sk                523 net/llc/llc_conn.c 	return sk;
sk                528 net/llc/llc_conn.c 				      const struct sock *sk)
sk                530 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                532 net/llc/llc_conn.c 	return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
sk                599 net/llc/llc_conn.c 	struct sock *sk = __llc_lookup_established(sap, daddr, laddr);
sk                601 net/llc/llc_conn.c 	return sk ? : llc_lookup_listener(sap, laddr);
sk                694 net/llc/llc_conn.c void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
sk                696 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                701 net/llc/llc_conn.c 	llc_sk(sk)->sap = sap;
sk                704 net/llc/llc_conn.c 	sock_set_flag(sk, SOCK_RCU_FREE);
sk                706 net/llc/llc_conn.c 	sk_nulls_add_node_rcu(sk, laddr_hb);
sk                719 net/llc/llc_conn.c void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
sk                721 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                724 net/llc/llc_conn.c 	sk_nulls_del_node_init_rcu(sk);
sk                738 net/llc/llc_conn.c static int llc_conn_rcv(struct sock *sk, struct sk_buff *skb)
sk                744 net/llc/llc_conn.c 	return llc_conn_state_process(sk, skb);
sk                747 net/llc/llc_conn.c static struct sock *llc_create_incoming_sock(struct sock *sk,
sk                752 net/llc/llc_conn.c 	struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC,
sk                753 net/llc/llc_conn.c 					  sk->sk_prot, 0);
sk                754 net/llc/llc_conn.c 	struct llc_sock *newllc, *llc = llc_sk(sk);
sk                772 net/llc/llc_conn.c 	struct sock *sk;
sk                779 net/llc/llc_conn.c 	sk = __llc_lookup(sap, &saddr, &daddr);
sk                780 net/llc/llc_conn.c 	if (!sk)
sk                783 net/llc/llc_conn.c 	bh_lock_sock(sk);
sk                792 net/llc/llc_conn.c 	if (unlikely(sk->sk_state == TCP_LISTEN)) {
sk                793 net/llc/llc_conn.c 		struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
sk                807 net/llc/llc_conn.c 		sock_hold(sk);
sk                808 net/llc/llc_conn.c 		skb->sk = sk;
sk                811 net/llc/llc_conn.c 	if (!sock_owned_by_user(sk))
sk                812 net/llc/llc_conn.c 		llc_conn_rcv(sk, skb);
sk                816 net/llc/llc_conn.c 		if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
sk                820 net/llc/llc_conn.c 	bh_unlock_sock(sk);
sk                821 net/llc/llc_conn.c 	sock_put(sk);
sk                846 net/llc/llc_conn.c static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk                849 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                853 net/llc/llc_conn.c 			rc = llc_conn_rcv(sk, skb);
sk                859 net/llc/llc_conn.c 			rc = llc_conn_state_process(sk, skb);
sk                879 net/llc/llc_conn.c static void llc_sk_init(struct sock *sk)
sk                881 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                904 net/llc/llc_conn.c 	sk->sk_backlog_rcv = llc_backlog_rcv;
sk                917 net/llc/llc_conn.c 	struct sock *sk = sk_alloc(net, family, priority, prot, kern);
sk                919 net/llc/llc_conn.c 	if (!sk)
sk                921 net/llc/llc_conn.c 	llc_sk_init(sk);
sk                922 net/llc/llc_conn.c 	sock_init_data(NULL, sk);
sk                925 net/llc/llc_conn.c 	printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n", sk,
sk                929 net/llc/llc_conn.c 	return sk;
sk                932 net/llc/llc_conn.c void llc_sk_stop_all_timers(struct sock *sk, bool sync)
sk                934 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                958 net/llc/llc_conn.c void llc_sk_free(struct sock *sk)
sk                960 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                964 net/llc/llc_conn.c 	llc_sk_stop_all_timers(sk, true);
sk                968 net/llc/llc_conn.c 		skb_queue_len(&sk->sk_write_queue));
sk                970 net/llc/llc_conn.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                971 net/llc/llc_conn.c 	skb_queue_purge(&sk->sk_write_queue);
sk                974 net/llc/llc_conn.c 	if (refcount_read(&sk->sk_refcnt) != 1) {
sk                976 net/llc/llc_conn.c 			sk, __func__, refcount_read(&sk->sk_refcnt));
sk                981 net/llc/llc_conn.c 		printk(KERN_DEBUG "LLC socket %p released in %s, %d are still alive\n", sk,
sk                985 net/llc/llc_conn.c 	sock_put(sk);
sk                995 net/llc/llc_conn.c void llc_sk_reset(struct sock *sk)
sk                997 net/llc/llc_conn.c 	struct llc_sock *llc = llc_sk(sk);
sk                999 net/llc/llc_conn.c 	llc_conn_ac_stop_all_timers(sk, NULL);
sk               1000 net/llc/llc_conn.c 	skb_queue_purge(&sk->sk_write_queue);
sk               1005 net/llc/llc_conn.c 	llc_conn_set_p_flag(sk, 0);
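
The llc_conn.c entries above outline the connection state-machine driver: llc_conn_state_process() hands the event skb to llc_conn_service(), which uses llc_qualify_conn_ev() to find the first transition whose event handler and qualifiers all return 0, then llc_exec_conn_trans_actions() to run that transition's action list. A simplified sketch of that walk; the type and function names below are hypothetical, not the kernel's:

/* Sketch of the transition walk implied by llc_qualify_conn_ev() and
 * llc_exec_conn_trans_actions(); illustrative names and types only.
 */
#include <net/sock.h>
#include <linux/skbuff.h>

typedef int (*llc_fn_t)(struct sock *sk, struct sk_buff *skb);

struct llc_trans_sketch {
	llc_fn_t ev;			/* recognizes the event, 0 == match    */
	llc_fn_t *ev_qualifiers;	/* NULL-terminated, 0 == condition ok  */
	llc_fn_t *ev_actions;		/* NULL-terminated action list         */
};

static int llc_run_trans_sketch(struct sock *sk, struct sk_buff *skb,
				const struct llc_trans_sketch *t)
{
	llc_fn_t *fn;
	int rc = 0;

	if (t->ev(sk, skb))
		return 1;			/* not this transition's event    */
	for (fn = t->ev_qualifiers; *fn; fn++)
		if ((*fn)(sk, skb))
			return 1;		/* a qualifier rejected the event */
	for (fn = t->ev_actions; *fn; fn++)
		if ((*fn)(sk, skb))
			rc = 1;			/* remember any failed action     */
	return rc;
}
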
sk                 44 net/llc/llc_if.c int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
sk                 48 net/llc/llc_if.c 	struct llc_sock *llc = llc_sk(sk);
sk                 63 net/llc/llc_if.c 	return llc_conn_state_process(sk, skb);
sk                 83 net/llc/llc_if.c int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap)
sk                 88 net/llc/llc_if.c 	struct llc_sock *llc = llc_sk(sk);
sk                 98 net/llc/llc_if.c 			sk = existing;
sk                103 net/llc/llc_if.c 	sock_hold(sk);
sk                112 net/llc/llc_if.c 		skb_set_owner_w(skb, sk);
sk                113 net/llc/llc_if.c 		rc = llc_conn_state_process(sk, skb);
sk                116 net/llc/llc_if.c 	sock_put(sk);
sk                129 net/llc/llc_if.c int llc_send_disc(struct sock *sk)
sk                135 net/llc/llc_if.c 	sock_hold(sk);
sk                136 net/llc/llc_if.c 	if (sk->sk_type != SOCK_STREAM || sk->sk_state != TCP_ESTABLISHED ||
sk                137 net/llc/llc_if.c 	    llc_sk(sk)->state == LLC_CONN_STATE_ADM ||
sk                138 net/llc/llc_if.c 	    llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC)
sk                147 net/llc/llc_if.c 	skb_set_owner_w(skb, sk);
sk                148 net/llc/llc_if.c 	sk->sk_state  = TCP_CLOSING;
sk                153 net/llc/llc_if.c 	rc = llc_conn_state_process(sk, skb);
sk                155 net/llc/llc_if.c 	sock_put(sk);
sk                 37 net/llc/llc_proc.c 	struct sock *sk = NULL;
sk                 46 net/llc/llc_proc.c 			sk_nulls_for_each(sk, node, head) {
sk                 54 net/llc/llc_proc.c 	sk = NULL;
sk                 56 net/llc/llc_proc.c 	return sk;
sk                 70 net/llc/llc_proc.c 	struct sock *sk = NULL;
sk                 73 net/llc/llc_proc.c 		sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
sk                 77 net/llc/llc_proc.c 	return sk;
sk                 82 net/llc/llc_proc.c 	struct sock* sk, *next;
sk                 88 net/llc/llc_proc.c 		sk = llc_get_sk_idx(0);
sk                 91 net/llc/llc_proc.c 	sk = v;
sk                 92 net/llc/llc_proc.c 	next = sk_nulls_next(sk);
sk                 94 net/llc/llc_proc.c 		sk = next;
sk                 97 net/llc/llc_proc.c 	llc = llc_sk(sk);
sk                 99 net/llc/llc_proc.c 	sk = laddr_hash_next(sap, llc_sk_laddr_hashfn(sap, &llc->laddr));
sk                100 net/llc/llc_proc.c 	if (sk)
sk                105 net/llc/llc_proc.c 		sk = laddr_hash_next(sap, -1);
sk                106 net/llc/llc_proc.c 		if (sk)
sk                111 net/llc/llc_proc.c 	return sk;
sk                117 net/llc/llc_proc.c 		struct sock *sk = v;
sk                118 net/llc/llc_proc.c 		struct llc_sock *llc = llc_sk(sk);
sk                128 net/llc/llc_proc.c 	struct sock* sk;
sk                136 net/llc/llc_proc.c 	sk = v;
sk                137 net/llc/llc_proc.c 	llc = llc_sk(sk);
sk                140 net/llc/llc_proc.c 	seq_printf(seq, "%2X  %2X ", sk->sk_type, 0);
sk                151 net/llc/llc_proc.c 		   sk_wmem_alloc_get(sk),
sk                152 net/llc/llc_proc.c 		   sk_rmem_alloc_get(sk) - llc->copied_seq,
sk                153 net/llc/llc_proc.c 		   sk->sk_state,
sk                154 net/llc/llc_proc.c 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sk                177 net/llc/llc_proc.c 	struct sock* sk;
sk                186 net/llc/llc_proc.c 	sk = v;
sk                187 net/llc/llc_proc.c 	llc = llc_sk(sk);
sk                198 net/llc/llc_proc.c 		   !!sk->sk_backlog.tail, !!sk->sk_lock.owned);
sk                 47 net/llc/llc_sap.c struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
sk                 63 net/llc/llc_sap.c 		if (sk != NULL)
sk                 64 net/llc/llc_sap.c 			skb_set_owner_w(skb, sk);
sk                 69 net/llc/llc_sap.c void llc_save_primitive(struct sock *sk, struct sk_buff *skb, u8 prim)
sk                 77 net/llc/llc_sap.c 	addr->sllc_family = sk->sk_family;
sk                210 net/llc/llc_sap.c 	if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) {
sk                211 net/llc/llc_sap.c 		llc_save_primitive(skb->sk, skb, ev->prim);
sk                214 net/llc/llc_sap.c 		if (sock_queue_rcv_skb(skb->sk, skb) == 0)
sk                280 net/llc/llc_sap.c 			struct sock *sk)
sk                287 net/llc/llc_sap.c 	sock_hold(sk);
sk                288 net/llc/llc_sap.c 	skb->sk = sk;
sk                295 net/llc/llc_sap.c 				   const struct sock *sk)
sk                297 net/llc/llc_sap.c      struct llc_sock *llc = llc_sk(sk);
sk                299 net/llc/llc_sap.c      return sk->sk_type == SOCK_DGRAM &&
sk                351 net/llc/llc_sap.c 				   const struct sock *sk)
sk                353 net/llc/llc_sap.c      struct llc_sock *llc = llc_sk(sk);
sk                355 net/llc/llc_sap.c      return sk->sk_type == SOCK_DGRAM &&
sk                391 net/llc/llc_sap.c 	struct sock *sk;
sk                399 net/llc/llc_sap.c 		sk = &llc->sk;
sk                401 net/llc/llc_sap.c 		if (!llc_mcast_match(sap, laddr, skb, sk))
sk                404 net/llc/llc_sap.c 		sock_hold(sk);
sk                406 net/llc/llc_sap.c 			stack[i++] = sk;
sk                429 net/llc/llc_sap.c 		struct sock *sk = llc_lookup_dgram(sap, &laddr);
sk                430 net/llc/llc_sap.c 		if (sk) {
sk                431 net/llc/llc_sap.c 			llc_sap_rcv(sap, skb, sk);
sk                432 net/llc/llc_sap.c 			sock_put(sk);
sk               2734 net/mac80211/tx.c 	if (unlikely(!multicast && skb->sk &&
sk               3448 net/mac80211/tx.c 	if (skb->sk && skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)
sk               3870 net/mac80211/tx.c 		sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
sk               1254 net/mpls/af_mpls.c 	struct net *net = sock_net(in_skb->sk);
sk               1303 net/mpls/af_mpls.c 	struct net *net = sock_net(skb->sk);
sk               1848 net/mpls/af_mpls.c 	cfg->rc_nlinfo.nl_net	= sock_net(skb->sk);
sk               2158 net/mpls/af_mpls.c 	struct net *net = sock_net(skb->sk);
sk               2339 net/mpls/af_mpls.c 	struct net *net = sock_net(in_skb->sk);
sk                227 net/ncsi/ncsi-netlink.c 	ndp = ndp_from_ifindex(get_net(sock_net(skb->sk)),
sk                288 net/ncsi/ncsi-netlink.c 	ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
sk                364 net/ncsi/ncsi-netlink.c 	ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
sk                426 net/ncsi/ncsi-netlink.c 	ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
sk                603 net/ncsi/ncsi-netlink.c 	ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
sk                658 net/ncsi/ncsi-netlink.c 	ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
sk               1317 net/netfilter/ipset/ip_set_core.c 	struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
sk               1369 net/netfilter/ipset/ip_set_core.c 	struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
sk               2054 net/netfilter/ipset/ip_set_core.c ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
sk               2059 net/netfilter/ipset/ip_set_core.c 	struct net *net = sock_net(sk);
sk               1354 net/netfilter/ipvs/ip_vs_core.c 	struct sock *sk;
sk               1362 net/netfilter/ipvs/ip_vs_core.c 	sk = skb_to_full_sk(skb);
sk               1364 net/netfilter/ipvs/ip_vs_core.c 	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
sk               1367 net/netfilter/ipvs/ip_vs_core.c 		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
sk               1986 net/netfilter/ipvs/ip_vs_core.c 	struct sock *sk;
sk               2014 net/netfilter/ipvs/ip_vs_core.c 	sk = skb_to_full_sk(skb);
sk               2015 net/netfilter/ipvs/ip_vs_core.c 	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
sk               2018 net/netfilter/ipvs/ip_vs_core.c 		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
sk               2411 net/netfilter/ipvs/ip_vs_ctl.c do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
sk               2413 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(sk);
sk               2424 net/netfilter/ipvs/ip_vs_ctl.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               2728 net/netfilter/ipvs/ip_vs_ctl.c do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
sk               2733 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(sk);
sk               2738 net/netfilter/ipvs/ip_vs_ctl.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               3109 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
sk               3322 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
sk               3481 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
sk               3602 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
sk               3631 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
sk               3758 net/netfilter/ipvs/ip_vs_ctl.c 	struct net *net = sock_net(skb->sk);
sk               1279 net/netfilter/ipvs/ip_vs_sync.c static void set_sock_size(struct sock *sk, int mode, int val)
sk               1283 net/netfilter/ipvs/ip_vs_sync.c 	lock_sock(sk);
sk               1287 net/netfilter/ipvs/ip_vs_sync.c 		sk->sk_sndbuf = val * 2;
sk               1288 net/netfilter/ipvs/ip_vs_sync.c 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk               1292 net/netfilter/ipvs/ip_vs_sync.c 		sk->sk_rcvbuf = val * 2;
sk               1293 net/netfilter/ipvs/ip_vs_sync.c 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk               1295 net/netfilter/ipvs/ip_vs_sync.c 	release_sock(sk);
sk               1301 net/netfilter/ipvs/ip_vs_sync.c static void set_mcast_loop(struct sock *sk, u_char loop)
sk               1303 net/netfilter/ipvs/ip_vs_sync.c 	struct inet_sock *inet = inet_sk(sk);
sk               1306 net/netfilter/ipvs/ip_vs_sync.c 	lock_sock(sk);
sk               1309 net/netfilter/ipvs/ip_vs_sync.c 	if (sk->sk_family == AF_INET6) {
sk               1310 net/netfilter/ipvs/ip_vs_sync.c 		struct ipv6_pinfo *np = inet6_sk(sk);
sk               1316 net/netfilter/ipvs/ip_vs_sync.c 	release_sock(sk);
sk               1322 net/netfilter/ipvs/ip_vs_sync.c static void set_mcast_ttl(struct sock *sk, u_char ttl)
sk               1324 net/netfilter/ipvs/ip_vs_sync.c 	struct inet_sock *inet = inet_sk(sk);
sk               1327 net/netfilter/ipvs/ip_vs_sync.c 	lock_sock(sk);
sk               1330 net/netfilter/ipvs/ip_vs_sync.c 	if (sk->sk_family == AF_INET6) {
sk               1331 net/netfilter/ipvs/ip_vs_sync.c 		struct ipv6_pinfo *np = inet6_sk(sk);
sk               1337 net/netfilter/ipvs/ip_vs_sync.c 	release_sock(sk);
sk               1341 net/netfilter/ipvs/ip_vs_sync.c static void set_mcast_pmtudisc(struct sock *sk, int val)
sk               1343 net/netfilter/ipvs/ip_vs_sync.c 	struct inet_sock *inet = inet_sk(sk);
sk               1346 net/netfilter/ipvs/ip_vs_sync.c 	lock_sock(sk);
sk               1349 net/netfilter/ipvs/ip_vs_sync.c 	if (sk->sk_family == AF_INET6) {
sk               1350 net/netfilter/ipvs/ip_vs_sync.c 		struct ipv6_pinfo *np = inet6_sk(sk);
sk               1356 net/netfilter/ipvs/ip_vs_sync.c 	release_sock(sk);
sk               1362 net/netfilter/ipvs/ip_vs_sync.c static int set_mcast_if(struct sock *sk, struct net_device *dev)
sk               1364 net/netfilter/ipvs/ip_vs_sync.c 	struct inet_sock *inet = inet_sk(sk);
sk               1366 net/netfilter/ipvs/ip_vs_sync.c 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
sk               1369 net/netfilter/ipvs/ip_vs_sync.c 	lock_sock(sk);
sk               1373 net/netfilter/ipvs/ip_vs_sync.c 	if (sk->sk_family == AF_INET6) {
sk               1374 net/netfilter/ipvs/ip_vs_sync.c 		struct ipv6_pinfo *np = inet6_sk(sk);
sk               1380 net/netfilter/ipvs/ip_vs_sync.c 	release_sock(sk);
sk               1392 net/netfilter/ipvs/ip_vs_sync.c join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
sk               1400 net/netfilter/ipvs/ip_vs_sync.c 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
sk               1405 net/netfilter/ipvs/ip_vs_sync.c 	lock_sock(sk);
sk               1406 net/netfilter/ipvs/ip_vs_sync.c 	ret = ip_mc_join_group(sk, &mreq);
sk               1407 net/netfilter/ipvs/ip_vs_sync.c 	release_sock(sk);
sk               1413 net/netfilter/ipvs/ip_vs_sync.c static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
sk               1418 net/netfilter/ipvs/ip_vs_sync.c 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
sk               1421 net/netfilter/ipvs/ip_vs_sync.c 	lock_sock(sk);
sk               1422 net/netfilter/ipvs/ip_vs_sync.c 	ret = ipv6_sock_mc_join(sk, dev->ifindex, addr);
sk               1423 net/netfilter/ipvs/ip_vs_sync.c 	release_sock(sk);
sk               1489 net/netfilter/ipvs/ip_vs_sync.c 	result = set_mcast_if(sock->sk, dev);
sk               1495 net/netfilter/ipvs/ip_vs_sync.c 	set_mcast_loop(sock->sk, 0);
sk               1496 net/netfilter/ipvs/ip_vs_sync.c 	set_mcast_ttl(sock->sk, ipvs->mcfg.mcast_ttl);
sk               1498 net/netfilter/ipvs/ip_vs_sync.c 	set_mcast_pmtudisc(sock->sk, IP_PMTUDISC_DONT);
sk               1501 net/netfilter/ipvs/ip_vs_sync.c 		set_sock_size(sock->sk, 1, result);
sk               1547 net/netfilter/ipvs/ip_vs_sync.c 	sock->sk->sk_reuse = SK_CAN_REUSE;
sk               1550 net/netfilter/ipvs/ip_vs_sync.c 		set_sock_size(sock->sk, 0, result);
sk               1553 net/netfilter/ipvs/ip_vs_sync.c 	sock->sk->sk_bound_dev_if = dev->ifindex;
sk               1563 net/netfilter/ipvs/ip_vs_sync.c 		result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
sk               1567 net/netfilter/ipvs/ip_vs_sync.c 		result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
sk               1669 net/netfilter/ipvs/ip_vs_sync.c 	struct sock *sk = tinfo->sock->sk;
sk               1688 net/netfilter/ipvs/ip_vs_sync.c 			__wait_event_interruptible(*sk_sleep(sk),
sk               1689 net/netfilter/ipvs/ip_vs_sync.c 						   sock_writeable(sk) ||
sk               1727 net/netfilter/ipvs/ip_vs_sync.c 		wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
sk               1728 net/netfilter/ipvs/ip_vs_sync.c 			 !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
sk               1732 net/netfilter/ipvs/ip_vs_sync.c 		while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
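
The ip_vs_sync.c helpers above configure kernel-created multicast sockets directly under lock_sock() rather than going through setsockopt(). A minimal sketch of the IPv4 group join done by join_mcast_group(), assuming only ip_mc_join_group() from linux/igmp.h; error handling is trimmed and the helper name is hypothetical:

/* Sketch of an in-kernel IPv4 multicast join on a kernel-owned socket. */
#include <net/sock.h>
#include <linux/igmp.h>

static int mcast_join_sketch(struct sock *sk, __be32 group, int ifindex)
{
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = group,
		.imr_ifindex          = ifindex,
	};
	int ret;

	lock_sock(sk);
	ret = ip_mc_join_group(sk, &mreq);	/* same call the sync thread setup uses */
	release_sock(sk);
	return ret;
}
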
sk                207 net/netfilter/ipvs/ip_vs_xmit.c 	struct sock *sk = skb->sk;
sk                210 net/netfilter/ipvs/ip_vs_xmit.c 	if (!skb->dev && sk && sk_fullsock(sk))
sk                211 net/netfilter/ipvs/ip_vs_xmit.c 		ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true);
sk                965 net/netfilter/ipvs/ip_vs_xmit.c 		if (skb->sk)
sk                966 net/netfilter/ipvs/ip_vs_xmit.c 			skb_set_owner_w(new_skb, skb->sk);
sk               1266 net/netfilter/ipvs/ip_vs_xmit.c 		ip_local_out(net, skb->sk, skb);
sk               1413 net/netfilter/ipvs/ip_vs_xmit.c 		ip6_local_out(net, skb->sk, skb);
sk                 31 net/netfilter/nf_conntrack_broadcast.c 	if (skb->sk == NULL || !net_eq(nf_ct_net(ct), sock_net(skb->sk)))
sk                906 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
sk               1406 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
sk               2243 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
sk               2331 net/netfilter/nf_conntrack_netlink.c 					  sock_net(skb->sk));
sk               2893 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
sk               3466 net/netfilter/nf_conntrack_netlink.c 	struct net *net = sock_net(skb->sk);
sk                239 net/netfilter/nf_conntrack_proto.c getorigdst(struct sock *sk, int optval, void __user *user, int *len)
sk                241 net/netfilter/nf_conntrack_proto.c 	const struct inet_sock *inet = inet_sk(sk);
sk                247 net/netfilter/nf_conntrack_proto.c 	lock_sock(sk);
sk                253 net/netfilter/nf_conntrack_proto.c 	tuple.dst.protonum = sk->sk_protocol;
sk                254 net/netfilter/nf_conntrack_proto.c 	release_sock(sk);
sk                269 net/netfilter/nf_conntrack_proto.c 	h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
sk                305 net/netfilter/nf_conntrack_proto.c ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
sk                308 net/netfilter/nf_conntrack_proto.c 	const struct ipv6_pinfo *inet6 = inet6_sk(sk);
sk                309 net/netfilter/nf_conntrack_proto.c 	const struct inet_sock *inet = inet_sk(sk);
sk                316 net/netfilter/nf_conntrack_proto.c 	lock_sock(sk);
sk                317 net/netfilter/nf_conntrack_proto.c 	tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
sk                319 net/netfilter/nf_conntrack_proto.c 	tuple.dst.u3.in6 = sk->sk_v6_daddr;
sk                321 net/netfilter/nf_conntrack_proto.c 	tuple.dst.protonum = sk->sk_protocol;
sk                322 net/netfilter/nf_conntrack_proto.c 	bound_dev_if = sk->sk_bound_dev_if;
sk                324 net/netfilter/nf_conntrack_proto.c 	release_sock(sk);
sk                333 net/netfilter/nf_conntrack_proto.c 	h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
sk                232 net/netfilter/nf_flow_table_ip.c 	dst_output(state->net, state->sk, skb);
sk                133 net/netfilter/nf_log_common.c 			    struct sock *sk)
sk                135 net/netfilter/nf_log_common.c 	if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk)))
sk                138 net/netfilter/nf_log_common.c 	read_lock_bh(&sk->sk_callback_lock);
sk                139 net/netfilter/nf_log_common.c 	if (sk->sk_socket && sk->sk_socket->file) {
sk                140 net/netfilter/nf_log_common.c 		const struct cred *cred = sk->sk_socket->file->f_cred;
sk                145 net/netfilter/nf_log_common.c 	read_unlock_bh(&sk->sk_callback_lock);
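
nf_log_common.c above, and later nfnetlink_log.c, nfnetlink_queue.c and nft_meta.c, all read socket credentials the same way: only full sockets in the matching netns are examined, and sk->sk_callback_lock is held while dereferencing sk->sk_socket->file. A hedged sketch of that pattern; the pr_debug() output and function name are stand-ins, not what any of those callers actually emit:

/* Sketch of the shared credential-read pattern; logged format is illustrative. */
#include <net/sock.h>
#include <linux/cred.h>
#include <linux/fs.h>

static void log_sk_uid_sketch(struct net *net, struct sock *sk)
{
	if (!sk || !sk_fullsock(sk) || !net_eq(net, sock_net(sk)))
		return;

	read_lock_bh(&sk->sk_callback_lock);
	if (sk->sk_socket && sk->sk_socket->file) {
		const struct cred *cred = sk->sk_socket->file->f_cred;

		pr_debug("UID=%u GID=%u\n",
			 from_kuid_munged(&init_user_ns, cred->fsuid),
			 from_kgid_munged(&init_user_ns, cred->fsgid));
	}
	read_unlock_bh(&sk->sk_callback_lock);
}
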
sk                155 net/netfilter/nf_nat_core.c 	struct sock *sk = skb->sk;
sk                168 net/netfilter/nf_nat_core.c 	if (sk && !net_eq(net, sock_net(sk)))
sk                169 net/netfilter/nf_nat_core.c 		sk = NULL;
sk                171 net/netfilter/nf_nat_core.c 	dst = xfrm_lookup(net, dst, &fl, sk, 0);
sk                 76 net/netfilter/nf_queue.c 	if (state->sk)
sk                 77 net/netfilter/nf_queue.c 		sock_put(state->sk);
sk                110 net/netfilter/nf_queue.c 	if (state->sk)
sk                111 net/netfilter/nf_queue.c 		sock_hold(state->sk);
sk                337 net/netfilter/nf_queue.c 		entry->state.okfn(entry->state.net, entry->state.sk, skb);
sk                 62 net/netfilter/nf_sockopt.c static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf,
sk                 93 net/netfilter/nf_sockopt.c static int nf_sockopt(struct sock *sk, u_int8_t pf, int val,
sk                 99 net/netfilter/nf_sockopt.c 	ops = nf_sockopt_find(sk, pf, val, get);
sk                104 net/netfilter/nf_sockopt.c 		ret = ops->get(sk, val, opt, len);
sk                106 net/netfilter/nf_sockopt.c 		ret = ops->set(sk, val, opt, *len);
sk                112 net/netfilter/nf_sockopt.c int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
sk                115 net/netfilter/nf_sockopt.c 	return nf_sockopt(sk, pf, val, opt, &len, 0);
sk                119 net/netfilter/nf_sockopt.c int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
sk                122 net/netfilter/nf_sockopt.c 	return nf_sockopt(sk, pf, val, opt, len, 1);
sk                127 net/netfilter/nf_sockopt.c static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val,
sk                133 net/netfilter/nf_sockopt.c 	ops = nf_sockopt_find(sk, pf, val, get);
sk                139 net/netfilter/nf_sockopt.c 			ret = ops->compat_get(sk, val, opt, len);
sk                141 net/netfilter/nf_sockopt.c 			ret = ops->get(sk, val, opt, len);
sk                144 net/netfilter/nf_sockopt.c 			ret = ops->compat_set(sk, val, opt, *len);
sk                146 net/netfilter/nf_sockopt.c 			ret = ops->set(sk, val, opt, *len);
sk                153 net/netfilter/nf_sockopt.c int compat_nf_setsockopt(struct sock *sk, u_int8_t pf,
sk                156 net/netfilter/nf_sockopt.c 	return compat_nf_sockopt(sk, pf, val, opt, &len, 0);
sk                160 net/netfilter/nf_sockopt.c int compat_nf_getsockopt(struct sock *sk, u_int8_t pf,
sk                163 net/netfilter/nf_sockopt.c 	return compat_nf_sockopt(sk, pf, val, opt, len, 1);
sk                457 net/netfilter/nf_synproxy_core.c 	ip_local_out(net, nskb->sk, nskb);
sk                870 net/netfilter/nf_synproxy_core.c 	ip6_local_out(net, nskb->sk, nskb);
sk                660 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
sk               1365 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
sk               2442 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
sk               2481 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
sk               3409 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
sk               4105 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
sk               5356 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
sk               6049 net/netfilter/nf_tables_api.c 	struct net *net = sock_net(skb->sk);
sk                162 net/netfilter/nfnetlink.c 	struct net *net = sock_net(skb->sk);
sk                301 net/netfilter/nfnetlink.c 	struct net *net = sock_net(skb->sk);
sk                190 net/netfilter/nfnetlink_acct.c 	struct net *net = sock_net(skb->sk);
sk                209 net/netfilter/nfnetlink_cttimeout.c 	struct net *net = sock_net(skb->sk);
sk                458 net/netfilter/nfnetlink_log.c 	struct sock *sk;
sk                602 net/netfilter/nfnetlink_log.c 	sk = skb->sk;
sk                603 net/netfilter/nfnetlink_log.c 	if (sk && sk_fullsock(sk)) {
sk                604 net/netfilter/nfnetlink_log.c 		read_lock_bh(&sk->sk_callback_lock);
sk                605 net/netfilter/nfnetlink_log.c 		if (sk->sk_socket && sk->sk_socket->file) {
sk                606 net/netfilter/nfnetlink_log.c 			struct file *file = sk->sk_socket->file;
sk                611 net/netfilter/nfnetlink_log.c 			read_unlock_bh(&sk->sk_callback_lock);
sk                616 net/netfilter/nfnetlink_log.c 			read_unlock_bh(&sk->sk_callback_lock);
sk                937 net/netfilter/nfnetlink_log.c 					       sk_user_ns(NETLINK_CB(skb).sk));
sk                279 net/netfilter/nfnetlink_queue.c static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
sk                283 net/netfilter/nfnetlink_queue.c 	if (!sk_fullsock(sk))
sk                286 net/netfilter/nfnetlink_queue.c 	read_lock_bh(&sk->sk_callback_lock);
sk                287 net/netfilter/nfnetlink_queue.c 	if (sk->sk_socket && sk->sk_socket->file) {
sk                288 net/netfilter/nfnetlink_queue.c 		cred = sk->sk_socket->file->f_cred;
sk                296 net/netfilter/nfnetlink_queue.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                300 net/netfilter/nfnetlink_queue.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                308 net/netfilter/nfnetlink_queue.c 	if (!skb || !sk_fullsock(skb->sk))
sk                311 net/netfilter/nfnetlink_queue.c 	read_lock_bh(&skb->sk->sk_callback_lock);
sk                316 net/netfilter/nfnetlink_queue.c 	read_unlock_bh(&skb->sk->sk_callback_lock);
sk                416 net/netfilter/nfnetlink_queue.c 	   (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
sk                460 net/netfilter/nfnetlink_queue.c 	if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
sk                592 net/netfilter/nfnetlink_queue.c 	if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
sk                593 net/netfilter/nfnetlink_queue.c 	    nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
sk                 66 net/netfilter/nft_meta.c 	struct sock *sk;
sk                113 net/netfilter/nft_meta.c 		sk = skb_to_full_sk(skb);
sk                114 net/netfilter/nft_meta.c 		if (!sk || !sk_fullsock(sk) ||
sk                115 net/netfilter/nft_meta.c 		    !net_eq(nft_net(pkt), sock_net(sk)))
sk                118 net/netfilter/nft_meta.c 		read_lock_bh(&sk->sk_callback_lock);
sk                119 net/netfilter/nft_meta.c 		if (sk->sk_socket == NULL ||
sk                120 net/netfilter/nft_meta.c 		    sk->sk_socket->file == NULL) {
sk                121 net/netfilter/nft_meta.c 			read_unlock_bh(&sk->sk_callback_lock);
sk                126 net/netfilter/nft_meta.c 				sk->sk_socket->file->f_cred->fsuid);
sk                127 net/netfilter/nft_meta.c 		read_unlock_bh(&sk->sk_callback_lock);
sk                130 net/netfilter/nft_meta.c 		sk = skb_to_full_sk(skb);
sk                131 net/netfilter/nft_meta.c 		if (!sk || !sk_fullsock(sk) ||
sk                132 net/netfilter/nft_meta.c 		    !net_eq(nft_net(pkt), sock_net(sk)))
sk                135 net/netfilter/nft_meta.c 		read_lock_bh(&sk->sk_callback_lock);
sk                136 net/netfilter/nft_meta.c 		if (sk->sk_socket == NULL ||
sk                137 net/netfilter/nft_meta.c 		    sk->sk_socket->file == NULL) {
sk                138 net/netfilter/nft_meta.c 			read_unlock_bh(&sk->sk_callback_lock);
sk                142 net/netfilter/nft_meta.c 				 sk->sk_socket->file->f_cred->fsgid);
sk                143 net/netfilter/nft_meta.c 		read_unlock_bh(&sk->sk_callback_lock);
sk                222 net/netfilter/nft_meta.c 		sk = skb_to_full_sk(skb);
sk                223 net/netfilter/nft_meta.c 		if (!sk || !sk_fullsock(sk) ||
sk                224 net/netfilter/nft_meta.c 		    !net_eq(nft_net(pkt), sock_net(sk)))
sk                226 net/netfilter/nft_meta.c 		*dest = sock_cgroup_classid(&sk->sk_cgrp_data);
sk                 23 net/netfilter/nft_socket.c 	struct sock *sk = skb->sk;
sk                 26 net/netfilter/nft_socket.c 	if (sk && !net_eq(nft_net(pkt), sock_net(sk)))
sk                 27 net/netfilter/nft_socket.c 		sk = NULL;
sk                 29 net/netfilter/nft_socket.c 	if (!sk)
sk                 32 net/netfilter/nft_socket.c 			sk = nf_sk_lookup_slow_v4(nft_net(pkt), skb, nft_in(pkt));
sk                 36 net/netfilter/nft_socket.c 			sk = nf_sk_lookup_slow_v6(nft_net(pkt), skb, nft_in(pkt));
sk                 45 net/netfilter/nft_socket.c 	if (!sk) {
sk                 52 net/netfilter/nft_socket.c 		nft_reg_store8(dest, inet_sk_transparent(sk));
sk                 55 net/netfilter/nft_socket.c 		if (sk_fullsock(sk)) {
sk                 56 net/netfilter/nft_socket.c 			*dest = sk->sk_mark;
sk                 67 net/netfilter/nft_socket.c 	if (sk != skb->sk)
sk                 68 net/netfilter/nft_socket.c 		sock_gen_put(sk);
sk                 31 net/netfilter/nft_tproxy.c 	struct sock *sk;
sk                 43 net/netfilter/nft_tproxy.c 	sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
sk                 58 net/netfilter/nft_tproxy.c 	if (sk && sk->sk_state == TCP_TIME_WAIT) {
sk                 60 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_handle_time_wait4(nft_net(pkt), skb, taddr, tport, sk);
sk                 61 net/netfilter/nft_tproxy.c 	} else if (!sk) {
sk                 65 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_get_sock_v4(nft_net(pkt), skb, iph->protocol,
sk                 71 net/netfilter/nft_tproxy.c 	if (sk && nf_tproxy_sk_is_transparent(sk))
sk                 72 net/netfilter/nft_tproxy.c 		nf_tproxy_assign_sock(skb, sk);
sk                 89 net/netfilter/nft_tproxy.c 	struct sock *sk;
sk                110 net/netfilter/nft_tproxy.c 	sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff, l4proto,
sk                125 net/netfilter/nft_tproxy.c 	if (sk && sk->sk_state == TCP_TIME_WAIT) {
sk                127 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_handle_time_wait6(skb, l4proto, thoff,
sk                131 net/netfilter/nft_tproxy.c 						 sk);
sk                132 net/netfilter/nft_tproxy.c 	} else if (!sk) {
sk                136 net/netfilter/nft_tproxy.c 		sk = nf_tproxy_get_sock_v6(nft_net(pkt), skb, thoff,
sk                143 net/netfilter/nft_tproxy.c 	if (sk && nf_tproxy_sk_is_transparent(sk))
sk                144 net/netfilter/nft_tproxy.c 		nf_tproxy_assign_sock(skb, sk);
sk                 41 net/netfilter/xt_TPROXY.c 	struct sock *sk;
sk                 51 net/netfilter/xt_TPROXY.c 	sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
sk                 61 net/netfilter/xt_TPROXY.c 	if (sk && sk->sk_state == TCP_TIME_WAIT)
sk                 63 net/netfilter/xt_TPROXY.c 		sk = nf_tproxy_handle_time_wait4(net, skb, laddr, lport, sk);
sk                 64 net/netfilter/xt_TPROXY.c 	else if (!sk)
sk                 67 net/netfilter/xt_TPROXY.c 		sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
sk                 73 net/netfilter/xt_TPROXY.c 	if (sk && nf_tproxy_sk_is_transparent(sk)) {
sk                 82 net/netfilter/xt_TPROXY.c 		nf_tproxy_assign_sock(skb, sk);
sk                118 net/netfilter/xt_TPROXY.c 	struct sock *sk;
sk                140 net/netfilter/xt_TPROXY.c 	sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto,
sk                149 net/netfilter/xt_TPROXY.c 	if (sk && sk->sk_state == TCP_TIME_WAIT) {
sk                152 net/netfilter/xt_TPROXY.c 		sk = nf_tproxy_handle_time_wait6(skb, tproto, thoff,
sk                156 net/netfilter/xt_TPROXY.c 					      sk);
sk                158 net/netfilter/xt_TPROXY.c 	else if (!sk)
sk                161 net/netfilter/xt_TPROXY.c 		sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff,
sk                167 net/netfilter/xt_TPROXY.c 	if (sk && nf_tproxy_sk_is_transparent(sk)) {
sk                176 net/netfilter/xt_TPROXY.c 		nf_tproxy_assign_sock(skb, sk);
sk                104 net/netfilter/xt_cgroup.c 	struct sock *sk = skb->sk;
sk                106 net/netfilter/xt_cgroup.c 	if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
sk                109 net/netfilter/xt_cgroup.c 	return (info->id == sock_cgroup_classid(&skb->sk->sk_cgrp_data)) ^
sk                116 net/netfilter/xt_cgroup.c 	struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
sk                118 net/netfilter/xt_cgroup.c 	struct sock *sk = skb->sk;
sk                120 net/netfilter/xt_cgroup.c 	if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
sk                134 net/netfilter/xt_cgroup.c 	struct sock_cgroup_data *skcd = &skb->sk->sk_cgrp_data;
sk                136 net/netfilter/xt_cgroup.c 	struct sock *sk = skb->sk;
sk                138 net/netfilter/xt_cgroup.c 	if (!sk || !sk_fullsock(sk) || !net_eq(xt_net(par), sock_net(sk)))
sk                 67 net/netfilter/xt_owner.c 	struct sock *sk = skb_to_full_sk(skb);
sk                 70 net/netfilter/xt_owner.c 	if (!sk || !sk->sk_socket || !net_eq(net, sock_net(sk)))
sk                 79 net/netfilter/xt_owner.c 	filp = sk->sk_socket->file;
sk                266 net/netfilter/xt_recent.c 	    (!skb->sk || !net_eq(net, sock_net(skb->sk))))
sk                 53 net/netfilter/xt_socket.c 	struct sock *sk = skb->sk;
sk                 55 net/netfilter/xt_socket.c 	if (sk && !net_eq(xt_net(par), sock_net(sk)))
sk                 56 net/netfilter/xt_socket.c 		sk = NULL;
sk                 58 net/netfilter/xt_socket.c 	if (!sk)
sk                 59 net/netfilter/xt_socket.c 		sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par));
sk                 61 net/netfilter/xt_socket.c 	if (sk) {
sk                 69 net/netfilter/xt_socket.c 			    sk_fullsock(sk) &&
sk                 70 net/netfilter/xt_socket.c 			    inet_sk(sk)->inet_rcv_saddr == 0);
sk                 76 net/netfilter/xt_socket.c 			transparent = inet_sk_transparent(sk);
sk                 79 net/netfilter/xt_socket.c 		    transparent && sk_fullsock(sk))
sk                 80 net/netfilter/xt_socket.c 			pskb->mark = sk->sk_mark;
sk                 82 net/netfilter/xt_socket.c 		if (sk != skb->sk)
sk                 83 net/netfilter/xt_socket.c 			sock_gen_put(sk);
sk                 86 net/netfilter/xt_socket.c 			sk = NULL;
sk                 89 net/netfilter/xt_socket.c 	return sk != NULL;
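
The xt_socket fragments (IPv4 above, IPv6 below) share one reference-counting rule: an early-demuxed skb->sk may be reused only if it belongs to the hook's netns; otherwise a slow hash lookup runs, and sock_gen_put() is called only on sockets that came from that slow path. A sketch of the IPv4 side, reduced to a transparent-only check so the option-flag handling stays out of the way (socket_probe_sketch() is a hypothetical name):

    /* Sketch of the skb->sk reuse / slow-lookup / sock_gen_put() pattern. */
    #include <linux/skbuff.h>
    #include <linux/netfilter/x_tables.h>
    #include <net/netfilter/nf_socket.h>
    #include <net/sock.h>
    #include <net/tcp.h>

    static bool socket_probe_sketch(const struct sk_buff *skb,
                                    struct xt_action_param *par)
    {
            struct sock *sk = skb->sk;
            bool transparent = false;

            /* An early-demuxed socket from another netns cannot be used. */
            if (sk && !net_eq(xt_net(par), sock_net(sk)))
                    sk = NULL;

            if (!sk)
                    sk = nf_sk_lookup_slow_v4(xt_net(par), skb, xt_in(par));

            if (sk) {
                    transparent = inet_sk_transparent(sk);

                    /* Only the slow lookup took a reference of its own. */
                    if (sk != skb->sk)
                            sock_gen_put(sk);

                    if (!transparent)
                            sk = NULL;
            }

            return sk != NULL;
    }
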
sk                114 net/netfilter/xt_socket.c 	struct sock *sk = skb->sk;
sk                116 net/netfilter/xt_socket.c 	if (sk && !net_eq(xt_net(par), sock_net(sk)))
sk                117 net/netfilter/xt_socket.c 		sk = NULL;
sk                119 net/netfilter/xt_socket.c 	if (!sk)
sk                120 net/netfilter/xt_socket.c 		sk = nf_sk_lookup_slow_v6(xt_net(par), skb, xt_in(par));
sk                122 net/netfilter/xt_socket.c 	if (sk) {
sk                130 net/netfilter/xt_socket.c 			    sk_fullsock(sk) &&
sk                131 net/netfilter/xt_socket.c 			    ipv6_addr_any(&sk->sk_v6_rcv_saddr));
sk                137 net/netfilter/xt_socket.c 			transparent = inet_sk_transparent(sk);
sk                140 net/netfilter/xt_socket.c 		    transparent && sk_fullsock(sk))
sk                141 net/netfilter/xt_socket.c 			pskb->mark = sk->sk_mark;
sk                143 net/netfilter/xt_socket.c 		if (sk != skb->sk)
sk                144 net/netfilter/xt_socket.c 			sock_gen_put(sk);
sk                147 net/netfilter/xt_socket.c 			sk = NULL;
sk                150 net/netfilter/xt_socket.c 	return sk != NULL;
sk                520 net/netlabel/netlabel_calipso.c int calipso_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
sk                526 net/netlabel/netlabel_calipso.c 		ret_val = ops->sock_getattr(sk, secattr);
sk                544 net/netlabel/netlabel_calipso.c int calipso_sock_setattr(struct sock *sk,
sk                552 net/netlabel/netlabel_calipso.c 		ret_val = ops->sock_setattr(sk, doi_def, secattr);
sk                564 net/netlabel/netlabel_calipso.c void calipso_sock_delattr(struct sock *sk)
sk                569 net/netlabel/netlabel_calipso.c 		ops->sock_delattr(sk);
sk                117 net/netlabel/netlabel_calipso.h int calipso_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr);
sk                118 net/netlabel/netlabel_calipso.h int calipso_sock_setattr(struct sock *sk,
sk                121 net/netlabel/netlabel_calipso.h void calipso_sock_delattr(struct sock *sk);
sk                975 net/netlabel/netlabel_kapi.c int netlbl_sock_setattr(struct sock *sk,
sk                995 net/netlabel/netlabel_kapi.c 			ret_val = cipso_v4_sock_setattr(sk,
sk               1013 net/netlabel/netlabel_kapi.c 			ret_val = calipso_sock_setattr(sk,
sk               1043 net/netlabel/netlabel_kapi.c void netlbl_sock_delattr(struct sock *sk)
sk               1045 net/netlabel/netlabel_kapi.c 	switch (sk->sk_family) {
sk               1047 net/netlabel/netlabel_kapi.c 		cipso_v4_sock_delattr(sk);
sk               1051 net/netlabel/netlabel_kapi.c 		calipso_sock_delattr(sk);
sk               1069 net/netlabel/netlabel_kapi.c int netlbl_sock_getattr(struct sock *sk,
sk               1074 net/netlabel/netlabel_kapi.c 	switch (sk->sk_family) {
sk               1076 net/netlabel/netlabel_kapi.c 		ret_val = cipso_v4_sock_getattr(sk, secattr);
sk               1080 net/netlabel/netlabel_kapi.c 		ret_val = calipso_sock_getattr(sk, secattr);
sk               1102 net/netlabel/netlabel_kapi.c int netlbl_conn_setattr(struct sock *sk,
sk               1125 net/netlabel/netlabel_kapi.c 			ret_val = cipso_v4_sock_setattr(sk,
sk               1131 net/netlabel/netlabel_kapi.c 			netlbl_sock_delattr(sk);
sk               1149 net/netlabel/netlabel_kapi.c 			ret_val = calipso_sock_setattr(sk,
sk               1155 net/netlabel/netlabel_kapi.c 			netlbl_sock_delattr(sk);
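
netlbl_sock_setattr(), netlbl_sock_delattr() and netlbl_sock_getattr() above all dispatch purely on sk->sk_family: CIPSO covers AF_INET, CALIPSO covers AF_INET6 when IPv6 is built in. A reduced sketch of that dispatch for the delete path (netlbl_delattr_sketch() is an invented name; "netlabel_calipso.h" is NetLabel's private header):

    /* Family dispatch as in netlbl_sock_delattr(); sketch only. */
    #include <net/sock.h>
    #include <net/cipso_ipv4.h>
    #include "netlabel_calipso.h"

    static void netlbl_delattr_sketch(struct sock *sk)
    {
            switch (sk->sk_family) {
            case AF_INET:
                    cipso_v4_sock_delattr(sk);      /* strip the IPv4 CIPSO option */
                    break;
    #if IS_ENABLED(CONFIG_IPV6)
            case AF_INET6:
                    calipso_sock_delattr(sk);       /* strip the IPv6 CALIPSO option */
                    break;
    #endif
            }
    }
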
sk                 80 net/netlink/af_netlink.c static inline int netlink_is_kernel(struct sock *sk)
sk                 82 net/netlink/af_netlink.c 	return nlk_sk(sk)->flags & NETLINK_F_KERNEL_SOCKET;
sk                128 net/netlink/af_netlink.c static int netlink_dump(struct sock *sk);
sk                252 net/netlink/af_netlink.c 	struct sock *sk = skb->sk;
sk                257 net/netlink/af_netlink.c 	switch (sk->sk_protocol) {
sk                276 net/netlink/af_netlink.c 	struct sock *sk = skb->sk;
sk                279 net/netlink/af_netlink.c 	if (!net_eq(dev_net(dev), sock_net(sk)))
sk                290 net/netlink/af_netlink.c 		nskb->protocol = htons((u16) sk->sk_protocol);
sk                291 net/netlink/af_netlink.c 		nskb->pkt_type = netlink_is_kernel(sk) ?
sk                337 net/netlink/af_netlink.c static void netlink_overrun(struct sock *sk)
sk                339 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                343 net/netlink/af_netlink.c 				      &nlk_sk(sk)->state)) {
sk                344 net/netlink/af_netlink.c 			sk->sk_err = ENOBUFS;
sk                345 net/netlink/af_netlink.c 			sk->sk_error_report(sk);
sk                348 net/netlink/af_netlink.c 	atomic_inc(&sk->sk_drops);
sk                351 net/netlink/af_netlink.c static void netlink_rcv_wake(struct sock *sk)
sk                353 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                355 net/netlink/af_netlink.c 	if (skb_queue_empty(&sk->sk_receive_queue))
sk                370 net/netlink/af_netlink.c 	if (skb->sk != NULL)
sk                374 net/netlink/af_netlink.c static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
sk                376 net/netlink/af_netlink.c 	WARN_ON(skb->sk != NULL);
sk                377 net/netlink/af_netlink.c 	skb->sk = sk;
sk                379 net/netlink/af_netlink.c 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk                380 net/netlink/af_netlink.c 	sk_mem_charge(sk, skb->truesize);
sk                383 net/netlink/af_netlink.c static void netlink_sock_destruct(struct sock *sk)
sk                385 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                394 net/netlink/af_netlink.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                396 net/netlink/af_netlink.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                397 net/netlink/af_netlink.c 		printk(KERN_ERR "Freeing alive netlink socket %p\n", sk);
sk                401 net/netlink/af_netlink.c 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
sk                402 net/netlink/af_netlink.c 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
sk                403 net/netlink/af_netlink.c 	WARN_ON(nlk_sk(sk)->groups);
sk                411 net/netlink/af_netlink.c 	sk_free(&nlk->sk);
sk                486 net/netlink/af_netlink.c 	       !net_eq(sock_net(&nlk->sk), read_pnet(&x->pnet));
sk                507 net/netlink/af_netlink.c static int __netlink_insert(struct netlink_table *table, struct sock *sk)
sk                511 net/netlink/af_netlink.c 	netlink_compare_arg_init(&arg, sock_net(sk), nlk_sk(sk)->portid);
sk                513 net/netlink/af_netlink.c 					    &nlk_sk(sk)->node,
sk                520 net/netlink/af_netlink.c 	struct sock *sk;
sk                523 net/netlink/af_netlink.c 	sk = __netlink_lookup(table, portid, net);
sk                524 net/netlink/af_netlink.c 	if (sk)
sk                525 net/netlink/af_netlink.c 		sock_hold(sk);
sk                528 net/netlink/af_netlink.c 	return sk;
sk                534 net/netlink/af_netlink.c netlink_update_listeners(struct sock *sk)
sk                536 net/netlink/af_netlink.c 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
sk                547 net/netlink/af_netlink.c 		sk_for_each_bound(sk, &tbl->mc_list) {
sk                548 net/netlink/af_netlink.c 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
sk                549 net/netlink/af_netlink.c 				mask |= nlk_sk(sk)->groups[i];
sk                557 net/netlink/af_netlink.c static int netlink_insert(struct sock *sk, u32 portid)
sk                559 net/netlink/af_netlink.c 	struct netlink_table *table = &nl_table[sk->sk_protocol];
sk                562 net/netlink/af_netlink.c 	lock_sock(sk);
sk                564 net/netlink/af_netlink.c 	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
sk                565 net/netlink/af_netlink.c 	if (nlk_sk(sk)->bound)
sk                568 net/netlink/af_netlink.c 	nlk_sk(sk)->portid = portid;
sk                569 net/netlink/af_netlink.c 	sock_hold(sk);
sk                571 net/netlink/af_netlink.c 	err = __netlink_insert(table, sk);
sk                580 net/netlink/af_netlink.c 		sock_put(sk);
sk                586 net/netlink/af_netlink.c 	nlk_sk(sk)->bound = portid;
sk                589 net/netlink/af_netlink.c 	release_sock(sk);
sk                593 net/netlink/af_netlink.c static void netlink_remove(struct sock *sk)
sk                597 net/netlink/af_netlink.c 	table = &nl_table[sk->sk_protocol];
sk                598 net/netlink/af_netlink.c 	if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
sk                600 net/netlink/af_netlink.c 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
sk                601 net/netlink/af_netlink.c 		__sock_put(sk);
sk                605 net/netlink/af_netlink.c 	if (nlk_sk(sk)->subscriptions) {
sk                606 net/netlink/af_netlink.c 		__sk_del_bind_node(sk);
sk                607 net/netlink/af_netlink.c 		netlink_update_listeners(sk);
sk                609 net/netlink/af_netlink.c 	if (sk->sk_protocol == NETLINK_GENERIC)
sk                624 net/netlink/af_netlink.c 	struct sock *sk;
sk                629 net/netlink/af_netlink.c 	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto, kern);
sk                630 net/netlink/af_netlink.c 	if (!sk)
sk                633 net/netlink/af_netlink.c 	sock_init_data(sock, sk);
sk                635 net/netlink/af_netlink.c 	nlk = nlk_sk(sk);
sk                647 net/netlink/af_netlink.c 	sk->sk_destruct = netlink_sock_destruct;
sk                648 net/netlink/af_netlink.c 	sk->sk_protocol = protocol;
sk                700 net/netlink/af_netlink.c 	nlk = nlk_sk(sock->sk);
sk                715 net/netlink/af_netlink.c 	struct sock *sk = &nlk->sk;
sk                720 net/netlink/af_netlink.c 	if (!refcount_dec_and_test(&sk->sk_refcnt))
sk                729 net/netlink/af_netlink.c 	sk_free(sk);
sk                734 net/netlink/af_netlink.c 	struct sock *sk = sock->sk;
sk                737 net/netlink/af_netlink.c 	if (!sk)
sk                740 net/netlink/af_netlink.c 	netlink_remove(sk);
sk                741 net/netlink/af_netlink.c 	sock_orphan(sk);
sk                742 net/netlink/af_netlink.c 	nlk = nlk_sk(sk);
sk                757 net/netlink/af_netlink.c 				nlk->netlink_unbind(sock_net(sk), i + 1);
sk                759 net/netlink/af_netlink.c 	if (sk->sk_protocol == NETLINK_GENERIC &&
sk                763 net/netlink/af_netlink.c 	sock->sk = NULL;
sk                766 net/netlink/af_netlink.c 	skb_queue_purge(&sk->sk_write_queue);
sk                770 net/netlink/af_netlink.c 						.net = sock_net(sk),
sk                771 net/netlink/af_netlink.c 						.protocol = sk->sk_protocol,
sk                780 net/netlink/af_netlink.c 	if (netlink_is_kernel(sk)) {
sk                782 net/netlink/af_netlink.c 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
sk                783 net/netlink/af_netlink.c 		if (--nl_table[sk->sk_protocol].registered == 0) {
sk                786 net/netlink/af_netlink.c 			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
sk                787 net/netlink/af_netlink.c 			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
sk                789 net/netlink/af_netlink.c 			nl_table[sk->sk_protocol].module = NULL;
sk                790 net/netlink/af_netlink.c 			nl_table[sk->sk_protocol].bind = NULL;
sk                791 net/netlink/af_netlink.c 			nl_table[sk->sk_protocol].unbind = NULL;
sk                792 net/netlink/af_netlink.c 			nl_table[sk->sk_protocol].flags = 0;
sk                793 net/netlink/af_netlink.c 			nl_table[sk->sk_protocol].registered = 0;
sk                799 net/netlink/af_netlink.c 	sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
sk                807 net/netlink/af_netlink.c 	struct sock *sk = sock->sk;
sk                808 net/netlink/af_netlink.c 	struct net *net = sock_net(sk);
sk                809 net/netlink/af_netlink.c 	struct netlink_table *table = &nl_table[sk->sk_protocol];
sk                831 net/netlink/af_netlink.c 	err = netlink_insert(sk, portid);
sk                856 net/netlink/af_netlink.c 		file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
sk                905 net/netlink/af_netlink.c 	return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
sk                911 net/netlink/af_netlink.c 	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
sk                912 net/netlink/af_netlink.c 		ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
sk                916 net/netlink/af_netlink.c netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
sk                918 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                921 net/netlink/af_netlink.c 		__sk_del_bind_node(sk);
sk                923 net/netlink/af_netlink.c 		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
sk                927 net/netlink/af_netlink.c static int netlink_realloc_groups(struct sock *sk)
sk                929 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                936 net/netlink/af_netlink.c 	groups = nl_table[sk->sk_protocol].groups;
sk                937 net/netlink/af_netlink.c 	if (!nl_table[sk->sk_protocol].registered) {
sk                961 net/netlink/af_netlink.c 			      struct sock *sk)
sk                963 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                971 net/netlink/af_netlink.c 			nlk->netlink_unbind(sock_net(sk), undo + 1);
sk                977 net/netlink/af_netlink.c 	struct sock *sk = sock->sk;
sk                978 net/netlink/af_netlink.c 	struct net *net = sock_net(sk);
sk                979 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                996 net/netlink/af_netlink.c 		err = netlink_realloc_groups(sk);
sk               1024 net/netlink/af_netlink.c 			netlink_undo_bind(group, groups, sk);
sk               1034 net/netlink/af_netlink.c 			netlink_insert(sk, nladdr->nl_pid) :
sk               1037 net/netlink/af_netlink.c 			netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
sk               1047 net/netlink/af_netlink.c 	netlink_update_subscriptions(sk, nlk->subscriptions +
sk               1051 net/netlink/af_netlink.c 	netlink_update_listeners(sk);
sk               1065 net/netlink/af_netlink.c 	struct sock *sk = sock->sk;
sk               1066 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1073 net/netlink/af_netlink.c 		sk->sk_state	= NETLINK_UNCONNECTED;
sk               1095 net/netlink/af_netlink.c 		sk->sk_state	= NETLINK_CONNECTED;
sk               1106 net/netlink/af_netlink.c 	struct sock *sk = sock->sk;
sk               1107 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1160 net/netlink/af_netlink.c 	sock = SOCKET_I(inode)->sk;
sk               1203 net/netlink/af_netlink.c int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
sk               1208 net/netlink/af_netlink.c 	nlk = nlk_sk(sk);
sk               1210 net/netlink/af_netlink.c 	if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
sk               1215 net/netlink/af_netlink.c 				netlink_overrun(sk);
sk               1216 net/netlink/af_netlink.c 			sock_put(sk);
sk               1224 net/netlink/af_netlink.c 		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
sk               1226 net/netlink/af_netlink.c 		    !sock_flag(sk, SOCK_DEAD))
sk               1231 net/netlink/af_netlink.c 		sock_put(sk);
sk               1239 net/netlink/af_netlink.c 	netlink_skb_set_owner_r(skb, sk);
sk               1243 net/netlink/af_netlink.c static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
sk               1247 net/netlink/af_netlink.c 	netlink_deliver_tap(sock_net(sk), skb);
sk               1249 net/netlink/af_netlink.c 	skb_queue_tail(&sk->sk_receive_queue, skb);
sk               1250 net/netlink/af_netlink.c 	sk->sk_data_ready(sk);
sk               1254 net/netlink/af_netlink.c int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
sk               1256 net/netlink/af_netlink.c 	int len = __netlink_sendskb(sk, skb);
sk               1258 net/netlink/af_netlink.c 	sock_put(sk);
sk               1262 net/netlink/af_netlink.c void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
sk               1265 net/netlink/af_netlink.c 	sock_put(sk);
sk               1272 net/netlink/af_netlink.c 	WARN_ON(skb->sk != NULL);
sk               1291 net/netlink/af_netlink.c static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
sk               1295 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1300 net/netlink/af_netlink.c 		netlink_skb_set_owner_r(skb, sk);
sk               1301 net/netlink/af_netlink.c 		NETLINK_CB(skb).sk = ssk;
sk               1302 net/netlink/af_netlink.c 		netlink_deliver_tap_kernel(sk, ssk, skb);
sk               1308 net/netlink/af_netlink.c 	sock_put(sk);
sk               1315 net/netlink/af_netlink.c 	struct sock *sk;
sk               1323 net/netlink/af_netlink.c 	sk = netlink_getsockbyportid(ssk, portid);
sk               1324 net/netlink/af_netlink.c 	if (IS_ERR(sk)) {
sk               1326 net/netlink/af_netlink.c 		return PTR_ERR(sk);
sk               1328 net/netlink/af_netlink.c 	if (netlink_is_kernel(sk))
sk               1329 net/netlink/af_netlink.c 		return netlink_unicast_kernel(sk, skb, ssk);
sk               1331 net/netlink/af_netlink.c 	if (sk_filter(sk, skb)) {
sk               1334 net/netlink/af_netlink.c 		sock_put(sk);
sk               1338 net/netlink/af_netlink.c 	err = netlink_attachskb(sk, skb, &timeo, ssk);
sk               1344 net/netlink/af_netlink.c 	return netlink_sendskb(sk, skb);
sk               1348 net/netlink/af_netlink.c int netlink_has_listeners(struct sock *sk, unsigned int group)
sk               1353 net/netlink/af_netlink.c 	BUG_ON(!netlink_is_kernel(sk));
sk               1356 net/netlink/af_netlink.c 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
sk               1358 net/netlink/af_netlink.c 	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
sk               1369 net/netlink/af_netlink.c 	const struct netlink_sock *nlk = nlk_sk(NETLINK_CB(skb).sk);
sk               1375 net/netlink/af_netlink.c static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
sk               1377 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1379 net/netlink/af_netlink.c 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
sk               1381 net/netlink/af_netlink.c 		netlink_skb_set_owner_r(skb, sk);
sk               1382 net/netlink/af_netlink.c 		__netlink_sendskb(sk, skb);
sk               1383 net/netlink/af_netlink.c 		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
sk               1403 net/netlink/af_netlink.c static void do_one_broadcast(struct sock *sk,
sk               1406 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1409 net/netlink/af_netlink.c 	if (p->exclude_sk == sk)
sk               1416 net/netlink/af_netlink.c 	if (!net_eq(sock_net(sk), p->net)) {
sk               1420 net/netlink/af_netlink.c 		if (!peernet_has_id(sock_net(sk), p->net))
sk               1423 net/netlink/af_netlink.c 		if (!file_ns_capable(sk->sk_socket->file, p->net->user_ns,
sk               1429 net/netlink/af_netlink.c 		netlink_overrun(sk);
sk               1433 net/netlink/af_netlink.c 	sock_hold(sk);
sk               1447 net/netlink/af_netlink.c 		netlink_overrun(sk);
sk               1454 net/netlink/af_netlink.c 	if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
sk               1459 net/netlink/af_netlink.c 	if (sk_filter(sk, p->skb2)) {
sk               1464 net/netlink/af_netlink.c 	NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
sk               1467 net/netlink/af_netlink.c 	val = netlink_broadcast_deliver(sk, p->skb2);
sk               1469 net/netlink/af_netlink.c 		netlink_overrun(sk);
sk               1478 net/netlink/af_netlink.c 	sock_put(sk);
sk               1488 net/netlink/af_netlink.c 	struct sock *sk;
sk               1510 net/netlink/af_netlink.c 	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
sk               1511 net/netlink/af_netlink.c 		do_one_broadcast(sk, &info);
sk               1547 net/netlink/af_netlink.c static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
sk               1549 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1552 net/netlink/af_netlink.c 	if (sk == p->exclude_sk)
sk               1555 net/netlink/af_netlink.c 	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
sk               1567 net/netlink/af_netlink.c 	sk->sk_err = p->code;
sk               1568 net/netlink/af_netlink.c 	sk->sk_error_report(sk);
sk               1586 net/netlink/af_netlink.c 	struct sock *sk;
sk               1597 net/netlink/af_netlink.c 	sk_for_each_bound(sk, &nl_table[ssk->sk_protocol].mc_list)
sk               1598 net/netlink/af_netlink.c 		ret += do_one_set_err(sk, &info);
sk               1618 net/netlink/af_netlink.c 	netlink_update_subscriptions(&nlk->sk, subscriptions);
sk               1619 net/netlink/af_netlink.c 	netlink_update_listeners(&nlk->sk);
sk               1625 net/netlink/af_netlink.c 	struct sock *sk = sock->sk;
sk               1626 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1649 net/netlink/af_netlink.c 		err = netlink_realloc_groups(sk);
sk               1655 net/netlink/af_netlink.c 			err = nlk->netlink_bind(sock_net(sk), val);
sk               1664 net/netlink/af_netlink.c 			nlk->netlink_unbind(sock_net(sk), val);
sk               1687 net/netlink/af_netlink.c 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_BROADCAST))
sk               1726 net/netlink/af_netlink.c 	struct sock *sk = sock->sk;
sk               1727 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1833 net/netlink/af_netlink.c static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
sk               1845 net/netlink/af_netlink.c 	struct sock *sk = sock->sk;
sk               1846 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1890 net/netlink/af_netlink.c 	if (len > sk->sk_sndbuf - 32)
sk               1908 net/netlink/af_netlink.c 	err = security_netlink_send(sk, skb);
sk               1916 net/netlink/af_netlink.c 		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
sk               1918 net/netlink/af_netlink.c 	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
sk               1929 net/netlink/af_netlink.c 	struct sock *sk = sock->sk;
sk               1930 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               1941 net/netlink/af_netlink.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk               1990 net/netlink/af_netlink.c 		netlink_cmsg_listen_all_nsid(sk, msg, skb);
sk               1997 net/netlink/af_netlink.c 	skb_free_datagram(sk, skb);
sk               2000 net/netlink/af_netlink.c 	    atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
sk               2001 net/netlink/af_netlink.c 		ret = netlink_dump(sk);
sk               2003 net/netlink/af_netlink.c 			sk->sk_err = -ret;
sk               2004 net/netlink/af_netlink.c 			sk->sk_error_report(sk);
sk               2010 net/netlink/af_netlink.c 	netlink_rcv_wake(sk);
sk               2014 net/netlink/af_netlink.c static void netlink_data_ready(struct sock *sk)
sk               2030 net/netlink/af_netlink.c 	struct sock *sk;
sk               2047 net/netlink/af_netlink.c 	sk = sock->sk;
sk               2058 net/netlink/af_netlink.c 	sk->sk_data_ready = netlink_data_ready;
sk               2060 net/netlink/af_netlink.c 		nlk_sk(sk)->netlink_rcv = cfg->input;
sk               2062 net/netlink/af_netlink.c 	if (netlink_insert(sk, 0))
sk               2065 net/netlink/af_netlink.c 	nlk = nlk_sk(sk);
sk               2087 net/netlink/af_netlink.c 	return sk;
sk               2091 net/netlink/af_netlink.c 	netlink_kernel_release(sk);
sk               2101 net/netlink/af_netlink.c netlink_kernel_release(struct sock *sk)
sk               2103 net/netlink/af_netlink.c 	if (sk == NULL || sk->sk_socket == NULL)
sk               2106 net/netlink/af_netlink.c 	sock_release(sk->sk_socket);
sk               2110 net/netlink/af_netlink.c int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
sk               2113 net/netlink/af_netlink.c 	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
sk               2145 net/netlink/af_netlink.c int netlink_change_ngroups(struct sock *sk, unsigned int groups)
sk               2150 net/netlink/af_netlink.c 	err = __netlink_change_ngroups(sk, groups);
sk               2158 net/netlink/af_netlink.c 	struct sock *sk;
sk               2161 net/netlink/af_netlink.c 	sk_for_each_bound(sk, &tbl->mc_list)
sk               2162 net/netlink/af_netlink.c 		netlink_update_socket_mc(nlk_sk(sk), group, 0);
sk               2188 net/netlink/af_netlink.c static int netlink_dump(struct sock *sk)
sk               2190 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk               2206 net/netlink/af_netlink.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk               2241 net/netlink/af_netlink.c 	netlink_skb_set_owner_r(skb, sk);
sk               2253 net/netlink/af_netlink.c 		if (sk_filter(sk, skb))
sk               2256 net/netlink/af_netlink.c 			__netlink_sendskb(sk, skb);
sk               2277 net/netlink/af_netlink.c 	if (sk_filter(sk, skb))
sk               2280 net/netlink/af_netlink.c 		__netlink_sendskb(sk, skb);
sk               2305 net/netlink/af_netlink.c 	struct sock *sk;
sk               2310 net/netlink/af_netlink.c 	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
sk               2311 net/netlink/af_netlink.c 	if (sk == NULL) {
sk               2316 net/netlink/af_netlink.c 	nlk = nlk_sk(sk);
sk               2339 net/netlink/af_netlink.c 	nlk2 = nlk_sk(NETLINK_CB(skb).sk);
sk               2353 net/netlink/af_netlink.c 	ret = netlink_dump(sk);
sk               2355 net/netlink/af_netlink.c 	sock_put(sk);
sk               2368 net/netlink/af_netlink.c 	sock_put(sk);
sk               2384 net/netlink/af_netlink.c 	struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
sk               2414 net/netlink/af_netlink.c 		NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
sk               2415 net/netlink/af_netlink.c 		NETLINK_CB(in_skb).sk->sk_error_report(NETLINK_CB(in_skb).sk);
sk               2448 net/netlink/af_netlink.c 	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
sk               2506 net/netlink/af_netlink.c int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
sk               2521 net/netlink/af_netlink.c 		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
sk               2527 net/netlink/af_netlink.c 		err2 = nlmsg_unicast(sk, skb, portid);
sk               2580 net/netlink/af_netlink.c 	} while (sock_net(&nlk->sk) != seq_file_net(seq));
sk               2739 net/netlink/af_netlink.c 	netlink_compare_arg_init(&arg, sock_net(&nlk->sk), nlk->portid);
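
Most of the af_netlink.c block above is the kernel-side socket life cycle: __netlink_kernel_create() builds a socket whose sk_data_ready is netlink_data_ready and whose nlk->netlink_rcv is the caller's cfg->input, netlink_insert() binds it to portid 0, and netlink_kernel_release() tears it down again. A hedged consumer of that API as a tiny module; the protocol slot (NETLINK_USERSOCK) and all demo_* names are purely illustrative:

    /* Sketch: creating and releasing a kernel netlink socket. */
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/netlink.h>
    #include <net/net_namespace.h>
    #include <net/sock.h>

    static struct sock *demo_nl_sk;

    /* Runs for every message unicast to the kernel socket (nlk->netlink_rcv). */
    static void demo_nl_input(struct sk_buff *skb)
    {
            struct nlmsghdr *nlh = nlmsg_hdr(skb);

            pr_info("netlink type %u from portid %u\n",
                    nlh->nlmsg_type, NETLINK_CB(skb).portid);
    }

    static int __init demo_nl_init(void)
    {
            struct netlink_kernel_cfg cfg = {
                    .input = demo_nl_input,
            };

            demo_nl_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
            return demo_nl_sk ? 0 : -ENOMEM;
    }

    static void __exit demo_nl_exit(void)
    {
            netlink_kernel_release(demo_nl_sk);
    }

    module_init(demo_nl_init);
    module_exit(demo_nl_exit);
    MODULE_LICENSE("GPL");
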
sk                 25 net/netlink/af_netlink.h 	struct sock		sk;
sk                 52 net/netlink/af_netlink.h static inline struct netlink_sock *nlk_sk(struct sock *sk)
sk                 54 net/netlink/af_netlink.h 	return container_of(sk, struct netlink_sock, sk);
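
The af_netlink.h lines show the accessor behind every nlk_sk() call in this listing: struct netlink_sock embeds struct sock as its first member, and container_of() recovers the outer structure from a plain struct sock pointer. The same idiom reappears below as nr_sk()/struct nr_sock. A generic sketch (demo_sock and demo_sk() are made-up names):

    /* Sketch of the "struct sock embedded first" idiom behind nlk_sk(). */
    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <net/sock.h>

    struct demo_sock {
            struct sock     sk;             /* must remain the first member */
            u32             portid;         /* protocol-private state follows */
    };

    static inline struct demo_sock *demo_sk(struct sock *sk)
    {
            return container_of(sk, struct demo_sock, sk);
    }
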
sk                 12 net/netlink/diag.c static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb)
sk                 14 net/netlink/diag.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                 23 net/netlink/diag.c static int sk_diag_put_flags(struct sock *sk, struct sk_buff *skb)
sk                 25 net/netlink/diag.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                 44 net/netlink/diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
sk                 50 net/netlink/diag.c 	struct netlink_sock *nlk = nlk_sk(sk);
sk                 59 net/netlink/diag.c 	rep->ndiag_type		= sk->sk_type;
sk                 60 net/netlink/diag.c 	rep->ndiag_protocol	= sk->sk_protocol;
sk                 61 net/netlink/diag.c 	rep->ndiag_state	= sk->sk_state;
sk                 67 net/netlink/diag.c 	sock_diag_save_cookie(sk, rep->ndiag_cookie);
sk                 70 net/netlink/diag.c 	    sk_diag_dump_groups(sk, skb))
sk                 74 net/netlink/diag.c 	    sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO))
sk                 78 net/netlink/diag.c 	    sk_diag_put_flags(sk, skb))
sk                 94 net/netlink/diag.c 	struct net *net = sock_net(skb->sk);
sk                 97 net/netlink/diag.c 	struct sock *sk;
sk                131 net/netlink/diag.c 		sk = (struct sock *)nlsk;
sk                133 net/netlink/diag.c 		if (!net_eq(sock_net(sk), net))
sk                136 net/netlink/diag.c 		if (sk_diag_fill(sk, skb, req,
sk                140 net/netlink/diag.c 				 sock_i_ino(sk)) < 0) {
sk                156 net/netlink/diag.c 	sk_for_each_bound(sk, &tbl->mc_list) {
sk                157 net/netlink/diag.c 		if (sk_hashed(sk))
sk                159 net/netlink/diag.c 		if (!net_eq(sock_net(sk), net))
sk                166 net/netlink/diag.c 		if (sk_diag_fill(sk, skb, req,
sk                170 net/netlink/diag.c 				 sock_i_ino(sk)) < 0) {
sk                227 net/netlink/diag.c 	struct net *net = sock_net(skb->sk);
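
The netlink/diag.c fragment fills one netlink_diag_msg per socket and then appends the generic sock_diag attributes. A reduced sketch of the helpers it leans on; the reply struct is assumed to be already reserved in the dump skb, and diag_fill_sketch() is an invented name:

    /* Sketch of the sock_diag helpers used by sk_diag_fill() above. */
    #include <linux/netlink_diag.h>
    #include <linux/sock_diag.h>
    #include <net/sock.h>

    static int diag_fill_sketch(struct sock *sk, struct sk_buff *skb,
                                struct netlink_diag_msg *rep)
    {
            rep->ndiag_type     = sk->sk_type;
            rep->ndiag_protocol = sk->sk_protocol;
            rep->ndiag_state    = sk->sk_state;

            /* Kernel-chosen 64-bit cookie, split across two u32s. */
            sock_diag_save_cookie(sk, rep->ndiag_cookie);

            /* Generic rmem/wmem/fwd-alloc counters as a nested attribute. */
            return sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO);
    }
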
sk                507 net/netlink/genetlink.c 	struct net *net = sock_net(skb->sk);
sk                807 net/netlink/genetlink.c 	struct net *net = sock_net(skb->sk);
sk               1162 net/netlink/genetlink.c 	struct sock *sk = net->genl_sock;
sk               1171 net/netlink/genetlink.c 	nlmsg_notify(sk, skb, info->snd_portid, group, report, flags);
sk                 69 net/netrom/af_netrom.c static void nr_remove_socket(struct sock *sk)
sk                 72 net/netrom/af_netrom.c 	sk_del_node_init(sk);
sk                112 net/netrom/af_netrom.c static void nr_insert_socket(struct sock *sk)
sk                115 net/netrom/af_netrom.c 	sk_add_node(sk, &nr_list);
sk                193 net/netrom/af_netrom.c 	struct sock *sk;
sk                200 net/netrom/af_netrom.c 			if ((sk=nr_find_socket(i, j)) == NULL)
sk                202 net/netrom/af_netrom.c 			sock_put(sk);
sk                221 net/netrom/af_netrom.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                222 net/netrom/af_netrom.c 	bh_lock_sock(sk);
sk                223 net/netrom/af_netrom.c 	sock_hold(sk);
sk                224 net/netrom/af_netrom.c 	nr_destroy_socket(sk);
sk                225 net/netrom/af_netrom.c 	bh_unlock_sock(sk);
sk                226 net/netrom/af_netrom.c 	sock_put(sk);
sk                235 net/netrom/af_netrom.c void nr_destroy_socket(struct sock *sk)
sk                239 net/netrom/af_netrom.c 	nr_remove_socket(sk);
sk                241 net/netrom/af_netrom.c 	nr_stop_heartbeat(sk);
sk                242 net/netrom/af_netrom.c 	nr_stop_t1timer(sk);
sk                243 net/netrom/af_netrom.c 	nr_stop_t2timer(sk);
sk                244 net/netrom/af_netrom.c 	nr_stop_t4timer(sk);
sk                245 net/netrom/af_netrom.c 	nr_stop_idletimer(sk);
sk                247 net/netrom/af_netrom.c 	nr_clear_queues(sk);		/* Flush the queues */
sk                249 net/netrom/af_netrom.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk                250 net/netrom/af_netrom.c 		if (skb->sk != sk) { /* A pending connection */
sk                252 net/netrom/af_netrom.c 			sock_set_flag(skb->sk, SOCK_DEAD);
sk                253 net/netrom/af_netrom.c 			nr_start_heartbeat(skb->sk);
sk                254 net/netrom/af_netrom.c 			nr_sk(skb->sk)->state = NR_STATE_0;
sk                260 net/netrom/af_netrom.c 	if (sk_has_allocations(sk)) {
sk                262 net/netrom/af_netrom.c 		sk->sk_timer.function = nr_destroy_timer;
sk                263 net/netrom/af_netrom.c 		sk->sk_timer.expires  = jiffies + 2 * HZ;
sk                264 net/netrom/af_netrom.c 		add_timer(&sk->sk_timer);
sk                266 net/netrom/af_netrom.c 		sock_put(sk);
sk                277 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk                278 net/netrom/af_netrom.c 	struct nr_sock *nr = nr_sk(sk);
sk                329 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk                330 net/netrom/af_netrom.c 	struct nr_sock *nr = nr_sk(sk);
sk                378 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk                380 net/netrom/af_netrom.c 	lock_sock(sk);
sk                381 net/netrom/af_netrom.c 	if (sk->sk_state != TCP_LISTEN) {
sk                382 net/netrom/af_netrom.c 		memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN);
sk                383 net/netrom/af_netrom.c 		sk->sk_max_ack_backlog = backlog;
sk                384 net/netrom/af_netrom.c 		sk->sk_state           = TCP_LISTEN;
sk                385 net/netrom/af_netrom.c 		release_sock(sk);
sk                388 net/netrom/af_netrom.c 	release_sock(sk);
sk                402 net/netrom/af_netrom.c 	struct sock *sk;
sk                411 net/netrom/af_netrom.c 	sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, kern);
sk                412 net/netrom/af_netrom.c 	if (sk  == NULL)
sk                415 net/netrom/af_netrom.c 	nr = nr_sk(sk);
sk                417 net/netrom/af_netrom.c 	sock_init_data(sock, sk);
sk                420 net/netrom/af_netrom.c 	sk->sk_protocol = protocol;
sk                426 net/netrom/af_netrom.c 	nr_init_timers(sk);
sk                448 net/netrom/af_netrom.c 	struct sock *sk;
sk                454 net/netrom/af_netrom.c 	sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot, 0);
sk                455 net/netrom/af_netrom.c 	if (sk == NULL)
sk                458 net/netrom/af_netrom.c 	nr = nr_sk(sk);
sk                460 net/netrom/af_netrom.c 	sock_init_data(NULL, sk);
sk                462 net/netrom/af_netrom.c 	sk->sk_type     = osk->sk_type;
sk                463 net/netrom/af_netrom.c 	sk->sk_priority = osk->sk_priority;
sk                464 net/netrom/af_netrom.c 	sk->sk_protocol = osk->sk_protocol;
sk                465 net/netrom/af_netrom.c 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
sk                466 net/netrom/af_netrom.c 	sk->sk_sndbuf   = osk->sk_sndbuf;
sk                467 net/netrom/af_netrom.c 	sk->sk_state    = TCP_ESTABLISHED;
sk                468 net/netrom/af_netrom.c 	sock_copy_flags(sk, osk);
sk                474 net/netrom/af_netrom.c 	nr_init_timers(sk);
sk                488 net/netrom/af_netrom.c 	return sk;
sk                493 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk                496 net/netrom/af_netrom.c 	if (sk == NULL) return 0;
sk                498 net/netrom/af_netrom.c 	sock_hold(sk);
sk                499 net/netrom/af_netrom.c 	sock_orphan(sk);
sk                500 net/netrom/af_netrom.c 	lock_sock(sk);
sk                501 net/netrom/af_netrom.c 	nr = nr_sk(sk);
sk                507 net/netrom/af_netrom.c 		nr_disconnect(sk, 0);
sk                508 net/netrom/af_netrom.c 		nr_destroy_socket(sk);
sk                512 net/netrom/af_netrom.c 		nr_clear_queues(sk);
sk                514 net/netrom/af_netrom.c 		nr_write_internal(sk, NR_DISCREQ);
sk                515 net/netrom/af_netrom.c 		nr_start_t1timer(sk);
sk                516 net/netrom/af_netrom.c 		nr_stop_t2timer(sk);
sk                517 net/netrom/af_netrom.c 		nr_stop_t4timer(sk);
sk                518 net/netrom/af_netrom.c 		nr_stop_idletimer(sk);
sk                520 net/netrom/af_netrom.c 		sk->sk_state    = TCP_CLOSE;
sk                521 net/netrom/af_netrom.c 		sk->sk_shutdown |= SEND_SHUTDOWN;
sk                522 net/netrom/af_netrom.c 		sk->sk_state_change(sk);
sk                523 net/netrom/af_netrom.c 		sock_set_flag(sk, SOCK_DESTROY);
sk                530 net/netrom/af_netrom.c 	sock->sk   = NULL;
sk                531 net/netrom/af_netrom.c 	release_sock(sk);
sk                532 net/netrom/af_netrom.c 	sock_put(sk);
sk                539 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk                540 net/netrom/af_netrom.c 	struct nr_sock *nr = nr_sk(sk);
sk                546 net/netrom/af_netrom.c 	lock_sock(sk);
sk                547 net/netrom/af_netrom.c 	if (!sock_flag(sk, SOCK_ZAPPED)) {
sk                548 net/netrom/af_netrom.c 		release_sock(sk);
sk                552 net/netrom/af_netrom.c 		release_sock(sk);
sk                556 net/netrom/af_netrom.c 		release_sock(sk);
sk                560 net/netrom/af_netrom.c 		release_sock(sk);
sk                564 net/netrom/af_netrom.c 		release_sock(sk);
sk                574 net/netrom/af_netrom.c 			release_sock(sk);
sk                588 net/netrom/af_netrom.c 				release_sock(sk);
sk                599 net/netrom/af_netrom.c 	nr_insert_socket(sk);
sk                601 net/netrom/af_netrom.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                603 net/netrom/af_netrom.c 	release_sock(sk);
sk                611 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk                612 net/netrom/af_netrom.c 	struct nr_sock *nr = nr_sk(sk);
sk                619 net/netrom/af_netrom.c 	lock_sock(sk);
sk                620 net/netrom/af_netrom.c 	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sk                625 net/netrom/af_netrom.c 	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sk                631 net/netrom/af_netrom.c 	if (sk->sk_state == TCP_ESTABLISHED) {
sk                636 net/netrom/af_netrom.c 	sk->sk_state   = TCP_CLOSE;
sk                647 net/netrom/af_netrom.c 	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
sk                648 net/netrom/af_netrom.c 		sock_reset_flag(sk, SOCK_ZAPPED);
sk                673 net/netrom/af_netrom.c 		nr_insert_socket(sk);		/* Finish the bind */
sk                678 net/netrom/af_netrom.c 	release_sock(sk);
sk                680 net/netrom/af_netrom.c 	lock_sock(sk);
sk                689 net/netrom/af_netrom.c 	sk->sk_state = TCP_SYN_SENT;
sk                691 net/netrom/af_netrom.c 	nr_establish_data_link(sk);
sk                695 net/netrom/af_netrom.c 	nr_start_heartbeat(sk);
sk                698 net/netrom/af_netrom.c 	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
sk                707 net/netrom/af_netrom.c 	if (sk->sk_state == TCP_SYN_SENT) {
sk                711 net/netrom/af_netrom.c 			prepare_to_wait(sk_sleep(sk), &wait,
sk                713 net/netrom/af_netrom.c 			if (sk->sk_state != TCP_SYN_SENT)
sk                716 net/netrom/af_netrom.c 				release_sock(sk);
sk                718 net/netrom/af_netrom.c 				lock_sock(sk);
sk                724 net/netrom/af_netrom.c 		finish_wait(sk_sleep(sk), &wait);
sk                729 net/netrom/af_netrom.c 	if (sk->sk_state != TCP_ESTABLISHED) {
sk                731 net/netrom/af_netrom.c 		err = sock_error(sk);	/* Always set at this point */
sk                738 net/netrom/af_netrom.c 	release_sock(sk);
sk                749 net/netrom/af_netrom.c 	struct sock *sk;
sk                752 net/netrom/af_netrom.c 	if ((sk = sock->sk) == NULL)
sk                755 net/netrom/af_netrom.c 	lock_sock(sk);
sk                756 net/netrom/af_netrom.c 	if (sk->sk_type != SOCK_SEQPACKET) {
sk                761 net/netrom/af_netrom.c 	if (sk->sk_state != TCP_LISTEN) {
sk                771 net/netrom/af_netrom.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                772 net/netrom/af_netrom.c 		skb = skb_dequeue(&sk->sk_receive_queue);
sk                781 net/netrom/af_netrom.c 			release_sock(sk);
sk                783 net/netrom/af_netrom.c 			lock_sock(sk);
sk                789 net/netrom/af_netrom.c 	finish_wait(sk_sleep(sk), &wait);
sk                793 net/netrom/af_netrom.c 	newsk = skb->sk;
sk                798 net/netrom/af_netrom.c 	sk_acceptq_removed(sk);
sk                801 net/netrom/af_netrom.c 	release_sock(sk);
sk                810 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk                811 net/netrom/af_netrom.c 	struct nr_sock *nr = nr_sk(sk);
sk                816 net/netrom/af_netrom.c 	lock_sock(sk);
sk                818 net/netrom/af_netrom.c 		if (sk->sk_state != TCP_ESTABLISHED) {
sk                819 net/netrom/af_netrom.c 			release_sock(sk);
sk                834 net/netrom/af_netrom.c 	release_sock(sk);
sk                841 net/netrom/af_netrom.c 	struct sock *sk;
sk                885 net/netrom/af_netrom.c 	sk = NULL;
sk                889 net/netrom/af_netrom.c 			sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src);
sk                892 net/netrom/af_netrom.c 			sk = nr_find_peer(circuit_index, circuit_id, src);
sk                894 net/netrom/af_netrom.c 			sk = nr_find_socket(circuit_index, circuit_id);
sk                897 net/netrom/af_netrom.c 	if (sk != NULL) {
sk                898 net/netrom/af_netrom.c 		bh_lock_sock(sk);
sk                902 net/netrom/af_netrom.c 			nr_sk(sk)->bpqext = 1;
sk                904 net/netrom/af_netrom.c 			nr_sk(sk)->bpqext = 0;
sk                906 net/netrom/af_netrom.c 		ret = nr_process_rx_frame(sk, skb);
sk                907 net/netrom/af_netrom.c 		bh_unlock_sock(sk);
sk                908 net/netrom/af_netrom.c 		sock_put(sk);
sk                932 net/netrom/af_netrom.c 	sk = nr_find_listener(dest);
sk                936 net/netrom/af_netrom.c 	if (sk == NULL || sk_acceptq_is_full(sk) ||
sk                937 net/netrom/af_netrom.c 	    (make = nr_make_new(sk)) == NULL) {
sk                939 net/netrom/af_netrom.c 		if (sk)
sk                940 net/netrom/af_netrom.c 			sock_put(sk);
sk                944 net/netrom/af_netrom.c 	bh_lock_sock(sk);
sk                949 net/netrom/af_netrom.c 	skb->sk             = make;
sk                962 net/netrom/af_netrom.c 	bh_unlock_sock(sk);
sk                964 net/netrom/af_netrom.c 	bh_lock_sock(sk);
sk                993 net/netrom/af_netrom.c 	sk_acceptq_added(sk);
sk                994 net/netrom/af_netrom.c 	skb_queue_head(&sk->sk_receive_queue, skb);
sk                996 net/netrom/af_netrom.c 	if (!sock_flag(sk, SOCK_DEAD))
sk                997 net/netrom/af_netrom.c 		sk->sk_data_ready(sk);
sk                999 net/netrom/af_netrom.c 	bh_unlock_sock(sk);
sk               1000 net/netrom/af_netrom.c 	sock_put(sk);
sk               1012 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk               1013 net/netrom/af_netrom.c 	struct nr_sock *nr = nr_sk(sk);
sk               1024 net/netrom/af_netrom.c 	lock_sock(sk);
sk               1025 net/netrom/af_netrom.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk               1030 net/netrom/af_netrom.c 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
sk               1056 net/netrom/af_netrom.c 		if (sk->sk_state != TCP_ESTABLISHED) {
sk               1073 net/netrom/af_netrom.c 	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
sk               1105 net/netrom/af_netrom.c 	if (sk->sk_state != TCP_ESTABLISHED) {
sk               1111 net/netrom/af_netrom.c 	nr_output(sk, skb);	/* Shove it onto the queue */
sk               1115 net/netrom/af_netrom.c 	release_sock(sk);
sk               1122 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk               1133 net/netrom/af_netrom.c 	lock_sock(sk);
sk               1134 net/netrom/af_netrom.c 	if (sk->sk_state != TCP_ESTABLISHED) {
sk               1135 net/netrom/af_netrom.c 		release_sock(sk);
sk               1140 net/netrom/af_netrom.c 	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL) {
sk               1141 net/netrom/af_netrom.c 		release_sock(sk);
sk               1155 net/netrom/af_netrom.c 		skb_free_datagram(sk, skb);
sk               1156 net/netrom/af_netrom.c 		release_sock(sk);
sk               1168 net/netrom/af_netrom.c 	skb_free_datagram(sk, skb);
sk               1170 net/netrom/af_netrom.c 	release_sock(sk);
sk               1177 net/netrom/af_netrom.c 	struct sock *sk = sock->sk;
sk               1184 net/netrom/af_netrom.c 		lock_sock(sk);
sk               1185 net/netrom/af_netrom.c 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
sk               1188 net/netrom/af_netrom.c 		release_sock(sk);
sk               1196 net/netrom/af_netrom.c 		lock_sock(sk);
sk               1198 net/netrom/af_netrom.c 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
sk               1200 net/netrom/af_netrom.c 		release_sock(sk);
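
The af_netrom.c excerpt follows the classic AX.25-family proto_ops shape: every operation brackets its protocol state changes with lock_sock()/release_sock(), accept() dequeues an already-established child socket from sk_receive_queue, and close() either destroys the socket at once or parks it on sk_timer via nr_destroy_timer(). As a small, hedged illustration of the locking idiom, a listen() modelled on the nr_listen() lines above (listen_sketch() is a made-up name):

    /* Sketch of the lock_sock()/release_sock() idiom from af_netrom.c. */
    #include <linux/errno.h>
    #include <linux/net.h>
    #include <net/sock.h>
    #include <net/tcp_states.h>

    static int listen_sketch(struct socket *sock, int backlog)
    {
            struct sock *sk = sock->sk;

            lock_sock(sk);
            if (sk->sk_state != TCP_LISTEN) {
                    sk->sk_max_ack_backlog = backlog;
                    sk->sk_state           = TCP_LISTEN;
                    release_sock(sk);
                    return 0;
            }
            release_sock(sk);

            return -EOPNOTSUPP;
    }
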
sk                 29 net/netrom/nr_in.c static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
sk                 32 net/netrom/nr_in.c 	struct nr_sock *nr = nr_sk(sk);
sk                 36 net/netrom/nr_in.c 	nr_start_idletimer(sk);
sk                 63 net/netrom/nr_in.c 	return sock_queue_rcv_skb(sk, skbn);
sk                 71 net/netrom/nr_in.c static int nr_state1_machine(struct sock *sk, struct sk_buff *skb,
sk                 76 net/netrom/nr_in.c 		struct nr_sock *nr = nr_sk(sk);
sk                 78 net/netrom/nr_in.c 		nr_stop_t1timer(sk);
sk                 79 net/netrom/nr_in.c 		nr_start_idletimer(sk);
sk                 89 net/netrom/nr_in.c 		sk->sk_state   = TCP_ESTABLISHED;
sk                 90 net/netrom/nr_in.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                 91 net/netrom/nr_in.c 			sk->sk_state_change(sk);
sk                 96 net/netrom/nr_in.c 		nr_disconnect(sk, ECONNREFUSED);
sk                101 net/netrom/nr_in.c 			nr_disconnect(sk, ECONNRESET);
sk                115 net/netrom/nr_in.c static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
sk                120 net/netrom/nr_in.c 		nr_disconnect(sk, ECONNRESET);
sk                124 net/netrom/nr_in.c 		nr_write_internal(sk, NR_DISCACK);
sk                127 net/netrom/nr_in.c 		nr_disconnect(sk, 0);
sk                132 net/netrom/nr_in.c 			nr_disconnect(sk, ECONNRESET);
sk                146 net/netrom/nr_in.c static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype)
sk                148 net/netrom/nr_in.c 	struct nr_sock *nrom = nr_sk(sk);
sk                160 net/netrom/nr_in.c 		nr_write_internal(sk, NR_CONNACK);
sk                164 net/netrom/nr_in.c 		nr_write_internal(sk, NR_DISCACK);
sk                165 net/netrom/nr_in.c 		nr_disconnect(sk, 0);
sk                170 net/netrom/nr_in.c 		nr_disconnect(sk, ECONNRESET);
sk                179 net/netrom/nr_in.c 			nr_start_t4timer(sk);
sk                182 net/netrom/nr_in.c 			nr_stop_t4timer(sk);
sk                184 net/netrom/nr_in.c 		if (!nr_validate_nr(sk, nr)) {
sk                188 net/netrom/nr_in.c 			nr_frames_acked(sk, nr);
sk                189 net/netrom/nr_in.c 			nr_send_nak_frame(sk);
sk                192 net/netrom/nr_in.c 				nr_frames_acked(sk, nr);
sk                194 net/netrom/nr_in.c 				nr_check_iframes_acked(sk, nr);
sk                209 net/netrom/nr_in.c 			nr_start_t4timer(sk);
sk                212 net/netrom/nr_in.c 			nr_stop_t4timer(sk);
sk                214 net/netrom/nr_in.c 		if (nr_validate_nr(sk, nr)) {
sk                216 net/netrom/nr_in.c 				nr_frames_acked(sk, nr);
sk                217 net/netrom/nr_in.c 				nr_send_nak_frame(sk);
sk                220 net/netrom/nr_in.c 					nr_frames_acked(sk, nr);
sk                222 net/netrom/nr_in.c 					nr_check_iframes_acked(sk, nr);
sk                236 net/netrom/nr_in.c 					if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) {
sk                242 net/netrom/nr_in.c 				} else if (nr_in_rx_window(sk, ns)) {
sk                256 net/netrom/nr_in.c 			nr_enquiry_response(sk);
sk                260 net/netrom/nr_in.c 				nr_start_t2timer(sk);
sk                267 net/netrom/nr_in.c 			nr_disconnect(sk, ECONNRESET);
sk                277 net/netrom/nr_in.c int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb)
sk                279 net/netrom/nr_in.c 	struct nr_sock *nr = nr_sk(sk);
sk                289 net/netrom/nr_in.c 		queued = nr_state1_machine(sk, skb, frametype);
sk                292 net/netrom/nr_in.c 		queued = nr_state2_machine(sk, skb, frametype);
sk                295 net/netrom/nr_in.c 		queued = nr_state3_machine(sk, skb, frametype);
sk                299 net/netrom/nr_in.c 	nr_kick(sk);
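
nr_process_rx_frame() above fans each incoming frame out to one of three state machines and reports back whether the skb was consumed (queued) or is still owned by the caller. A hedged, renamed sketch of that dispatch shape; the demo_* states stand in for NR_STATE_1/2/3 and only the connected case does real work here:

    /* Sketch of the per-state dispatch used by nr_process_rx_frame(). */
    #include <linux/skbuff.h>
    #include <net/sock.h>

    enum demo_state { DEMO_AWAIT_CONN, DEMO_AWAIT_DISC, DEMO_CONNECTED };

    static int demo_rx_dispatch(struct sock *sk, struct sk_buff *skb,
                                enum demo_state state)
    {
            int queued = 0;

            switch (state) {
            case DEMO_AWAIT_CONN:
                    /* connection request/ack handling (nr_state1_machine) */
                    break;
            case DEMO_AWAIT_DISC:
                    /* disconnect handshake (nr_state2_machine) */
                    break;
            case DEMO_CONNECTED:
                    /* In-sequence data ends up on the receive queue, as in
                     * nr_queue_rx_frame() -> sock_queue_rcv_skb(). */
                    queued = sock_queue_rcv_skb(sk, skb) == 0;
                    break;
            }

            /* queued == 0 tells the caller it must still free the skb. */
            return queued;
    }
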
sk                 32 net/netrom/nr_out.c void nr_output(struct sock *sk, struct sk_buff *skb)
sk                 46 net/netrom/nr_out.c 			if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
sk                 64 net/netrom/nr_out.c 			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
sk                 69 net/netrom/nr_out.c 		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
sk                 72 net/netrom/nr_out.c 	nr_kick(sk);
sk                 79 net/netrom/nr_out.c static void nr_send_iframe(struct sock *sk, struct sk_buff *skb)
sk                 81 net/netrom/nr_out.c 	struct nr_sock *nr = nr_sk(sk);
sk                 92 net/netrom/nr_out.c 	nr_start_idletimer(sk);
sk                 94 net/netrom/nr_out.c 	nr_transmit_buffer(sk, skb);
sk                 97 net/netrom/nr_out.c void nr_send_nak_frame(struct sock *sk)
sk                100 net/netrom/nr_out.c 	struct nr_sock *nr = nr_sk(sk);
sk                114 net/netrom/nr_out.c 	nr_transmit_buffer(sk, skbn);
sk                119 net/netrom/nr_out.c 	nr_stop_t1timer(sk);
sk                122 net/netrom/nr_out.c void nr_kick(struct sock *sk)
sk                124 net/netrom/nr_out.c 	struct nr_sock *nr = nr_sk(sk);
sk                134 net/netrom/nr_out.c 	if (!skb_peek(&sk->sk_write_queue))
sk                153 net/netrom/nr_out.c 	skb = skb_dequeue(&sk->sk_write_queue);
sk                157 net/netrom/nr_out.c 			skb_queue_head(&sk->sk_write_queue, skb);
sk                161 net/netrom/nr_out.c 		skb_set_owner_w(skbn, sk);
sk                166 net/netrom/nr_out.c 		nr_send_iframe(sk, skbn);
sk                176 net/netrom/nr_out.c 		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
sk                181 net/netrom/nr_out.c 	if (!nr_t1timer_running(sk))
sk                182 net/netrom/nr_out.c 		nr_start_t1timer(sk);
sk                185 net/netrom/nr_out.c void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
sk                187 net/netrom/nr_out.c 	struct nr_sock *nr = nr_sk(sk);
sk                211 net/netrom/nr_out.c 		nr_disconnect(sk, ENETUNREACH);
sk                220 net/netrom/nr_out.c void nr_establish_data_link(struct sock *sk)
sk                222 net/netrom/nr_out.c 	struct nr_sock *nr = nr_sk(sk);
sk                227 net/netrom/nr_out.c 	nr_write_internal(sk, NR_CONNREQ);
sk                229 net/netrom/nr_out.c 	nr_stop_t2timer(sk);
sk                230 net/netrom/nr_out.c 	nr_stop_t4timer(sk);
sk                231 net/netrom/nr_out.c 	nr_stop_idletimer(sk);
sk                232 net/netrom/nr_out.c 	nr_start_t1timer(sk);
sk                238 net/netrom/nr_out.c void nr_enquiry_response(struct sock *sk)
sk                240 net/netrom/nr_out.c 	struct nr_sock *nr = nr_sk(sk);
sk                250 net/netrom/nr_out.c 	nr_write_internal(sk, frametype);
sk                256 net/netrom/nr_out.c void nr_check_iframes_acked(struct sock *sk, unsigned short nr)
sk                258 net/netrom/nr_out.c 	struct nr_sock *nrom = nr_sk(sk);
sk                261 net/netrom/nr_out.c 		nr_frames_acked(sk, nr);
sk                262 net/netrom/nr_out.c 		nr_stop_t1timer(sk);
sk                266 net/netrom/nr_out.c 			nr_frames_acked(sk, nr);
sk                267 net/netrom/nr_out.c 			nr_start_t1timer(sk);
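
nr_kick() above drains sk->sk_write_queue only as far as the send window allows, and for every frame it transmits a clone while the original migrates to an ack queue until nr_frames_acked() releases it. A sketch of that single clone-and-own step; the ack_queue parameter stands in for the nr_sock field of the same name:

    /* Sketch of "transmit a clone, keep the original until acked". */
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/skbuff.h>
    #include <net/sock.h>

    static int queue_one_frame_sketch(struct sock *sk,
                                      struct sk_buff_head *ack_queue)
    {
            struct sk_buff *skb, *skbn;

            skb = skb_dequeue(&sk->sk_write_queue);
            if (!skb)
                    return 0;

            skbn = skb_clone(skb, GFP_ATOMIC);
            if (!skbn) {
                    /* Put it back and try again on the next kick. */
                    skb_queue_head(&sk->sk_write_queue, skb);
                    return -ENOMEM;
            }
            skb_set_owner_w(skbn, sk);      /* charge the clone to sk_wmem_alloc */

            /* ...hand skbn to the link layer (nr_send_iframe in the real code)... */

            skb_queue_tail(ack_queue, skb); /* freed once the peer acks it */
            return 1;
    }
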
sk                 31 net/netrom/nr_subr.c void nr_clear_queues(struct sock *sk)
sk                 33 net/netrom/nr_subr.c 	struct nr_sock *nr = nr_sk(sk);
sk                 35 net/netrom/nr_subr.c 	skb_queue_purge(&sk->sk_write_queue);
sk                 46 net/netrom/nr_subr.c void nr_frames_acked(struct sock *sk, unsigned short nr)
sk                 48 net/netrom/nr_subr.c 	struct nr_sock *nrom = nr_sk(sk);
sk                 68 net/netrom/nr_subr.c void nr_requeue_frames(struct sock *sk)
sk                 72 net/netrom/nr_subr.c 	while ((skb = skb_dequeue(&nr_sk(sk)->ack_queue)) != NULL) {
sk                 74 net/netrom/nr_subr.c 			skb_queue_head(&sk->sk_write_queue, skb);
sk                 76 net/netrom/nr_subr.c 			skb_append(skb_prev, skb, &sk->sk_write_queue);
sk                 85 net/netrom/nr_subr.c int nr_validate_nr(struct sock *sk, unsigned short nr)
sk                 87 net/netrom/nr_subr.c 	struct nr_sock *nrom = nr_sk(sk);
sk                101 net/netrom/nr_subr.c int nr_in_rx_window(struct sock *sk, unsigned short ns)
sk                103 net/netrom/nr_subr.c 	struct nr_sock *nr = nr_sk(sk);
sk                119 net/netrom/nr_subr.c void nr_write_internal(struct sock *sk, int frametype)
sk                121 net/netrom/nr_subr.c 	struct nr_sock *nr = nr_sk(sk);
sk                205 net/netrom/nr_subr.c 	nr_transmit_buffer(sk, skb);
sk                259 net/netrom/nr_subr.c void nr_disconnect(struct sock *sk, int reason)
sk                261 net/netrom/nr_subr.c 	nr_stop_t1timer(sk);
sk                262 net/netrom/nr_subr.c 	nr_stop_t2timer(sk);
sk                263 net/netrom/nr_subr.c 	nr_stop_t4timer(sk);
sk                264 net/netrom/nr_subr.c 	nr_stop_idletimer(sk);
sk                266 net/netrom/nr_subr.c 	nr_clear_queues(sk);
sk                268 net/netrom/nr_subr.c 	nr_sk(sk)->state = NR_STATE_0;
sk                270 net/netrom/nr_subr.c 	sk->sk_state     = TCP_CLOSE;
sk                271 net/netrom/nr_subr.c 	sk->sk_err       = reason;
sk                272 net/netrom/nr_subr.c 	sk->sk_shutdown |= SEND_SHUTDOWN;
sk                274 net/netrom/nr_subr.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                275 net/netrom/nr_subr.c 		sk->sk_state_change(sk);
sk                276 net/netrom/nr_subr.c 		sock_set_flag(sk, SOCK_DEAD);
sk                 35 net/netrom/nr_timer.c void nr_init_timers(struct sock *sk)
sk                 37 net/netrom/nr_timer.c 	struct nr_sock *nr = nr_sk(sk);
sk                 45 net/netrom/nr_timer.c 	sk->sk_timer.function = nr_heartbeat_expiry;
sk                 48 net/netrom/nr_timer.c void nr_start_t1timer(struct sock *sk)
sk                 50 net/netrom/nr_timer.c 	struct nr_sock *nr = nr_sk(sk);
sk                 52 net/netrom/nr_timer.c 	sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
sk                 55 net/netrom/nr_timer.c void nr_start_t2timer(struct sock *sk)
sk                 57 net/netrom/nr_timer.c 	struct nr_sock *nr = nr_sk(sk);
sk                 59 net/netrom/nr_timer.c 	sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
sk                 62 net/netrom/nr_timer.c void nr_start_t4timer(struct sock *sk)
sk                 64 net/netrom/nr_timer.c 	struct nr_sock *nr = nr_sk(sk);
sk                 66 net/netrom/nr_timer.c 	sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
sk                 69 net/netrom/nr_timer.c void nr_start_idletimer(struct sock *sk)
sk                 71 net/netrom/nr_timer.c 	struct nr_sock *nr = nr_sk(sk);
sk                 74 net/netrom/nr_timer.c 		sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
sk                 77 net/netrom/nr_timer.c void nr_start_heartbeat(struct sock *sk)
sk                 79 net/netrom/nr_timer.c 	sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
sk                 82 net/netrom/nr_timer.c void nr_stop_t1timer(struct sock *sk)
sk                 84 net/netrom/nr_timer.c 	sk_stop_timer(sk, &nr_sk(sk)->t1timer);
sk                 87 net/netrom/nr_timer.c void nr_stop_t2timer(struct sock *sk)
sk                 89 net/netrom/nr_timer.c 	sk_stop_timer(sk, &nr_sk(sk)->t2timer);
sk                 92 net/netrom/nr_timer.c void nr_stop_t4timer(struct sock *sk)
sk                 94 net/netrom/nr_timer.c 	sk_stop_timer(sk, &nr_sk(sk)->t4timer);
sk                 97 net/netrom/nr_timer.c void nr_stop_idletimer(struct sock *sk)
sk                 99 net/netrom/nr_timer.c 	sk_stop_timer(sk, &nr_sk(sk)->idletimer);
sk                102 net/netrom/nr_timer.c void nr_stop_heartbeat(struct sock *sk)
sk                104 net/netrom/nr_timer.c 	sk_stop_timer(sk, &sk->sk_timer);
sk                107 net/netrom/nr_timer.c int nr_t1timer_running(struct sock *sk)
sk                109 net/netrom/nr_timer.c 	return timer_pending(&nr_sk(sk)->t1timer);
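
The nr_start_*timer()/nr_stop_*timer() entries above are thin wrappers around sk_reset_timer() and sk_stop_timer(), which keep the sock refcount consistent with the timer state: arming an idle timer takes a reference, cancelling a pending one drops it. A hedged sketch of the same wrapper shape (struct my_sock and its fields are illustrative, not the real nr_sock layout):

	/* Sketch: per-socket timer wrappers in the nr_timer.c style. */
	#include <linux/jiffies.h>
	#include <linux/timer.h>
	#include <net/sock.h>

	struct my_sock {
		struct sock		sk;		/* must stay first */
		struct timer_list	t1timer;
		unsigned long		t1;		/* timeout in jiffies */
	};

	#define my_sk(sk) ((struct my_sock *)(sk))

	static void my_start_t1timer(struct sock *sk)
	{
		/* takes a sock reference if the timer was not already pending */
		sk_reset_timer(sk, &my_sk(sk)->t1timer, jiffies + my_sk(sk)->t1);
	}

	static void my_stop_t1timer(struct sock *sk)
	{
		/* drops that reference if the timer was still pending */
		sk_stop_timer(sk, &my_sk(sk)->t1timer);
	}
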
sk                114 net/netrom/nr_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                115 net/netrom/nr_timer.c 	struct nr_sock *nr = nr_sk(sk);
sk                117 net/netrom/nr_timer.c 	bh_lock_sock(sk);
sk                122 net/netrom/nr_timer.c 		if (sock_flag(sk, SOCK_DESTROY) ||
sk                123 net/netrom/nr_timer.c 		    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
sk                124 net/netrom/nr_timer.c 			sock_hold(sk);
sk                125 net/netrom/nr_timer.c 			bh_unlock_sock(sk);
sk                126 net/netrom/nr_timer.c 			nr_destroy_socket(sk);
sk                127 net/netrom/nr_timer.c 			sock_put(sk);
sk                136 net/netrom/nr_timer.c 		if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
sk                141 net/netrom/nr_timer.c 			nr_write_internal(sk, NR_INFOACK);
sk                147 net/netrom/nr_timer.c 	nr_start_heartbeat(sk);
sk                148 net/netrom/nr_timer.c 	bh_unlock_sock(sk);
sk                154 net/netrom/nr_timer.c 	struct sock *sk = &nr->sock;
sk                156 net/netrom/nr_timer.c 	bh_lock_sock(sk);
sk                159 net/netrom/nr_timer.c 		nr_enquiry_response(sk);
sk                161 net/netrom/nr_timer.c 	bh_unlock_sock(sk);
sk                167 net/netrom/nr_timer.c 	struct sock *sk = &nr->sock;
sk                169 net/netrom/nr_timer.c 	bh_lock_sock(sk);
sk                170 net/netrom/nr_timer.c 	nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY;
sk                171 net/netrom/nr_timer.c 	bh_unlock_sock(sk);
sk                177 net/netrom/nr_timer.c 	struct sock *sk = &nr->sock;
sk                179 net/netrom/nr_timer.c 	bh_lock_sock(sk);
sk                181 net/netrom/nr_timer.c 	nr_clear_queues(sk);
sk                184 net/netrom/nr_timer.c 	nr_write_internal(sk, NR_DISCREQ);
sk                187 net/netrom/nr_timer.c 	nr_start_t1timer(sk);
sk                188 net/netrom/nr_timer.c 	nr_stop_t2timer(sk);
sk                189 net/netrom/nr_timer.c 	nr_stop_t4timer(sk);
sk                191 net/netrom/nr_timer.c 	sk->sk_state     = TCP_CLOSE;
sk                192 net/netrom/nr_timer.c 	sk->sk_err       = 0;
sk                193 net/netrom/nr_timer.c 	sk->sk_shutdown |= SEND_SHUTDOWN;
sk                195 net/netrom/nr_timer.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                196 net/netrom/nr_timer.c 		sk->sk_state_change(sk);
sk                197 net/netrom/nr_timer.c 		sock_set_flag(sk, SOCK_DEAD);
sk                199 net/netrom/nr_timer.c 	bh_unlock_sock(sk);
sk                205 net/netrom/nr_timer.c 	struct sock *sk = &nr->sock;
sk                207 net/netrom/nr_timer.c 	bh_lock_sock(sk);
sk                211 net/netrom/nr_timer.c 			nr_disconnect(sk, ETIMEDOUT);
sk                212 net/netrom/nr_timer.c 			bh_unlock_sock(sk);
sk                216 net/netrom/nr_timer.c 			nr_write_internal(sk, NR_CONNREQ);
sk                222 net/netrom/nr_timer.c 			nr_disconnect(sk, ETIMEDOUT);
sk                223 net/netrom/nr_timer.c 			bh_unlock_sock(sk);
sk                227 net/netrom/nr_timer.c 			nr_write_internal(sk, NR_DISCREQ);
sk                233 net/netrom/nr_timer.c 			nr_disconnect(sk, ETIMEDOUT);
sk                234 net/netrom/nr_timer.c 			bh_unlock_sock(sk);
sk                238 net/netrom/nr_timer.c 			nr_requeue_frames(sk);
sk                243 net/netrom/nr_timer.c 	nr_start_t1timer(sk);
sk                244 net/netrom/nr_timer.c 	bh_unlock_sock(sk);
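
The expiry handlers listed above (heartbeat, t2, t4, idle, t1) all follow the same shape: recover the owning sock with from_timer(), take the softirq-side lock with bh_lock_sock(), run the state machine, optionally re-arm, and unlock. A minimal sketch of that callback, reusing the hypothetical struct my_sock and my_start_t1timer() from the previous sketch; the final sock_put() follows the sk_reset_timer() reference contract, and individual handlers in this listing may account for the reference differently:

	/* Sketch: timer expiry callback in the nr_timer.c style. */
	static void my_t1timer_expiry(struct timer_list *t)
	{
		struct my_sock *msk = from_timer(msk, t, t1timer);
		struct sock *sk = &msk->sk;

		bh_lock_sock(sk);		/* softirq-side socket lock */
		/* retransmission / state machine work would go here */
		my_start_t1timer(sk);		/* re-arm (takes a new reference) */
		bh_unlock_sock(sk);
		sock_put(sk);			/* drop the reference held for the
						 * timer instance that just fired */
	}
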
sk                709 net/nfc/core.c struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
sk                719 net/nfc/core.c 	skb = sock_alloc_send_skb(sk, total_size, flags & MSG_DONTWAIT, err);
sk                104 net/nfc/llcp.h 	struct sock sk;
sk                149 net/nfc/llcp.h #define nfc_llcp_sock(sk) ((struct nfc_llcp_sock *) (sk))
sk                150 net/nfc/llcp.h #define nfc_llcp_dev(sk)  (nfc_llcp_sock((sk))->dev)
sk                218 net/nfc/llcp.h void nfc_llcp_accept_unlink(struct sock *sk);
sk                219 net/nfc/llcp.h void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
sk                220 net/nfc/llcp.h struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
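
The llcp.h entries show the standard container idiom for protocol-private sockets: struct nfc_llcp_sock embeds struct sock as its first member, so the generic struct sock * passed around by the socket layer can simply be cast back (nfc_llcp_sock() here, pkt_sk() in net/packet/internal.h further down). A small illustrative sketch with hypothetical names:

	/* Sketch: protocol-private sock obtained by casting, nfc_llcp_sock() style. */
	#include <net/sock.h>

	struct my_proto_sock {
		struct sock	sk;		/* MUST stay the first member */
		u32		my_field;	/* protocol-private state */
	};

	static inline struct my_proto_sock *my_proto_sk(const struct sock *sk)
	{
		return (struct my_proto_sock *)sk;
	}

The cast is only valid because the protocol's struct proto sets .obj_size to the size of the container, so sk_alloc() allocates the whole structure (see the llcp_sock_proto allocation entries further down).
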
sk                320 net/nfc/llcp_commands.c 	skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
sk                647 net/nfc/llcp_commands.c 	struct sock *sk = &sock->sk;
sk                711 net/nfc/llcp_commands.c 		lock_sock(sk);
sk                715 net/nfc/llcp_commands.c 		release_sock(sk);
sk                763 net/nfc/llcp_commands.c 		pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
sk                 23 net/nfc/llcp_core.c void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk)
sk                 26 net/nfc/llcp_core.c 	sk_add_node(sk, &l->head);
sk                 30 net/nfc/llcp_core.c void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk)
sk                 33 net/nfc/llcp_core.c 	sk_del_node_init(sk);
sk                 48 net/nfc/llcp_core.c 	pr_debug("%p\n", &sock->sk);
sk                 58 net/nfc/llcp_core.c 		if (s->sk != &sock->sk)
sk                 69 net/nfc/llcp_core.c 	struct sock *sk;
sk                 77 net/nfc/llcp_core.c 	sk_for_each_safe(sk, tmp, &local->sockets.head) {
sk                 78 net/nfc/llcp_core.c 		llcp_sock = nfc_llcp_sock(sk);
sk                 80 net/nfc/llcp_core.c 		bh_lock_sock(sk);
sk                 84 net/nfc/llcp_core.c 		if (sk->sk_state == LLCP_CONNECTED)
sk                 87 net/nfc/llcp_core.c 		if (sk->sk_state == LLCP_LISTEN) {
sk                 94 net/nfc/llcp_core.c 				accept_sk = &lsk->sk;
sk                102 net/nfc/llcp_core.c 				accept_sk->sk_state_change(sk);
sk                109 net/nfc/llcp_core.c 			sk->sk_err = err;
sk                110 net/nfc/llcp_core.c 		sk->sk_state = LLCP_CLOSED;
sk                111 net/nfc/llcp_core.c 		sk->sk_state_change(sk);
sk                113 net/nfc/llcp_core.c 		bh_unlock_sock(sk);
sk                115 net/nfc/llcp_core.c 		sk_del_node_init(sk);
sk                126 net/nfc/llcp_core.c 	sk_for_each_safe(sk, tmp, &local->raw_sockets.head) {
sk                127 net/nfc/llcp_core.c 		llcp_sock = nfc_llcp_sock(sk);
sk                129 net/nfc/llcp_core.c 		bh_lock_sock(sk);
sk                134 net/nfc/llcp_core.c 			sk->sk_err = err;
sk                135 net/nfc/llcp_core.c 		sk->sk_state = LLCP_CLOSED;
sk                136 net/nfc/llcp_core.c 		sk->sk_state_change(sk);
sk                138 net/nfc/llcp_core.c 		bh_unlock_sock(sk);
sk                140 net/nfc/llcp_core.c 		sk_del_node_init(sk);
sk                189 net/nfc/llcp_core.c 	struct sock *sk;
sk                201 net/nfc/llcp_core.c 	sk_for_each(sk, &local->sockets.head) {
sk                202 net/nfc/llcp_core.c 		tmp_sock = nfc_llcp_sock(sk);
sk                215 net/nfc/llcp_core.c 	sock_hold(&llcp_sock->sk);
sk                222 net/nfc/llcp_core.c 	sock_put(&sock->sk);
sk                330 net/nfc/llcp_core.c 	struct sock *sk;
sk                342 net/nfc/llcp_core.c 	sk_for_each(sk, &local->sockets.head) {
sk                343 net/nfc/llcp_core.c 		tmp_sock = nfc_llcp_sock(sk);
sk                347 net/nfc/llcp_core.c 		if (tmp_sock->sk.sk_type == SOCK_STREAM &&
sk                348 net/nfc/llcp_core.c 		    tmp_sock->sk.sk_state != LLCP_LISTEN)
sk                351 net/nfc/llcp_core.c 		if (tmp_sock->sk.sk_type == SOCK_DGRAM &&
sk                352 net/nfc/llcp_core.c 		    tmp_sock->sk.sk_state != LLCP_BOUND)
sk                678 net/nfc/llcp_core.c 	struct sock *sk;
sk                683 net/nfc/llcp_core.c 	sk_for_each(sk, &local->raw_sockets.head) {
sk                684 net/nfc/llcp_core.c 		if (sk->sk_state != LLCP_BOUND)
sk                705 net/nfc/llcp_core.c 		if (sock_queue_rcv_skb(sk, nskb))
sk                719 net/nfc/llcp_core.c 	struct sock *sk;
sk                724 net/nfc/llcp_core.c 		sk = skb->sk;
sk                725 net/nfc/llcp_core.c 		llcp_sock = nfc_llcp_sock(sk);
sk                742 net/nfc/llcp_core.c 			if (ptype == LLCP_PDU_DISC && sk != NULL &&
sk                743 net/nfc/llcp_core.c 			    sk->sk_state == LLCP_DISCONNECTING) {
sk                744 net/nfc/llcp_core.c 				nfc_llcp_sock_unlink(&local->sockets, sk);
sk                745 net/nfc/llcp_core.c 				sock_orphan(sk);
sk                746 net/nfc/llcp_core.c 				sock_put(sk);
sk                781 net/nfc/llcp_core.c 	struct sock *sk;
sk                786 net/nfc/llcp_core.c 	sk_for_each(sk, &local->connecting_sockets.head) {
sk                787 net/nfc/llcp_core.c 		llcp_sock = nfc_llcp_sock(sk);
sk                790 net/nfc/llcp_core.c 			sock_hold(&llcp_sock->sk);
sk                813 net/nfc/llcp_core.c 	sock_hold(&llcp_sock->sk);
sk                859 net/nfc/llcp_core.c 	if (llcp_sock == NULL || llcp_sock->sk.sk_type != SOCK_DGRAM)
sk                864 net/nfc/llcp_core.c 	if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
sk                891 net/nfc/llcp_core.c 		if (sock == NULL || sock->sk.sk_state != LLCP_LISTEN) {
sk                914 net/nfc/llcp_core.c 	lock_sock(&sock->sk);
sk                916 net/nfc/llcp_core.c 	parent = &sock->sk;
sk                920 net/nfc/llcp_core.c 		release_sock(&sock->sk);
sk                921 net/nfc/llcp_core.c 		sock_put(&sock->sk);
sk                932 net/nfc/llcp_core.c 			release_sock(&sock->sk);
sk                933 net/nfc/llcp_core.c 			sock_put(&sock->sk);
sk                943 net/nfc/llcp_core.c 		release_sock(&sock->sk);
sk                944 net/nfc/llcp_core.c 		sock_put(&sock->sk);
sk                973 net/nfc/llcp_core.c 	pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk);
sk                977 net/nfc/llcp_core.c 	nfc_llcp_accept_enqueue(&sock->sk, new_sk);
sk                989 net/nfc/llcp_core.c 	release_sock(&sock->sk);
sk                990 net/nfc/llcp_core.c 	sock_put(&sock->sk);
sk               1031 net/nfc/llcp_core.c 	struct sock *sk;
sk               1048 net/nfc/llcp_core.c 	sk = &llcp_sock->sk;
sk               1049 net/nfc/llcp_core.c 	lock_sock(sk);
sk               1050 net/nfc/llcp_core.c 	if (sk->sk_state == LLCP_CLOSED) {
sk               1051 net/nfc/llcp_core.c 		release_sock(sk);
sk               1057 net/nfc/llcp_core.c 		pr_debug("I frame, queueing on %p\n", &llcp_sock->sk);
sk               1065 net/nfc/llcp_core.c 		if (!sock_queue_rcv_skb(&llcp_sock->sk, skb)) {
sk               1110 net/nfc/llcp_core.c 	release_sock(sk);
sk               1118 net/nfc/llcp_core.c 	struct sock *sk;
sk               1136 net/nfc/llcp_core.c 	sk = &llcp_sock->sk;
sk               1137 net/nfc/llcp_core.c 	lock_sock(sk);
sk               1141 net/nfc/llcp_core.c 	if (sk->sk_state == LLCP_CLOSED) {
sk               1142 net/nfc/llcp_core.c 		release_sock(sk);
sk               1146 net/nfc/llcp_core.c 	if (sk->sk_state == LLCP_CONNECTED) {
sk               1148 net/nfc/llcp_core.c 		sk->sk_state = LLCP_CLOSED;
sk               1149 net/nfc/llcp_core.c 		sk->sk_state_change(sk);
sk               1154 net/nfc/llcp_core.c 	release_sock(sk);
sk               1161 net/nfc/llcp_core.c 	struct sock *sk;
sk               1175 net/nfc/llcp_core.c 	sk = &llcp_sock->sk;
sk               1178 net/nfc/llcp_core.c 	nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
sk               1179 net/nfc/llcp_core.c 	nfc_llcp_sock_link(&local->sockets, sk);
sk               1185 net/nfc/llcp_core.c 	sk->sk_state = LLCP_CONNECTED;
sk               1186 net/nfc/llcp_core.c 	sk->sk_state_change(sk);
sk               1194 net/nfc/llcp_core.c 	struct sock *sk;
sk               1219 net/nfc/llcp_core.c 	sk = &llcp_sock->sk;
sk               1221 net/nfc/llcp_core.c 	sk->sk_err = ENXIO;
sk               1222 net/nfc/llcp_core.c 	sk->sk_state = LLCP_CLOSED;
sk               1223 net/nfc/llcp_core.c 	sk->sk_state_change(sk);
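
Several llcp_core.c entries above walk a socket list with sk_for_each() under the list lock and take sock_hold() on the match before returning it, so the caller can keep using the sock after the lock is dropped and releases it later with sock_put(). A hedged sketch of that lookup shape, reusing the hypothetical my_proto_sk() from the earlier sketch (the list/lock layout mirrors struct llcp_sock_list; everything else is illustrative):

	/* Sketch: locked list lookup returning a referenced sock. */
	#include <net/sock.h>

	struct my_sock_list {
		struct hlist_head	head;
		rwlock_t		lock;
	};

	static struct sock *my_lookup(struct my_sock_list *l, u32 wanted)
	{
		struct sock *sk, *found = NULL;

		read_lock(&l->lock);
		sk_for_each(sk, &l->head) {
			if (my_proto_sk(sk)->my_field != wanted)
				continue;
			sock_hold(sk);		/* reference for the caller */
			found = sk;
			break;
		}
		read_unlock(&l->lock);

		return found;			/* caller must sock_put() */
	}
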
sk                 17 net/nfc/llcp_sock.c static int sock_wait_state(struct sock *sk, int state, unsigned long timeo)
sk                 22 net/nfc/llcp_sock.c 	pr_debug("sk %p", sk);
sk                 24 net/nfc/llcp_sock.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                 27 net/nfc/llcp_sock.c 	while (sk->sk_state != state) {
sk                 38 net/nfc/llcp_sock.c 		release_sock(sk);
sk                 40 net/nfc/llcp_sock.c 		lock_sock(sk);
sk                 43 net/nfc/llcp_sock.c 		err = sock_error(sk);
sk                 49 net/nfc/llcp_sock.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                 61 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                 62 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                 72 net/nfc/llcp_sock.c 	pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
sk                 82 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                 84 net/nfc/llcp_sock.c 	if (sk->sk_state != LLCP_CLOSED) {
sk                124 net/nfc/llcp_sock.c 	nfc_llcp_sock_link(&local->sockets, sk);
sk                128 net/nfc/llcp_sock.c 	sk->sk_state = LLCP_BOUND;
sk                134 net/nfc/llcp_sock.c 	release_sock(sk);
sk                141 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                142 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                152 net/nfc/llcp_sock.c 	pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
sk                158 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                160 net/nfc/llcp_sock.c 	if (sk->sk_state != LLCP_CLOSED) {
sk                181 net/nfc/llcp_sock.c 	nfc_llcp_sock_link(&local->raw_sockets, sk);
sk                183 net/nfc/llcp_sock.c 	sk->sk_state = LLCP_BOUND;
sk                189 net/nfc/llcp_sock.c 	release_sock(sk);
sk                195 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                198 net/nfc/llcp_sock.c 	pr_debug("sk %p backlog %d\n", sk, backlog);
sk                200 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                203 net/nfc/llcp_sock.c 	    sk->sk_state != LLCP_BOUND) {
sk                208 net/nfc/llcp_sock.c 	sk->sk_max_ack_backlog = backlog;
sk                209 net/nfc/llcp_sock.c 	sk->sk_ack_backlog = 0;
sk                212 net/nfc/llcp_sock.c 	sk->sk_state = LLCP_LISTEN;
sk                215 net/nfc/llcp_sock.c 	release_sock(sk);
sk                223 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                224 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                228 net/nfc/llcp_sock.c 	pr_debug("%p optname %d\n", sk, optname);
sk                233 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                237 net/nfc/llcp_sock.c 		if (sk->sk_state == LLCP_CONNECTED ||
sk                238 net/nfc/llcp_sock.c 		    sk->sk_state == LLCP_BOUND ||
sk                239 net/nfc/llcp_sock.c 		    sk->sk_state == LLCP_LISTEN) {
sk                259 net/nfc/llcp_sock.c 		if (sk->sk_state == LLCP_CONNECTED ||
sk                260 net/nfc/llcp_sock.c 		    sk->sk_state == LLCP_BOUND ||
sk                261 net/nfc/llcp_sock.c 		    sk->sk_state == LLCP_LISTEN) {
sk                285 net/nfc/llcp_sock.c 	release_sock(sk);
sk                297 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                298 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                303 net/nfc/llcp_sock.c 	pr_debug("%p optname %d\n", sk, optname);
sk                317 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                362 net/nfc/llcp_sock.c 	release_sock(sk);
sk                370 net/nfc/llcp_sock.c void nfc_llcp_accept_unlink(struct sock *sk)
sk                372 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                374 net/nfc/llcp_sock.c 	pr_debug("state %d\n", sk->sk_state);
sk                380 net/nfc/llcp_sock.c 	sock_put(sk);
sk                383 net/nfc/llcp_sock.c void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk)
sk                385 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                389 net/nfc/llcp_sock.c 	sock_hold(sk);
sk                401 net/nfc/llcp_sock.c 	struct sock *sk;
sk                407 net/nfc/llcp_sock.c 		sk = &lsk->sk;
sk                408 net/nfc/llcp_sock.c 		lock_sock(sk);
sk                410 net/nfc/llcp_sock.c 		if (sk->sk_state == LLCP_CLOSED) {
sk                411 net/nfc/llcp_sock.c 			release_sock(sk);
sk                412 net/nfc/llcp_sock.c 			nfc_llcp_accept_unlink(sk);
sk                416 net/nfc/llcp_sock.c 		if (sk->sk_state == LLCP_CONNECTED || !newsock) {
sk                418 net/nfc/llcp_sock.c 			sock_put(sk);
sk                421 net/nfc/llcp_sock.c 				sock_graft(sk, newsock);
sk                423 net/nfc/llcp_sock.c 			release_sock(sk);
sk                425 net/nfc/llcp_sock.c 			pr_debug("Returning sk state %d\n", sk->sk_state);
sk                429 net/nfc/llcp_sock.c 			return sk;
sk                432 net/nfc/llcp_sock.c 		release_sock(sk);
sk                442 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk, *new_sk;
sk                446 net/nfc/llcp_sock.c 	pr_debug("parent %p\n", sk);
sk                448 net/nfc/llcp_sock.c 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                450 net/nfc/llcp_sock.c 	if (sk->sk_state != LLCP_LISTEN) {
sk                455 net/nfc/llcp_sock.c 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk                458 net/nfc/llcp_sock.c 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
sk                459 net/nfc/llcp_sock.c 	while (!(new_sk = nfc_llcp_accept_dequeue(sk, newsock))) {
sk                472 net/nfc/llcp_sock.c 		release_sock(sk);
sk                474 net/nfc/llcp_sock.c 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                477 net/nfc/llcp_sock.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                487 net/nfc/llcp_sock.c 	release_sock(sk);
sk                495 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                496 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                502 net/nfc/llcp_sock.c 	pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
sk                507 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                509 net/nfc/llcp_sock.c 		release_sock(sk);
sk                521 net/nfc/llcp_sock.c 	release_sock(sk);
sk                529 net/nfc/llcp_sock.c 	struct sock *sk;
sk                535 net/nfc/llcp_sock.c 		sk = &llcp_sock->sk;
sk                537 net/nfc/llcp_sock.c 		if (sk->sk_state == LLCP_CONNECTED)
sk                547 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                550 net/nfc/llcp_sock.c 	pr_debug("%p\n", sk);
sk                554 net/nfc/llcp_sock.c 	if (sk->sk_state == LLCP_LISTEN)
sk                555 net/nfc/llcp_sock.c 		return llcp_accept_poll(sk);
sk                557 net/nfc/llcp_sock.c 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
sk                559 net/nfc/llcp_sock.c 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
sk                561 net/nfc/llcp_sock.c 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk                564 net/nfc/llcp_sock.c 	if (sk->sk_state == LLCP_CLOSED)
sk                567 net/nfc/llcp_sock.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                570 net/nfc/llcp_sock.c 	if (sk->sk_shutdown == SHUTDOWN_MASK)
sk                573 net/nfc/llcp_sock.c 	if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED)
sk                576 net/nfc/llcp_sock.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                585 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                587 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                590 net/nfc/llcp_sock.c 	if (!sk)
sk                593 net/nfc/llcp_sock.c 	pr_debug("%p\n", sk);
sk                601 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                604 net/nfc/llcp_sock.c 	if (sk->sk_state == LLCP_CONNECTED)
sk                607 net/nfc/llcp_sock.c 	if (sk->sk_state == LLCP_LISTEN) {
sk                613 net/nfc/llcp_sock.c 			accept_sk = &lsk->sk;
sk                626 net/nfc/llcp_sock.c 	release_sock(sk);
sk                632 net/nfc/llcp_sock.c 	if (sk->sk_state == LLCP_DISCONNECTING)
sk                636 net/nfc/llcp_sock.c 		nfc_llcp_sock_unlink(&local->raw_sockets, sk);
sk                638 net/nfc/llcp_sock.c 		nfc_llcp_sock_unlink(&local->sockets, sk);
sk                641 net/nfc/llcp_sock.c 	sock_orphan(sk);
sk                642 net/nfc/llcp_sock.c 	sock_put(sk);
sk                650 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                651 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                657 net/nfc/llcp_sock.c 	pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
sk                668 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                670 net/nfc/llcp_sock.c 	if (sk->sk_state == LLCP_CONNECTED) {
sk                727 net/nfc/llcp_sock.c 	nfc_llcp_sock_link(&local->connecting_sockets, sk);
sk                733 net/nfc/llcp_sock.c 	sk->sk_state = LLCP_CONNECTING;
sk                735 net/nfc/llcp_sock.c 	ret = sock_wait_state(sk, LLCP_CONNECTED,
sk                736 net/nfc/llcp_sock.c 			      sock_sndtimeo(sk, flags & O_NONBLOCK));
sk                740 net/nfc/llcp_sock.c 	release_sock(sk);
sk                745 net/nfc/llcp_sock.c 	nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
sk                754 net/nfc/llcp_sock.c 	release_sock(sk);
sk                761 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                762 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                765 net/nfc/llcp_sock.c 	pr_debug("sock %p sk %p", sock, sk);
sk                767 net/nfc/llcp_sock.c 	ret = sock_error(sk);
sk                774 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                776 net/nfc/llcp_sock.c 	if (sk->sk_type == SOCK_DGRAM) {
sk                781 net/nfc/llcp_sock.c 			release_sock(sk);
sk                785 net/nfc/llcp_sock.c 		release_sock(sk);
sk                791 net/nfc/llcp_sock.c 	if (sk->sk_state != LLCP_CONNECTED) {
sk                792 net/nfc/llcp_sock.c 		release_sock(sk);
sk                796 net/nfc/llcp_sock.c 	release_sock(sk);
sk                805 net/nfc/llcp_sock.c 	struct sock *sk = sock->sk;
sk                810 net/nfc/llcp_sock.c 	pr_debug("%p %zu\n", sk, len);
sk                812 net/nfc/llcp_sock.c 	lock_sock(sk);
sk                814 net/nfc/llcp_sock.c 	if (sk->sk_state == LLCP_CLOSED &&
sk                815 net/nfc/llcp_sock.c 	    skb_queue_empty(&sk->sk_receive_queue)) {
sk                816 net/nfc/llcp_sock.c 		release_sock(sk);
sk                820 net/nfc/llcp_sock.c 	release_sock(sk);
sk                825 net/nfc/llcp_sock.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk                828 net/nfc/llcp_sock.c 		       sk->sk_state, err, sock_error(sk));
sk                830 net/nfc/llcp_sock.c 		if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                842 net/nfc/llcp_sock.c 			skb_queue_head(&sk->sk_receive_queue, skb);
sk                846 net/nfc/llcp_sock.c 	sock_recv_timestamp(msg, sk, skb);
sk                848 net/nfc/llcp_sock.c 	if (sk->sk_type == SOCK_DGRAM && msg->msg_name) {
sk                868 net/nfc/llcp_sock.c 		if (sk->sk_type == SOCK_STREAM ||
sk                869 net/nfc/llcp_sock.c 		    sk->sk_type == SOCK_DGRAM ||
sk                870 net/nfc/llcp_sock.c 		    sk->sk_type == SOCK_RAW) {
sk                873 net/nfc/llcp_sock.c 				skb_queue_head(&sk->sk_receive_queue, skb);
sk                885 net/nfc/llcp_sock.c 	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
sk                931 net/nfc/llcp_sock.c static void llcp_sock_destruct(struct sock *sk)
sk                933 net/nfc/llcp_sock.c 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
sk                935 net/nfc/llcp_sock.c 	pr_debug("%p\n", sk);
sk                937 net/nfc/llcp_sock.c 	if (sk->sk_state == LLCP_CONNECTED)
sk                940 net/nfc/llcp_sock.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                944 net/nfc/llcp_sock.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                945 net/nfc/llcp_sock.c 		pr_err("Freeing alive NFC LLCP socket %p\n", sk);
sk                952 net/nfc/llcp_sock.c 	struct sock *sk;
sk                955 net/nfc/llcp_sock.c 	sk = sk_alloc(&init_net, PF_NFC, gfp, &llcp_sock_proto, kern);
sk                956 net/nfc/llcp_sock.c 	if (!sk)
sk                959 net/nfc/llcp_sock.c 	llcp_sock = nfc_llcp_sock(sk);
sk                961 net/nfc/llcp_sock.c 	sock_init_data(sock, sk);
sk                962 net/nfc/llcp_sock.c 	sk->sk_state = LLCP_CLOSED;
sk                963 net/nfc/llcp_sock.c 	sk->sk_protocol = NFC_SOCKPROTO_LLCP;
sk                964 net/nfc/llcp_sock.c 	sk->sk_type = type;
sk                965 net/nfc/llcp_sock.c 	sk->sk_destruct = llcp_sock_destruct;
sk                983 net/nfc/llcp_sock.c 	return sk;
sk               1003 net/nfc/llcp_sock.c 	struct sock *sk;
sk               1020 net/nfc/llcp_sock.c 	sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
sk               1021 net/nfc/llcp_sock.c 	if (sk == NULL)
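
nfc_llcp_sock_alloc() above shows the canonical allocation sequence for a protocol socket: sk_alloc() with the protocol's struct proto (whose .obj_size covers the container), sock_init_data() to initialize the queues and tie the sock to its struct socket, then the per-protocol fields and a sk_destruct callback for final cleanup. A hedged sketch of the same sequence for a hypothetical protocol, reusing struct my_proto_sock from the earlier sketch (proto_register() and error handling are omitted):

	/* Sketch: socket allocation in the nfc_llcp_sock_alloc() style. */
	#include <linux/module.h>
	#include <net/sock.h>
	#include <net/tcp_states.h>

	static struct proto my_proto = {
		.name		= "EXAMPLE",
		.owner		= THIS_MODULE,
		.obj_size	= sizeof(struct my_proto_sock),	/* container size */
	};

	static void my_destruct(struct sock *sk)
	{
		skb_queue_purge(&sk->sk_receive_queue);
	}

	static struct sock *my_sock_alloc(struct net *net, struct socket *sock,
					  int type, gfp_t gfp, int kern)
	{
		struct sock *sk;

		sk = sk_alloc(net, PF_NFC, gfp, &my_proto, kern);
		if (!sk)
			return NULL;

		sock_init_data(sock, sk);	/* queues, callbacks, sock <-> sk */
		sk->sk_state	= TCP_CLOSE;
		sk->sk_type	= type;
		sk->sk_destruct	= my_destruct;	/* runs when the last ref drops */

		return sk;
	}
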
sk                 28 net/nfc/nfc.h  	struct sock sk;
sk                 40 net/nfc/nfc.h  #define nfc_rawsock(sk) ((struct nfc_rawsock *) sk)
sk                 22 net/nfc/rawsock.c static void nfc_sock_link(struct nfc_sock_list *l, struct sock *sk)
sk                 25 net/nfc/rawsock.c 	sk_add_node(sk, &l->head);
sk                 29 net/nfc/rawsock.c static void nfc_sock_unlink(struct nfc_sock_list *l, struct sock *sk)
sk                 32 net/nfc/rawsock.c 	sk_del_node_init(sk);
sk                 36 net/nfc/rawsock.c static void rawsock_write_queue_purge(struct sock *sk)
sk                 38 net/nfc/rawsock.c 	pr_debug("sk=%p\n", sk);
sk                 40 net/nfc/rawsock.c 	spin_lock_bh(&sk->sk_write_queue.lock);
sk                 41 net/nfc/rawsock.c 	__skb_queue_purge(&sk->sk_write_queue);
sk                 42 net/nfc/rawsock.c 	nfc_rawsock(sk)->tx_work_scheduled = false;
sk                 43 net/nfc/rawsock.c 	spin_unlock_bh(&sk->sk_write_queue.lock);
sk                 46 net/nfc/rawsock.c static void rawsock_report_error(struct sock *sk, int err)
sk                 48 net/nfc/rawsock.c 	pr_debug("sk=%p err=%d\n", sk, err);
sk                 50 net/nfc/rawsock.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk                 51 net/nfc/rawsock.c 	sk->sk_err = -err;
sk                 52 net/nfc/rawsock.c 	sk->sk_error_report(sk);
sk                 54 net/nfc/rawsock.c 	rawsock_write_queue_purge(sk);
sk                 59 net/nfc/rawsock.c 	struct sock *sk = sock->sk;
sk                 61 net/nfc/rawsock.c 	pr_debug("sock=%p sk=%p\n", sock, sk);
sk                 63 net/nfc/rawsock.c 	if (!sk)
sk                 67 net/nfc/rawsock.c 		nfc_sock_unlink(&raw_sk_list, sk);
sk                 69 net/nfc/rawsock.c 	sock_orphan(sk);
sk                 70 net/nfc/rawsock.c 	sock_put(sk);
sk                 78 net/nfc/rawsock.c 	struct sock *sk = sock->sk;
sk                 83 net/nfc/rawsock.c 	pr_debug("sock=%p sk=%p flags=%d\n", sock, sk, flags);
sk                 92 net/nfc/rawsock.c 	lock_sock(sk);
sk                115 net/nfc/rawsock.c 	nfc_rawsock(sk)->dev = dev;
sk                116 net/nfc/rawsock.c 	nfc_rawsock(sk)->target_idx = addr->target_idx;
sk                118 net/nfc/rawsock.c 	sk->sk_state = TCP_ESTABLISHED;
sk                119 net/nfc/rawsock.c 	sk->sk_state_change(sk);
sk                121 net/nfc/rawsock.c 	release_sock(sk);
sk                127 net/nfc/rawsock.c 	release_sock(sk);
sk                141 net/nfc/rawsock.c 	struct sock *sk = (struct sock *) context;
sk                145 net/nfc/rawsock.c 	pr_debug("sk=%p err=%d\n", sk, err);
sk                154 net/nfc/rawsock.c 	err = sock_queue_rcv_skb(sk, skb);
sk                158 net/nfc/rawsock.c 	spin_lock_bh(&sk->sk_write_queue.lock);
sk                159 net/nfc/rawsock.c 	if (!skb_queue_empty(&sk->sk_write_queue))
sk                160 net/nfc/rawsock.c 		schedule_work(&nfc_rawsock(sk)->tx_work);
sk                162 net/nfc/rawsock.c 		nfc_rawsock(sk)->tx_work_scheduled = false;
sk                163 net/nfc/rawsock.c 	spin_unlock_bh(&sk->sk_write_queue.lock);
sk                165 net/nfc/rawsock.c 	sock_put(sk);
sk                172 net/nfc/rawsock.c 	rawsock_report_error(sk, err);
sk                173 net/nfc/rawsock.c 	sock_put(sk);
sk                178 net/nfc/rawsock.c 	struct sock *sk = to_rawsock_sk(work);
sk                179 net/nfc/rawsock.c 	struct nfc_dev *dev = nfc_rawsock(sk)->dev;
sk                180 net/nfc/rawsock.c 	u32 target_idx = nfc_rawsock(sk)->target_idx;
sk                184 net/nfc/rawsock.c 	pr_debug("sk=%p target_idx=%u\n", sk, target_idx);
sk                186 net/nfc/rawsock.c 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
sk                187 net/nfc/rawsock.c 		rawsock_write_queue_purge(sk);
sk                191 net/nfc/rawsock.c 	skb = skb_dequeue(&sk->sk_write_queue);
sk                193 net/nfc/rawsock.c 	sock_hold(sk);
sk                195 net/nfc/rawsock.c 			       rawsock_data_exchange_complete, sk);
sk                197 net/nfc/rawsock.c 		rawsock_report_error(sk, rc);
sk                198 net/nfc/rawsock.c 		sock_put(sk);
sk                204 net/nfc/rawsock.c 	struct sock *sk = sock->sk;
sk                205 net/nfc/rawsock.c 	struct nfc_dev *dev = nfc_rawsock(sk)->dev;
sk                209 net/nfc/rawsock.c 	pr_debug("sock=%p sk=%p len=%zu\n", sock, sk, len);
sk                217 net/nfc/rawsock.c 	skb = nfc_alloc_send_skb(dev, sk, msg->msg_flags, len, &rc);
sk                227 net/nfc/rawsock.c 	spin_lock_bh(&sk->sk_write_queue.lock);
sk                228 net/nfc/rawsock.c 	__skb_queue_tail(&sk->sk_write_queue, skb);
sk                229 net/nfc/rawsock.c 	if (!nfc_rawsock(sk)->tx_work_scheduled) {
sk                230 net/nfc/rawsock.c 		schedule_work(&nfc_rawsock(sk)->tx_work);
sk                231 net/nfc/rawsock.c 		nfc_rawsock(sk)->tx_work_scheduled = true;
sk                233 net/nfc/rawsock.c 	spin_unlock_bh(&sk->sk_write_queue.lock);
sk                242 net/nfc/rawsock.c 	struct sock *sk = sock->sk;
sk                247 net/nfc/rawsock.c 	pr_debug("sock=%p sk=%p len=%zu flags=%d\n", sock, sk, len, flags);
sk                249 net/nfc/rawsock.c 	skb = skb_recv_datagram(sk, flags, noblock, &rc);
sk                261 net/nfc/rawsock.c 	skb_free_datagram(sk, skb);
sk                306 net/nfc/rawsock.c static void rawsock_destruct(struct sock *sk)
sk                308 net/nfc/rawsock.c 	pr_debug("sk=%p\n", sk);
sk                310 net/nfc/rawsock.c 	if (sk->sk_state == TCP_ESTABLISHED) {
sk                311 net/nfc/rawsock.c 		nfc_deactivate_target(nfc_rawsock(sk)->dev,
sk                312 net/nfc/rawsock.c 				      nfc_rawsock(sk)->target_idx,
sk                314 net/nfc/rawsock.c 		nfc_put_device(nfc_rawsock(sk)->dev);
sk                317 net/nfc/rawsock.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                319 net/nfc/rawsock.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                320 net/nfc/rawsock.c 		pr_err("Freeing alive NFC raw socket %p\n", sk);
sk                328 net/nfc/rawsock.c 	struct sock *sk;
sk                340 net/nfc/rawsock.c 	sk = sk_alloc(net, PF_NFC, GFP_ATOMIC, nfc_proto->proto, kern);
sk                341 net/nfc/rawsock.c 	if (!sk)
sk                344 net/nfc/rawsock.c 	sock_init_data(sock, sk);
sk                345 net/nfc/rawsock.c 	sk->sk_protocol = nfc_proto->id;
sk                346 net/nfc/rawsock.c 	sk->sk_destruct = rawsock_destruct;
sk                349 net/nfc/rawsock.c 		nfc_sock_link(&raw_sk_list, sk);
sk                351 net/nfc/rawsock.c 		INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
sk                352 net/nfc/rawsock.c 		nfc_rawsock(sk)->tx_work_scheduled = false;
sk                362 net/nfc/rawsock.c 	struct sock *sk;
sk                367 net/nfc/rawsock.c 	sk_for_each(sk, &raw_sk_list.head) {
sk                385 net/nfc/rawsock.c 		if (sock_queue_rcv_skb(sk, nskb))
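
The rawsock.c entries show a transmit path driven by a work item: sendmsg queues the skb on sk_write_queue under the queue's own spinlock and schedules tx_work at most once (guarded by tx_work_scheduled), the completion handler reschedules while the queue is non-empty, and errors purge the queue. A small sketch of the enqueue side of that handoff, with illustrative container and function names:

	/* Sketch: deferring transmit to a work item, rawsock.c style. */
	#include <linux/workqueue.h>
	#include <net/sock.h>

	struct my_tx_sock {
		struct sock		sk;		/* first member */
		struct work_struct	tx_work;
		bool			tx_work_scheduled;
	};

	static void my_queue_for_tx(struct sock *sk, struct sk_buff *skb)
	{
		struct my_tx_sock *tsk = (struct my_tx_sock *)sk;

		spin_lock_bh(&sk->sk_write_queue.lock);
		__skb_queue_tail(&sk->sk_write_queue, skb);
		if (!tsk->tx_work_scheduled) {		/* one work in flight */
			schedule_work(&tsk->tx_work);
			tsk->tx_work_scheduled = true;
		}
		spin_unlock_bh(&sk->sk_write_queue.lock);
	}
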
sk                741 net/openvswitch/actions.c static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                844 net/openvswitch/actions.c 		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
sk                865 net/openvswitch/actions.c 		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
sk               2122 net/openvswitch/conntrack.c 	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
sk               2156 net/openvswitch/conntrack.c 	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
sk               2189 net/openvswitch/conntrack.c 	struct net *net = sock_net(skb->sk);
sk                536 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
sk                898 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
sk               1137 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
sk               1241 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
sk               1267 net/openvswitch/datapath.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
sk               1300 net/openvswitch/datapath.c 	struct net *net = sock_net(skb->sk);
sk               1322 net/openvswitch/datapath.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
sk               1363 net/openvswitch/datapath.c 			netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
sk               1391 net/openvswitch/datapath.c 	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
sk               1551 net/openvswitch/datapath.c 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
sk               1612 net/openvswitch/datapath.c 	ovs_dp_set_net(dp, sock_net(skb->sk));
sk               1738 net/openvswitch/datapath.c 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
sk               1771 net/openvswitch/datapath.c 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
sk               1806 net/openvswitch/datapath.c 	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
sk               1826 net/openvswitch/datapath.c 	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
sk               2065 net/openvswitch/datapath.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
sk               2137 net/openvswitch/datapath.c 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
sk               2193 net/openvswitch/datapath.c 	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
sk               2246 net/openvswitch/datapath.c 	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
sk               2272 net/openvswitch/datapath.c 	dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
sk               2284 net/openvswitch/datapath.c 						    sock_net(skb->sk),
sk                296 net/openvswitch/meter.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
sk                360 net/openvswitch/meter.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
sk                412 net/openvswitch/meter.c 	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
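
Nearly every openvswitch entry above is the same one-liner: a netlink/genetlink handler recovering its network namespace from the socket that delivered the request, sock_net(skb->sk), before doing any datapath or vport lookup. A minimal sketch of that pattern in a handler (my_doit() is hypothetical; the genetlink registration around it is omitted):

	/* Sketch: namespace-aware netlink handler, as in the ovs entries above. */
	#include <net/genetlink.h>
	#include <net/sock.h>

	static int my_doit(struct sk_buff *skb, struct genl_info *info)
	{
		struct net *net = sock_net(skb->sk);	/* requester's netns */

		/* per-namespace lookups (e.g. get_dp(net, ...)) would go here */
		(void)net;
		return 0;
	}
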
sk                164 net/packet/af_packet.c static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
sk                205 net/packet/af_packet.c static void packet_flush_mclist(struct sock *sk);
sk                235 net/packet/af_packet.c static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
sk                236 net/packet/af_packet.c static void __fanout_link(struct sock *sk, struct packet_sock *po);
sk                297 net/packet/af_packet.c static void __register_prot_hook(struct sock *sk)
sk                299 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk                303 net/packet/af_packet.c 			__fanout_link(sk, po);
sk                307 net/packet/af_packet.c 		sock_hold(sk);
sk                312 net/packet/af_packet.c static void register_prot_hook(struct sock *sk)
sk                314 net/packet/af_packet.c 	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
sk                315 net/packet/af_packet.c 	__register_prot_hook(sk);
sk                324 net/packet/af_packet.c static void __unregister_prot_hook(struct sock *sk, bool sync)
sk                326 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk                333 net/packet/af_packet.c 		__fanout_unlink(sk, po);
sk                337 net/packet/af_packet.c 	__sock_put(sk);
sk                346 net/packet/af_packet.c static void unregister_prot_hook(struct sock *sk, bool sync)
sk                348 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk                351 net/packet/af_packet.c 		__unregister_prot_hook(sk, sync);
sk                528 net/packet/af_packet.c 	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
sk                643 net/packet/af_packet.c 	spin_lock(&po->sk.sk_receive_queue.lock);
sk                706 net/packet/af_packet.c 	spin_unlock(&po->sk.sk_receive_queue.lock);
sk                760 net/packet/af_packet.c 	struct sock *sk = &po->sk;
sk                789 net/packet/af_packet.c 	sk->sk_data_ready(sk);
sk               1231 net/packet/af_packet.c 	const struct sock *sk = &po->sk;
sk               1235 net/packet/af_packet.c 		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
sk               1236 net/packet/af_packet.c 		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
sk               1282 net/packet/af_packet.c static void packet_sock_destruct(struct sock *sk)
sk               1284 net/packet/af_packet.c 	skb_queue_purge(&sk->sk_error_queue);
sk               1286 net/packet/af_packet.c 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
sk               1287 net/packet/af_packet.c 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
sk               1289 net/packet/af_packet.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk               1290 net/packet/af_packet.c 		pr_err("Attempt to release alive packet socket: %p\n", sk);
sk               1294 net/packet/af_packet.c 	sk_refcnt_debug_dec(sk);
sk               1471 net/packet/af_packet.c static void __fanout_link(struct sock *sk, struct packet_sock *po)
sk               1476 net/packet/af_packet.c 	f->arr[f->num_members] = sk;
sk               1484 net/packet/af_packet.c static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
sk               1491 net/packet/af_packet.c 		if (f->arr[i] == sk)
sk               1502 net/packet/af_packet.c static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
sk               1504 net/packet/af_packet.c 	if (sk->sk_family != PF_PACKET)
sk               1507 net/packet/af_packet.c 	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
sk               1545 net/packet/af_packet.c 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
sk               1566 net/packet/af_packet.c 	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
sk               1603 net/packet/af_packet.c static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
sk               1609 net/packet/af_packet.c 		    read_pnet(&f->net) == sock_net(sk)) {
sk               1616 net/packet/af_packet.c static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
sk               1621 net/packet/af_packet.c 		if (__fanout_id_is_free(sk, id)) {
sk               1633 net/packet/af_packet.c static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
sk               1636 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               1680 net/packet/af_packet.c 		if (!fanout_find_new_id(sk, &id)) {
sk               1691 net/packet/af_packet.c 		    read_pnet(&f->net) == sock_net(sk)) {
sk               1704 net/packet/af_packet.c 		write_pnet(&match->net, sock_net(sk));
sk               1733 net/packet/af_packet.c 			__fanout_link(sk, po);
sk               1755 net/packet/af_packet.c static struct packet_fanout *fanout_release(struct sock *sk)
sk               1757 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               1796 net/packet/af_packet.c 	struct sock *sk;
sk               1804 net/packet/af_packet.c 	sk = pt->af_packet_priv;
sk               1820 net/packet/af_packet.c 	if (!net_eq(dev_net(dev), sock_net(sk)))
sk               1850 net/packet/af_packet.c 	if (sock_queue_rcv_skb(sk, skb) == 0)
sk               1878 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               1906 net/packet/af_packet.c 	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
sk               1920 net/packet/af_packet.c 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
sk               1938 net/packet/af_packet.c 		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
sk               1971 net/packet/af_packet.c 	sockcm_init(&sockc, sk);
sk               1973 net/packet/af_packet.c 		err = sock_cmsg_send(sk, msg, &sockc);
sk               1980 net/packet/af_packet.c 	skb->priority = sk->sk_priority;
sk               1981 net/packet/af_packet.c 	skb->mark = sk->sk_mark;
sk               2003 net/packet/af_packet.c 			       const struct sock *sk,
sk               2009 net/packet/af_packet.c 	filter = rcu_dereference(sk->sk_filter);
sk               2047 net/packet/af_packet.c 	struct sock *sk;
sk               2058 net/packet/af_packet.c 	sk = pt->af_packet_priv;
sk               2059 net/packet/af_packet.c 	po = pkt_sk(sk);
sk               2061 net/packet/af_packet.c 	if (!net_eq(dev_net(dev), sock_net(sk)))
sk               2074 net/packet/af_packet.c 		if (sk->sk_type != SOCK_DGRAM)
sk               2084 net/packet/af_packet.c 	res = run_filter(skb, sk, snaplen);
sk               2090 net/packet/af_packet.c 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
sk               2126 net/packet/af_packet.c 	skb_set_owner_r(skb, sk);
sk               2133 net/packet/af_packet.c 	spin_lock(&sk->sk_receive_queue.lock);
sk               2135 net/packet/af_packet.c 	sock_skb_set_dropcount(sk, skb);
sk               2136 net/packet/af_packet.c 	__skb_queue_tail(&sk->sk_receive_queue, skb);
sk               2137 net/packet/af_packet.c 	spin_unlock(&sk->sk_receive_queue.lock);
sk               2138 net/packet/af_packet.c 	sk->sk_data_ready(sk);
sk               2144 net/packet/af_packet.c 	atomic_inc(&sk->sk_drops);
sk               2162 net/packet/af_packet.c 	struct sock *sk;
sk               2188 net/packet/af_packet.c 	sk = pt->af_packet_priv;
sk               2189 net/packet/af_packet.c 	po = pkt_sk(sk);
sk               2191 net/packet/af_packet.c 	if (!net_eq(dev_net(dev), sock_net(sk)))
sk               2195 net/packet/af_packet.c 		if (sk->sk_type != SOCK_DGRAM)
sk               2205 net/packet/af_packet.c 	res = run_filter(skb, sk, snaplen);
sk               2225 net/packet/af_packet.c 	if (sk->sk_type == SOCK_DGRAM) {
sk               2242 net/packet/af_packet.c 			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
sk               2250 net/packet/af_packet.c 					skb_set_owner_r(copy_skb, sk);
sk               2272 net/packet/af_packet.c 	spin_lock(&sk->sk_receive_queue.lock);
sk               2306 net/packet/af_packet.c 		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
sk               2308 net/packet/af_packet.c 	spin_unlock(&sk->sk_receive_queue.lock);
sk               2390 net/packet/af_packet.c 		spin_lock(&sk->sk_receive_queue.lock);
sk               2393 net/packet/af_packet.c 		spin_unlock(&sk->sk_receive_queue.lock);
sk               2394 net/packet/af_packet.c 		sk->sk_data_ready(sk);
sk               2412 net/packet/af_packet.c 	spin_unlock(&sk->sk_receive_queue.lock);
sk               2416 net/packet/af_packet.c 	sk->sk_data_ready(sk);
sk               2423 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(skb->sk);
sk               2478 net/packet/af_packet.c 	struct socket *sock = po->sk.sk_socket;
sk               2486 net/packet/af_packet.c 	skb->priority = po->sk.sk_priority;
sk               2487 net/packet/af_packet.c 	skb->mark = po->sk.sk_mark;
sk               2524 net/packet/af_packet.c 	refcount_add(to_write, &po->sk.sk_wmem_alloc);
sk               2584 net/packet/af_packet.c 		if (po->sk.sk_type == SOCK_DGRAM) {
sk               2659 net/packet/af_packet.c 		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
sk               2660 net/packet/af_packet.c 		if (po->sk.sk_socket->type == SOCK_DGRAM) {
sk               2675 net/packet/af_packet.c 	sockcm_init(&sockc, &po->sk);
sk               2677 net/packet/af_packet.c 		err = sock_cmsg_send(&po->sk, msg, &sockc);
sk               2682 net/packet/af_packet.c 	if (po->sk.sk_socket->type == SOCK_RAW)
sk               2697 net/packet/af_packet.c 				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
sk               2729 net/packet/af_packet.c 		skb = sock_alloc_send_skb(&po->sk,
sk               2815 net/packet/af_packet.c static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
sk               2826 net/packet/af_packet.c 	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
sk               2841 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               2851 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               2870 net/packet/af_packet.c 		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
sk               2886 net/packet/af_packet.c 	sockcm_init(&sockc, sk);
sk               2887 net/packet/af_packet.c 	sockc.mark = sk->sk_mark;
sk               2889 net/packet/af_packet.c 		err = sock_cmsg_send(sk, msg, &sockc);
sk               2903 net/packet/af_packet.c 	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
sk               2921 net/packet/af_packet.c 	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
sk               2961 net/packet/af_packet.c 	skb->priority = sk->sk_priority;
sk               2997 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               2998 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               3013 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               3019 net/packet/af_packet.c 	if (!sk)
sk               3022 net/packet/af_packet.c 	net = sock_net(sk);
sk               3023 net/packet/af_packet.c 	po = pkt_sk(sk);
sk               3026 net/packet/af_packet.c 	sk_del_node_init_rcu(sk);
sk               3030 net/packet/af_packet.c 	sock_prot_inuse_add(net, sk->sk_prot, -1);
sk               3034 net/packet/af_packet.c 	unregister_prot_hook(sk, false);
sk               3043 net/packet/af_packet.c 	packet_flush_mclist(sk);
sk               3045 net/packet/af_packet.c 	lock_sock(sk);
sk               3048 net/packet/af_packet.c 		packet_set_ring(sk, &req_u, 1, 0);
sk               3053 net/packet/af_packet.c 		packet_set_ring(sk, &req_u, 1, 1);
sk               3055 net/packet/af_packet.c 	release_sock(sk);
sk               3057 net/packet/af_packet.c 	f = fanout_release(sk);
sk               3069 net/packet/af_packet.c 	sock_orphan(sk);
sk               3070 net/packet/af_packet.c 	sock->sk = NULL;
sk               3074 net/packet/af_packet.c 	skb_queue_purge(&sk->sk_receive_queue);
sk               3076 net/packet/af_packet.c 	sk_refcnt_debug_release(sk);
sk               3078 net/packet/af_packet.c 	sock_put(sk);
sk               3086 net/packet/af_packet.c static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
sk               3089 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               3097 net/packet/af_packet.c 	lock_sock(sk);
sk               3107 net/packet/af_packet.c 		dev = dev_get_by_name_rcu(sock_net(sk), name);
sk               3113 net/packet/af_packet.c 		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
sk               3135 net/packet/af_packet.c 			__unregister_prot_hook(sk, true);
sk               3139 net/packet/af_packet.c 				unlisted = !dev_get_by_index_rcu(sock_net(sk),
sk               3165 net/packet/af_packet.c 		register_prot_hook(sk);
sk               3167 net/packet/af_packet.c 		sk->sk_err = ENETDOWN;
sk               3168 net/packet/af_packet.c 		if (!sock_flag(sk, SOCK_DEAD))
sk               3169 net/packet/af_packet.c 			sk->sk_error_report(sk);
sk               3175 net/packet/af_packet.c 	release_sock(sk);
sk               3186 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               3201 net/packet/af_packet.c 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
sk               3207 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               3218 net/packet/af_packet.c 	return packet_do_bind(sk, NULL, sll->sll_ifindex,
sk               3219 net/packet/af_packet.c 			      sll->sll_protocol ? : pkt_sk(sk)->num);
sk               3235 net/packet/af_packet.c 	struct sock *sk;
sk               3249 net/packet/af_packet.c 	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
sk               3250 net/packet/af_packet.c 	if (sk == NULL)
sk               3257 net/packet/af_packet.c 	sock_init_data(sock, sk);
sk               3259 net/packet/af_packet.c 	po = pkt_sk(sk);
sk               3261 net/packet/af_packet.c 	sk->sk_family = PF_PACKET;
sk               3271 net/packet/af_packet.c 	sk->sk_destruct = packet_sock_destruct;
sk               3272 net/packet/af_packet.c 	sk_refcnt_debug_inc(sk);
sk               3286 net/packet/af_packet.c 	po->prot_hook.af_packet_priv = sk;
sk               3290 net/packet/af_packet.c 		__register_prot_hook(sk);
sk               3294 net/packet/af_packet.c 	sk_add_node_tail_rcu(sk, &net->packet.sklist);
sk               3303 net/packet/af_packet.c 	sk_free(sk);
sk               3316 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               3328 net/packet/af_packet.c 	if (pkt_sk(sk)->ifindex < 0)
sk               3333 net/packet/af_packet.c 		err = sock_recv_errqueue(sk, msg, len,
sk               3347 net/packet/af_packet.c 	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
sk               3358 net/packet/af_packet.c 	packet_rcv_try_clear_pressure(pkt_sk(sk));
sk               3360 net/packet/af_packet.c 	if (pkt_sk(sk)->has_vnet_hdr) {
sk               3390 net/packet/af_packet.c 	sock_recv_ts_and_drops(msg, sk, skb);
sk               3418 net/packet/af_packet.c 	if (pkt_sk(sk)->auxdata) {
sk               3451 net/packet/af_packet.c 	skb_free_datagram(sk, skb);
sk               3460 net/packet/af_packet.c 	struct sock *sk	= sock->sk;
sk               3468 net/packet/af_packet.c 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
sk               3480 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               3481 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               3492 net/packet/af_packet.c 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
sk               3551 net/packet/af_packet.c static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
sk               3553 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               3561 net/packet/af_packet.c 	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
sk               3606 net/packet/af_packet.c static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
sk               3612 net/packet/af_packet.c 	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
sk               3620 net/packet/af_packet.c 				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
sk               3632 net/packet/af_packet.c static void packet_flush_mclist(struct sock *sk)
sk               3634 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               3645 net/packet/af_packet.c 		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
sk               3656 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               3657 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               3679 net/packet/af_packet.c 			ret = packet_mc_add(sk, &mreq);
sk               3681 net/packet/af_packet.c 			ret = packet_mc_drop(sk, &mreq);
sk               3691 net/packet/af_packet.c 		lock_sock(sk);
sk               3708 net/packet/af_packet.c 				ret = packet_set_ring(sk, &req_u, 0,
sk               3711 net/packet/af_packet.c 		release_sock(sk);
sk               3723 net/packet/af_packet.c 		pkt_sk(sk)->copy_thresh = val;
sk               3742 net/packet/af_packet.c 		lock_sock(sk);
sk               3749 net/packet/af_packet.c 		release_sock(sk);
sk               3762 net/packet/af_packet.c 		lock_sock(sk);
sk               3769 net/packet/af_packet.c 		release_sock(sk);
sk               3781 net/packet/af_packet.c 		lock_sock(sk);
sk               3788 net/packet/af_packet.c 		release_sock(sk);
sk               3800 net/packet/af_packet.c 		lock_sock(sk);
sk               3802 net/packet/af_packet.c 		release_sock(sk);
sk               3814 net/packet/af_packet.c 		lock_sock(sk);
sk               3816 net/packet/af_packet.c 		release_sock(sk);
sk               3830 net/packet/af_packet.c 		lock_sock(sk);
sk               3837 net/packet/af_packet.c 		release_sock(sk);
sk               3861 net/packet/af_packet.c 		return fanout_add(sk, val & 0xffff, val >> 16);
sk               3893 net/packet/af_packet.c 		lock_sock(sk);
sk               3900 net/packet/af_packet.c 		release_sock(sk);
sk               3925 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               3926 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               3943 net/packet/af_packet.c 		spin_lock_bh(&sk->sk_receive_queue.lock);
sk               3946 net/packet/af_packet.c 		spin_unlock_bh(&sk->sk_receive_queue.lock);
sk               4047 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sock->sk);
sk               4067 net/packet/af_packet.c 	struct sock *sk;
sk               4072 net/packet/af_packet.c 	sk_for_each_rcu(sk, &net->packet.sklist) {
sk               4073 net/packet/af_packet.c 		struct packet_sock *po = pkt_sk(sk);
sk               4085 net/packet/af_packet.c 					__unregister_prot_hook(sk, false);
sk               4086 net/packet/af_packet.c 					sk->sk_err = ENETDOWN;
sk               4087 net/packet/af_packet.c 					if (!sock_flag(sk, SOCK_DEAD))
sk               4088 net/packet/af_packet.c 						sk->sk_error_report(sk);
sk               4104 net/packet/af_packet.c 					register_prot_hook(sk);
sk               4118 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               4123 net/packet/af_packet.c 		int amount = sk_wmem_alloc_get(sk);
sk               4132 net/packet/af_packet.c 		spin_lock_bh(&sk->sk_receive_queue.lock);
sk               4133 net/packet/af_packet.c 		skb = skb_peek(&sk->sk_receive_queue);
sk               4136 net/packet/af_packet.c 		spin_unlock_bh(&sk->sk_receive_queue.lock);
sk               4166 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               4167 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               4170 net/packet/af_packet.c 	spin_lock_bh(&sk->sk_receive_queue.lock);
sk               4177 net/packet/af_packet.c 	spin_unlock_bh(&sk->sk_receive_queue.lock);
sk               4178 net/packet/af_packet.c 	spin_lock_bh(&sk->sk_write_queue.lock);
sk               4183 net/packet/af_packet.c 	spin_unlock_bh(&sk->sk_write_queue.lock);
sk               4196 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               4198 net/packet/af_packet.c 	if (sk)
sk               4199 net/packet/af_packet.c 		atomic_inc(&pkt_sk(sk)->mapped);
sk               4206 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               4208 net/packet/af_packet.c 	if (sk)
sk               4209 net/packet/af_packet.c 		atomic_dec(&pkt_sk(sk)->mapped);
sk               4285 net/packet/af_packet.c static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
sk               4289 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk               4300 net/packet/af_packet.c 	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
sk               4399 net/packet/af_packet.c 		__unregister_prot_hook(sk, false);
sk               4434 net/packet/af_packet.c 		register_prot_hook(sk);
sk               4454 net/packet/af_packet.c 	struct sock *sk = sock->sk;
sk               4455 net/packet/af_packet.c 	struct packet_sock *po = pkt_sk(sk);
sk                129 net/packet/diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
sk                137 net/packet/diag.c 	struct packet_sock *po = pkt_sk(sk);
sk                145 net/packet/diag.c 	rp->pdiag_type = sk->sk_type;
sk                148 net/packet/diag.c 	sock_diag_save_cookie(sk, rp->pdiag_cookie);
sk                156 net/packet/diag.c 			from_kuid_munged(user_ns, sock_i_uid(sk))))
sk                172 net/packet/diag.c 	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
sk                176 net/packet/diag.c 	    sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
sk                193 net/packet/diag.c 	struct sock *sk;
sk                196 net/packet/diag.c 	net = sock_net(skb->sk);
sk                201 net/packet/diag.c 	sk_for_each(sk, &net->packet.sklist) {
sk                202 net/packet/diag.c 		if (!net_eq(sock_net(sk), net))
sk                207 net/packet/diag.c 		if (sk_diag_fill(sk, skb, req,
sk                209 net/packet/diag.c 				 sk_user_ns(NETLINK_CB(cb->skb).sk),
sk                212 net/packet/diag.c 				 sock_i_ino(sk)) < 0)
sk                227 net/packet/diag.c 	struct net *net = sock_net(skb->sk);
sk                110 net/packet/internal.h 	struct sock		sk;
sk                141 net/packet/internal.h static struct packet_sock *pkt_sk(struct sock *sk)
sk                143 net/packet/internal.h 	return (struct packet_sock *)sk;
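The pkt_sk() lines above (and the qrtr_sk() lines later in this listing) depend on the kernel convention that struct sock is embedded as the very first member of the protocol-private socket structure, so the conversion from struct sock * is just a cast or, equivalently, container_of() with a zero offset. A minimal user-space sketch of that convention, with a made-up foo_sock and a stubbed struct sock, might look like this (illustrative only, not kernel code):

	#include <stddef.h>

	/* Stand-in for the real struct sock, just to keep the sketch self-contained. */
	struct sock { int sk_state; };

	struct foo_sock {
		struct sock sk;		/* must remain the first member */
		int foo_private;
	};

	/* Same shape as pkt_sk()/qrtr_sk(): valid only because offsetof(..., sk) == 0. */
	static inline struct foo_sock *foo_sk(struct sock *sk)
	{
		return (struct foo_sock *)((char *)sk - offsetof(struct foo_sock, sk));
	}

qrtr_sk() makes the assumption explicit with BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0), which is the safer way to write the same cast.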
sk                 53 net/phonet/af_phonet.c 	struct sock *sk;
sk                 87 net/phonet/af_phonet.c 	sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot, kern);
sk                 88 net/phonet/af_phonet.c 	if (sk == NULL) {
sk                 93 net/phonet/af_phonet.c 	sock_init_data(sock, sk);
sk                 96 net/phonet/af_phonet.c 	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
sk                 97 net/phonet/af_phonet.c 	sk->sk_protocol = protocol;
sk                 98 net/phonet/af_phonet.c 	pn = pn_sk(sk);
sk                102 net/phonet/af_phonet.c 	sk->sk_prot->init(sk);
sk                224 net/phonet/af_phonet.c int pn_skb_send(struct sock *sk, struct sk_buff *skb,
sk                227 net/phonet/af_phonet.c 	struct net *net = sock_net(sk);
sk                229 net/phonet/af_phonet.c 	struct pn_sock *pn = pn_sk(sk);
sk                245 net/phonet/af_phonet.c 	if (sk->sk_bound_dev_if)
sk                246 net/phonet/af_phonet.c 		dev = dev_get_by_index(net, sk->sk_bound_dev_if);
sk                252 net/phonet/af_phonet.c 		struct sock *sk = pn_find_sock_by_res(net, res);
sk                253 net/phonet/af_phonet.c 		if (sk)	{
sk                254 net/phonet/af_phonet.c 			sock_put(sk);
sk                395 net/phonet/af_phonet.c 		struct sock *sk = pn_find_sock_by_res(net, sa.spn_resource);
sk                396 net/phonet/af_phonet.c 		if (sk)
sk                397 net/phonet/af_phonet.c 			return sk_receive_skb(sk, skb, 0);
sk                403 net/phonet/af_phonet.c 		struct sock *sk = pn_find_sock_by_sa(net, &sa);
sk                405 net/phonet/af_phonet.c 		if (sk)
sk                406 net/phonet/af_phonet.c 			return sk_receive_skb(sk, skb, 0);
sk                 23 net/phonet/datagram.c static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
sk                 26 net/phonet/datagram.c static void pn_sock_close(struct sock *sk, long timeout)
sk                 28 net/phonet/datagram.c 	sk_common_release(sk);
sk                 31 net/phonet/datagram.c static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk                 38 net/phonet/datagram.c 		lock_sock(sk);
sk                 39 net/phonet/datagram.c 		skb = skb_peek(&sk->sk_receive_queue);
sk                 41 net/phonet/datagram.c 		release_sock(sk);
sk                 52 net/phonet/datagram.c 				return pn_sock_bind_res(sk, res);
sk                 54 net/phonet/datagram.c 				return pn_sock_unbind_res(sk, res);
sk                 62 net/phonet/datagram.c static void pn_destruct(struct sock *sk)
sk                 64 net/phonet/datagram.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                 67 net/phonet/datagram.c static int pn_init(struct sock *sk)
sk                 69 net/phonet/datagram.c 	sk->sk_destruct = pn_destruct;
sk                 73 net/phonet/datagram.c static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk                 92 net/phonet/datagram.c 	skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
sk                108 net/phonet/datagram.c 	err = pn_skb_send(sk, skb, target);
sk                114 net/phonet/datagram.c static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk                126 net/phonet/datagram.c 	skb = skb_recv_datagram(sk, flags, noblock, &rval);
sk                153 net/phonet/datagram.c 	skb_free_datagram(sk, skb);
sk                160 net/phonet/datagram.c static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk                162 net/phonet/datagram.c 	int err = sock_queue_rcv_skb(sk, skb);
sk                 25 net/phonet/pep-gprs.c 	struct sock		*sk;
sk                 55 net/phonet/pep-gprs.c 	if (pep_writeable(gp->sk))
sk                 63 net/phonet/pep-gprs.c static void gprs_state_change(struct sock *sk)
sk                 65 net/phonet/pep-gprs.c 	struct gprs_dev *gp = sk->sk_user_data;
sk                 67 net/phonet/pep-gprs.c 	if (sk->sk_state == TCP_CLOSE_WAIT) {
sk                136 net/phonet/pep-gprs.c static void gprs_data_ready(struct sock *sk)
sk                138 net/phonet/pep-gprs.c 	struct gprs_dev *gp = sk->sk_user_data;
sk                141 net/phonet/pep-gprs.c 	while ((skb = pep_read(sk)) != NULL) {
sk                147 net/phonet/pep-gprs.c static void gprs_write_space(struct sock *sk)
sk                149 net/phonet/pep-gprs.c 	struct gprs_dev *gp = sk->sk_user_data;
sk                176 net/phonet/pep-gprs.c 	struct sock *sk = gp->sk;
sk                189 net/phonet/pep-gprs.c 	skb_set_owner_w(skb, sk);
sk                191 net/phonet/pep-gprs.c 	err = pep_write(sk, skb);
sk                202 net/phonet/pep-gprs.c 	if (pep_writeable(sk))
sk                237 net/phonet/pep-gprs.c int gprs_attach(struct sock *sk)
sk                244 net/phonet/pep-gprs.c 	if (unlikely(sk->sk_type == SOCK_STREAM))
sk                252 net/phonet/pep-gprs.c 	gp->sk = sk;
sk                262 net/phonet/pep-gprs.c 	lock_sock(sk);
sk                263 net/phonet/pep-gprs.c 	if (unlikely(sk->sk_user_data)) {
sk                267 net/phonet/pep-gprs.c 	if (unlikely((1 << sk->sk_state & (TCPF_CLOSE|TCPF_LISTEN)) ||
sk                268 net/phonet/pep-gprs.c 			sock_flag(sk, SOCK_DEAD))) {
sk                272 net/phonet/pep-gprs.c 	sk->sk_user_data	= gp;
sk                273 net/phonet/pep-gprs.c 	gp->old_state_change	= sk->sk_state_change;
sk                274 net/phonet/pep-gprs.c 	gp->old_data_ready	= sk->sk_data_ready;
sk                275 net/phonet/pep-gprs.c 	gp->old_write_space	= sk->sk_write_space;
sk                276 net/phonet/pep-gprs.c 	sk->sk_state_change	= gprs_state_change;
sk                277 net/phonet/pep-gprs.c 	sk->sk_data_ready	= gprs_data_ready;
sk                278 net/phonet/pep-gprs.c 	sk->sk_write_space	= gprs_write_space;
sk                279 net/phonet/pep-gprs.c 	release_sock(sk);
sk                280 net/phonet/pep-gprs.c 	sock_hold(sk);
sk                286 net/phonet/pep-gprs.c 	release_sock(sk);
sk                291 net/phonet/pep-gprs.c void gprs_detach(struct sock *sk)
sk                293 net/phonet/pep-gprs.c 	struct gprs_dev *gp = sk->sk_user_data;
sk                296 net/phonet/pep-gprs.c 	lock_sock(sk);
sk                297 net/phonet/pep-gprs.c 	sk->sk_user_data	= NULL;
sk                298 net/phonet/pep-gprs.c 	sk->sk_state_change	= gp->old_state_change;
sk                299 net/phonet/pep-gprs.c 	sk->sk_data_ready	= gp->old_data_ready;
sk                300 net/phonet/pep-gprs.c 	sk->sk_write_space	= gp->old_write_space;
sk                301 net/phonet/pep-gprs.c 	release_sock(sk);
sk                305 net/phonet/pep-gprs.c 	sock_put(sk);
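gprs_attach() and gprs_detach() above show the usual pattern for layering a driver on top of an existing socket: under lock_sock(), stash a cookie in sk_user_data, save the original sk_state_change/sk_data_ready/sk_write_space callbacks, install replacements, and undo everything on detach, with sock_hold()/sock_put() pinning the socket for the lifetime of the attachment. A hedged kernel-style sketch of that shape (foo_dev and the foo_* functions are invented names; only sk_data_ready is swapped here to keep the fragment short):

	#include <net/sock.h>

	struct foo_dev {
		struct sock *sk;
		void (*old_data_ready)(struct sock *sk);
	};

	static void foo_data_ready(struct sock *sk)
	{
		struct foo_dev *fd = sk->sk_user_data;

		/* ... drain sk->sk_receive_queue into the device here ... */
		if (fd && fd->old_data_ready)
			fd->old_data_ready(sk);
	}

	static int foo_attach(struct sock *sk, struct foo_dev *fd)
	{
		lock_sock(sk);
		if (sk->sk_user_data) {		/* someone else already owns the callbacks */
			release_sock(sk);
			return -EBUSY;
		}
		fd->sk			= sk;
		fd->old_data_ready	= sk->sk_data_ready;
		sk->sk_user_data	= fd;
		sk->sk_data_ready	= foo_data_ready;
		release_sock(sk);
		sock_hold(sk);			/* keep the socket alive while attached */
		return 0;
	}

	static void foo_detach(struct sock *sk)
	{
		struct foo_dev *fd = sk->sk_user_data;

		lock_sock(sk);
		sk->sk_user_data  = NULL;
		sk->sk_data_ready = fd->old_data_ready;
		release_sock(sk);
		sock_put(sk);
	}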
sk                 69 net/phonet/pep.c static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
sk                 75 net/phonet/pep.c 	skb_set_owner_w(skb, sk);
sk                 85 net/phonet/pep.c static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
sk                 93 net/phonet/pep.c 	skb = pep_alloc_skb(sk, data, len, priority);
sk                104 net/phonet/pep.c 	return pn_skb_send(sk, skb, &peer);
sk                107 net/phonet/pep.c static int pep_indicate(struct sock *sk, u8 id, u8 code,
sk                110 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                114 net/phonet/pep.c 	skb = pep_alloc_skb(sk, data, len, priority);
sk                123 net/phonet/pep.c 	return pn_skb_send(sk, skb, NULL);
sk                128 net/phonet/pep.c static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
sk                131 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                135 net/phonet/pep.c 	skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
sk                144 net/phonet/pep.c 	return pn_skb_send(sk, skb, NULL);
sk                147 net/phonet/pep.c static int pipe_handler_send_created_ind(struct sock *sk)
sk                149 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                155 net/phonet/pep.c 	return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
sk                159 net/phonet/pep.c static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
sk                176 net/phonet/pep.c 	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
sk                180 net/phonet/pep.c static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
sk                185 net/phonet/pep.c 	return pep_reply(sk, skb, code, data, sizeof(data), priority);
sk                190 net/phonet/pep.c static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
sk                203 net/phonet/pep.c 	skb = pep_alloc_skb(sk, data, 4, priority);
sk                214 net/phonet/pep.c 	return pn_skb_send(sk, skb, &dst);
sk                217 net/phonet/pep.c static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
sk                221 net/phonet/pep.c 	return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
sk                227 net/phonet/pep.c static void pipe_grant_credits(struct sock *sk, gfp_t priority)
sk                229 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                231 net/phonet/pep.c 	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
sk                237 net/phonet/pep.c 		if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
sk                244 net/phonet/pep.c 		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
sk                252 net/phonet/pep.c static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
sk                254 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                300 net/phonet/pep.c 		sk->sk_write_space(sk);
sk                304 net/phonet/pep.c static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
sk                306 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                333 net/phonet/pep.c static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
sk                335 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                340 net/phonet/pep.c 	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
sk                344 net/phonet/pep.c 		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
sk                348 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
sk                349 net/phonet/pep.c 		sk->sk_state = TCP_CLOSE_WAIT;
sk                350 net/phonet/pep.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                351 net/phonet/pep.c 			sk->sk_state_change(sk);
sk                356 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
sk                374 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
sk                379 net/phonet/pep.c 			atomic_inc(&sk->sk_drops);
sk                392 net/phonet/pep.c 			err = sock_queue_rcv_skb(sk, skb);
sk                400 net/phonet/pep.c 			atomic_inc(&sk->sk_drops);
sk                405 net/phonet/pep.c 		queue = &sk->sk_receive_queue;
sk                409 net/phonet/pep.c 		pipe_rcv_status(sk, skb);
sk                413 net/phonet/pep.c 		err = pipe_rcv_created(sk, skb);
sk                417 net/phonet/pep.c 		err = pipe_rcv_created(sk, skb);
sk                428 net/phonet/pep.c 			sk->sk_write_space(sk);
sk                430 net/phonet/pep.c 		if (sk->sk_state == TCP_ESTABLISHED)
sk                432 net/phonet/pep.c 		sk->sk_state = TCP_ESTABLISHED;
sk                433 net/phonet/pep.c 		pipe_grant_credits(sk, GFP_ATOMIC);
sk                437 net/phonet/pep.c 		sk->sk_state = TCP_SYN_RECV;
sk                452 net/phonet/pep.c 	skb_set_owner_r(skb, sk);
sk                454 net/phonet/pep.c 	if (!sock_flag(sk, SOCK_DEAD))
sk                455 net/phonet/pep.c 		sk->sk_data_ready(sk);
sk                460 net/phonet/pep.c static void pipe_destruct(struct sock *sk)
sk                462 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                464 net/phonet/pep.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                482 net/phonet/pep.c static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
sk                484 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                521 net/phonet/pep.c 	return pipe_handler_send_created_ind(sk);
sk                524 net/phonet/pep.c static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
sk                531 net/phonet/pep.c 	return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
sk                536 net/phonet/pep.c static void pipe_start_flow_control(struct sock *sk)
sk                538 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                542 net/phonet/pep.c 		sk->sk_write_space(sk);
sk                544 net/phonet/pep.c 	pipe_grant_credits(sk, GFP_ATOMIC);
sk                549 net/phonet/pep.c static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
sk                551 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                562 net/phonet/pep.c 			err = sock_queue_rcv_skb(sk, skb);
sk                570 net/phonet/pep.c 			atomic_inc(&sk->sk_drops);
sk                576 net/phonet/pep.c 		skb_set_owner_r(skb, sk);
sk                577 net/phonet/pep.c 		skb_queue_tail(&sk->sk_receive_queue, skb);
sk                578 net/phonet/pep.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                579 net/phonet/pep.c 			sk->sk_data_ready(sk);
sk                583 net/phonet/pep.c 		if (sk->sk_state != TCP_SYN_SENT)
sk                585 net/phonet/pep.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                586 net/phonet/pep.c 			sk->sk_state_change(sk);
sk                587 net/phonet/pep.c 		if (pep_connresp_rcv(sk, skb)) {
sk                588 net/phonet/pep.c 			sk->sk_state = TCP_CLOSE_WAIT;
sk                592 net/phonet/pep.c 			sk->sk_state = TCP_SYN_RECV;
sk                594 net/phonet/pep.c 			sk->sk_state = TCP_ESTABLISHED;
sk                595 net/phonet/pep.c 			pipe_start_flow_control(sk);
sk                600 net/phonet/pep.c 		if (sk->sk_state != TCP_SYN_SENT)
sk                603 net/phonet/pep.c 		if (pep_enableresp_rcv(sk, skb)) {
sk                604 net/phonet/pep.c 			sk->sk_state = TCP_CLOSE_WAIT;
sk                608 net/phonet/pep.c 		sk->sk_state = TCP_ESTABLISHED;
sk                609 net/phonet/pep.c 		pipe_start_flow_control(sk);
sk                617 net/phonet/pep.c 		pipe_rcv_status(sk, skb);
sk                654 net/phonet/pep.c static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
sk                656 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                679 net/phonet/pep.c 		if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
sk                680 net/phonet/pep.c 			pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
sk                684 net/phonet/pep.c 		skb_queue_head(&sk->sk_receive_queue, skb);
sk                685 net/phonet/pep.c 		sk_acceptq_added(sk);
sk                686 net/phonet/pep.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                687 net/phonet/pep.c 			sk->sk_data_ready(sk);
sk                691 net/phonet/pep.c 		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
sk                695 net/phonet/pep.c 		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
sk                705 net/phonet/pep.c 		if ((1 << sk->sk_state)
sk                708 net/phonet/pep.c 			return pipe_handler_do_rcv(sk, skb);
sk                715 net/phonet/pep.c static int pipe_do_remove(struct sock *sk)
sk                717 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                721 net/phonet/pep.c 	skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
sk                730 net/phonet/pep.c 	return pn_skb_send(sk, skb, NULL);
sk                734 net/phonet/pep.c static void pep_sock_close(struct sock *sk, long timeout)
sk                736 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                739 net/phonet/pep.c 	sock_hold(sk); /* keep a reference after sk_common_release() */
sk                740 net/phonet/pep.c 	sk_common_release(sk);
sk                742 net/phonet/pep.c 	lock_sock(sk);
sk                743 net/phonet/pep.c 	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
sk                744 net/phonet/pep.c 		if (sk->sk_backlog_rcv == pipe_do_rcv)
sk                746 net/phonet/pep.c 			pipe_do_remove(sk);
sk                748 net/phonet/pep.c 			pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
sk                751 net/phonet/pep.c 	sk->sk_state = TCP_CLOSE;
sk                755 net/phonet/pep.c 	release_sock(sk);
sk                758 net/phonet/pep.c 		gprs_detach(sk);
sk                759 net/phonet/pep.c 	sock_put(sk);
sk                762 net/phonet/pep.c static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
sk                765 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk), *newpn;
sk                775 net/phonet/pep.c 	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
sk                779 net/phonet/pep.c 	lock_sock(sk);
sk                780 net/phonet/pep.c 	if (sk->sk_state != TCP_LISTEN) {
sk                784 net/phonet/pep.c 	sk_acceptq_removed(sk);
sk                800 net/phonet/pep.c 		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
sk                832 net/phonet/pep.c 		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
sk                837 net/phonet/pep.c 	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
sk                840 net/phonet/pep.c 		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
sk                848 net/phonet/pep.c 	newsk->sk_protocol = sk->sk_protocol;
sk                857 net/phonet/pep.c 	sock_hold(sk);
sk                858 net/phonet/pep.c 	newpn->listener = sk;
sk                877 net/phonet/pep.c 	release_sock(sk);
sk                883 net/phonet/pep.c static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
sk                885 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                892 net/phonet/pep.c 	err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
sk                899 net/phonet/pep.c 	sk->sk_state = TCP_SYN_SENT;
sk                904 net/phonet/pep.c static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
sk                908 net/phonet/pep.c 	err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
sk                913 net/phonet/pep.c 	sk->sk_state = TCP_SYN_SENT;
sk                918 net/phonet/pep.c static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk                920 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                926 net/phonet/pep.c 		if (sk->sk_state == TCP_LISTEN) {
sk                931 net/phonet/pep.c 		lock_sock(sk);
sk                932 net/phonet/pep.c 		if (sock_flag(sk, SOCK_URGINLINE) &&
sk                935 net/phonet/pep.c 		else if (!skb_queue_empty(&sk->sk_receive_queue))
sk                936 net/phonet/pep.c 			answ = skb_peek(&sk->sk_receive_queue)->len;
sk                939 net/phonet/pep.c 		release_sock(sk);
sk                944 net/phonet/pep.c 		lock_sock(sk);
sk                945 net/phonet/pep.c 		if (sk->sk_state == TCP_SYN_SENT)
sk                947 net/phonet/pep.c 		else if (sk->sk_state == TCP_ESTABLISHED)
sk                950 net/phonet/pep.c 			ret = pep_sock_enable(sk, NULL, 0);
sk                951 net/phonet/pep.c 		release_sock(sk);
sk                958 net/phonet/pep.c static int pep_init(struct sock *sk)
sk                960 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                962 net/phonet/pep.c 	sk->sk_destruct = pipe_destruct;
sk                977 net/phonet/pep.c static int pep_setsockopt(struct sock *sk, int level, int optname,
sk                980 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk                990 net/phonet/pep.c 	lock_sock(sk);
sk               1004 net/phonet/pep.c 			release_sock(sk);
sk               1005 net/phonet/pep.c 			err = gprs_attach(sk);
sk               1012 net/phonet/pep.c 			release_sock(sk);
sk               1013 net/phonet/pep.c 			gprs_detach(sk);
sk               1019 net/phonet/pep.c 		if ((sk->sk_state == TCP_CLOSE) &&
sk               1033 net/phonet/pep.c 	release_sock(sk);
sk               1039 net/phonet/pep.c static int pep_getsockopt(struct sock *sk, int level, int optname,
sk               1042 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk               1081 net/phonet/pep.c static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
sk               1083 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk               1103 net/phonet/pep.c 	err = pn_skb_send(sk, skb, NULL);
sk               1111 net/phonet/pep.c static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sk               1113 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk               1127 net/phonet/pep.c 	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
sk               1137 net/phonet/pep.c 	lock_sock(sk);
sk               1138 net/phonet/pep.c 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk               1139 net/phonet/pep.c 	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
sk               1143 net/phonet/pep.c 	if (sk->sk_state != TCP_ESTABLISHED) {
sk               1146 net/phonet/pep.c 		err = sk_stream_wait_connect(sk, &timeo);
sk               1150 net/phonet/pep.c 		if (sk->sk_state == TCP_CLOSE_WAIT) {
sk               1155 net/phonet/pep.c 	BUG_ON(sk->sk_state != TCP_ESTABLISHED);
sk               1171 net/phonet/pep.c 		add_wait_queue(sk_sleep(sk), &wait);
sk               1172 net/phonet/pep.c 		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits), &wait);
sk               1173 net/phonet/pep.c 		remove_wait_queue(sk_sleep(sk), &wait);
sk               1175 net/phonet/pep.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1179 net/phonet/pep.c 	err = pipe_skb_send(sk, skb);
sk               1184 net/phonet/pep.c 	release_sock(sk);
sk               1190 net/phonet/pep.c int pep_writeable(struct sock *sk)
sk               1192 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk               1197 net/phonet/pep.c int pep_write(struct sock *sk, struct sk_buff *skb)
sk               1202 net/phonet/pep.c 	if (pep_sk(sk)->aligned)
sk               1203 net/phonet/pep.c 		return pipe_skb_send(sk, skb);
sk               1225 net/phonet/pep.c 	return pipe_skb_send(sk, rskb);
sk               1228 net/phonet/pep.c struct sk_buff *pep_read(struct sock *sk)
sk               1230 net/phonet/pep.c 	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
sk               1232 net/phonet/pep.c 	if (sk->sk_state == TCP_ESTABLISHED)
sk               1233 net/phonet/pep.c 		pipe_grant_credits(sk, GFP_ATOMIC);
sk               1237 net/phonet/pep.c static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk               1247 net/phonet/pep.c 	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
sk               1250 net/phonet/pep.c 	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
sk               1252 net/phonet/pep.c 		struct pep_sock *pn = pep_sk(sk);
sk               1258 net/phonet/pep.c 			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
sk               1267 net/phonet/pep.c 	skb = skb_recv_datagram(sk, flags, noblock, &err);
sk               1268 net/phonet/pep.c 	lock_sock(sk);
sk               1270 net/phonet/pep.c 		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
sk               1272 net/phonet/pep.c 		release_sock(sk);
sk               1276 net/phonet/pep.c 	if (sk->sk_state == TCP_ESTABLISHED)
sk               1277 net/phonet/pep.c 		pipe_grant_credits(sk, GFP_KERNEL);
sk               1278 net/phonet/pep.c 	release_sock(sk);
sk               1290 net/phonet/pep.c 	skb_free_datagram(sk, skb);
sk               1294 net/phonet/pep.c static void pep_sock_unhash(struct sock *sk)
sk               1296 net/phonet/pep.c 	struct pep_sock *pn = pep_sk(sk);
sk               1299 net/phonet/pep.c 	lock_sock(sk);
sk               1304 net/phonet/pep.c 		release_sock(sk);
sk               1308 net/phonet/pep.c 		sk_del_node_init(sk);
sk               1309 net/phonet/pep.c 		sk = skparent;
sk               1315 net/phonet/pep.c 		pn_sock_unhash(&pn->pn_sk.sk);
sk               1316 net/phonet/pep.c 	release_sock(sk);
sk                 54 net/phonet/pn_netlink.c 	struct net *net = sock_net(skb->sk);
sk                128 net/phonet/pn_netlink.c 	pndevs = phonet_device_list(sock_net(skb->sk));
sk                221 net/phonet/pn_netlink.c 	struct net *net = sock_net(skb->sk);
sk                265 net/phonet/pn_netlink.c 	struct net *net = sock_net(skb->sk);
sk                 30 net/phonet/socket.c 	struct sock *sk = sock->sk;
sk                 32 net/phonet/socket.c 	if (sk) {
sk                 33 net/phonet/socket.c 		sock->sk = NULL;
sk                 34 net/phonet/socket.c 		sk->sk_prot->close(sk, 0);
sk                132 net/phonet/socket.c int pn_sock_hash(struct sock *sk)
sk                134 net/phonet/socket.c 	struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
sk                137 net/phonet/socket.c 	sk_add_node_rcu(sk, hlist);
sk                144 net/phonet/socket.c void pn_sock_unhash(struct sock *sk)
sk                147 net/phonet/socket.c 	sk_del_node_init_rcu(sk);
sk                149 net/phonet/socket.c 	pn_sock_unbind_all_res(sk);
sk                158 net/phonet/socket.c 	struct sock *sk = sock->sk;
sk                159 net/phonet/socket.c 	struct pn_sock *pn = pn_sk(sk);
sk                165 net/phonet/socket.c 	if (sk->sk_prot->bind)
sk                166 net/phonet/socket.c 		return sk->sk_prot->bind(sk, addr, len);
sk                175 net/phonet/socket.c 	if (saddr && phonet_address_lookup(sock_net(sk), saddr))
sk                178 net/phonet/socket.c 	lock_sock(sk);
sk                179 net/phonet/socket.c 	if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
sk                183 net/phonet/socket.c 	WARN_ON(sk_hashed(sk));
sk                185 net/phonet/socket.c 	err = sk->sk_prot->get_port(sk, pn_port(handle));
sk                194 net/phonet/socket.c 	err = sk->sk_prot->hash(sk);
sk                198 net/phonet/socket.c 	release_sock(sk);
sk                213 net/phonet/socket.c 	BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
sk                220 net/phonet/socket.c 	struct sock *sk = sock->sk;
sk                221 net/phonet/socket.c 	struct pn_sock *pn = pn_sk(sk);
sk                224 net/phonet/socket.c 	long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk                234 net/phonet/socket.c 	lock_sock(sk);
sk                238 net/phonet/socket.c 		if (sk->sk_state != TCP_CLOSE) {
sk                255 net/phonet/socket.c 	err = sk->sk_prot->connect(sk, addr, len);
sk                262 net/phonet/socket.c 	while (sk->sk_state == TCP_SYN_SENT) {
sk                274 net/phonet/socket.c 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
sk                276 net/phonet/socket.c 		release_sock(sk);
sk                278 net/phonet/socket.c 		lock_sock(sk);
sk                279 net/phonet/socket.c 		finish_wait(sk_sleep(sk), &wait);
sk                282 net/phonet/socket.c 	if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
sk                284 net/phonet/socket.c 	else if (sk->sk_state == TCP_CLOSE_WAIT)
sk                290 net/phonet/socket.c 	release_sock(sk);
sk                297 net/phonet/socket.c 	struct sock *sk = sock->sk;
sk                301 net/phonet/socket.c 	if (unlikely(sk->sk_state != TCP_LISTEN))
sk                304 net/phonet/socket.c 	newsk = sk->sk_prot->accept(sk, flags, &err, kern);
sk                318 net/phonet/socket.c 	struct sock *sk = sock->sk;
sk                319 net/phonet/socket.c 	struct pn_sock *pn = pn_sk(sk);
sk                333 net/phonet/socket.c 	struct sock *sk = sock->sk;
sk                334 net/phonet/socket.c 	struct pep_sock *pn = pep_sk(sk);
sk                337 net/phonet/socket.c 	poll_wait(file, sk_sleep(sk), wait);
sk                339 net/phonet/socket.c 	if (sk->sk_state == TCP_CLOSE)
sk                341 net/phonet/socket.c 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk                345 net/phonet/socket.c 	if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
sk                348 net/phonet/socket.c 	if (sk->sk_state == TCP_ESTABLISHED &&
sk                349 net/phonet/socket.c 		refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
sk                359 net/phonet/socket.c 	struct sock *sk = sock->sk;
sk                360 net/phonet/socket.c 	struct pn_sock *pn = pn_sk(sk);
sk                370 net/phonet/socket.c 		lock_sock(sk);
sk                371 net/phonet/socket.c 		if (sk->sk_bound_dev_if)
sk                372 net/phonet/socket.c 			dev = dev_get_by_index(sock_net(sk),
sk                373 net/phonet/socket.c 						sk->sk_bound_dev_if);
sk                375 net/phonet/socket.c 			dev = phonet_device_get(sock_net(sk));
sk                380 net/phonet/socket.c 		release_sock(sk);
sk                391 net/phonet/socket.c 	return sk->sk_prot->ioctl(sk, cmd, arg);
sk                396 net/phonet/socket.c 	struct sock *sk = sock->sk;
sk                402 net/phonet/socket.c 	lock_sock(sk);
sk                408 net/phonet/socket.c 	if (sk->sk_state != TCP_LISTEN) {
sk                409 net/phonet/socket.c 		sk->sk_state = TCP_LISTEN;
sk                410 net/phonet/socket.c 		sk->sk_ack_backlog = 0;
sk                412 net/phonet/socket.c 	sk->sk_max_ack_backlog = backlog;
sk                414 net/phonet/socket.c 	release_sock(sk);
sk                421 net/phonet/socket.c 	struct sock *sk = sock->sk;
sk                426 net/phonet/socket.c 	return sk->sk_prot->sendmsg(sk, m, total_len);
sk                481 net/phonet/socket.c int pn_sock_get_port(struct sock *sk, unsigned short sport)
sk                484 net/phonet/socket.c 	struct net *net = sock_net(sk);
sk                485 net/phonet/socket.c 	struct pn_sock *pn = pn_sk(sk);
sk                550 net/phonet/socket.c static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
sk                555 net/phonet/socket.c 		sk = sk_next(sk);
sk                556 net/phonet/socket.c 	while (sk && !net_eq(net, sock_net(sk)));
sk                558 net/phonet/socket.c 	return sk;
sk                570 net/phonet/socket.c 	struct sock *sk;
sk                573 net/phonet/socket.c 		sk = pn_sock_get_idx(seq, 0);
sk                575 net/phonet/socket.c 		sk = pn_sock_get_next(seq, v);
sk                577 net/phonet/socket.c 	return sk;
sk                593 net/phonet/socket.c 		struct sock *sk = v;
sk                594 net/phonet/socket.c 		struct pn_sock *pn = pn_sk(sk);
sk                598 net/phonet/socket.c 			sk->sk_protocol, pn->sobject, pn->dobject,
sk                599 net/phonet/socket.c 			pn->resource, sk->sk_state,
sk                600 net/phonet/socket.c 			sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
sk                601 net/phonet/socket.c 			from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sk                602 net/phonet/socket.c 			sock_i_ino(sk),
sk                603 net/phonet/socket.c 			refcount_read(&sk->sk_refcnt), sk,
sk                604 net/phonet/socket.c 			atomic_read(&sk->sk_drops));
sk                619 net/phonet/socket.c 	struct sock *sk[256];
sk                627 net/phonet/socket.c 	struct sock *sk;
sk                633 net/phonet/socket.c 	sk = rcu_dereference(pnres.sk[res]);
sk                634 net/phonet/socket.c 	if (sk)
sk                635 net/phonet/socket.c 		sock_hold(sk);
sk                637 net/phonet/socket.c 	return sk;
sk                642 net/phonet/socket.c int pn_sock_bind_res(struct sock *sk, u8 res)
sk                646 net/phonet/socket.c 	if (!net_eq(sock_net(sk), &init_net))
sk                650 net/phonet/socket.c 	if (pn_socket_autobind(sk->sk_socket))
sk                654 net/phonet/socket.c 	if (pnres.sk[res] == NULL) {
sk                655 net/phonet/socket.c 		sock_hold(sk);
sk                656 net/phonet/socket.c 		rcu_assign_pointer(pnres.sk[res], sk);
sk                663 net/phonet/socket.c int pn_sock_unbind_res(struct sock *sk, u8 res)
sk                671 net/phonet/socket.c 	if (pnres.sk[res] == sk) {
sk                672 net/phonet/socket.c 		RCU_INIT_POINTER(pnres.sk[res], NULL);
sk                679 net/phonet/socket.c 		sock_put(sk);
sk                684 net/phonet/socket.c void pn_sock_unbind_all_res(struct sock *sk)
sk                690 net/phonet/socket.c 		if (pnres.sk[res] == sk) {
sk                691 net/phonet/socket.c 			RCU_INIT_POINTER(pnres.sk[res], NULL);
sk                698 net/phonet/socket.c 		__sock_put(sk);
sk                714 net/phonet/socket.c 		if (pnres.sk[i] == NULL)
sk                717 net/phonet/socket.c 			return pnres.sk + i;
sk                723 net/phonet/socket.c static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
sk                730 net/phonet/socket.c 	for (i = (sk - pnres.sk) + 1; i < 256; i++)
sk                731 net/phonet/socket.c 		if (pnres.sk[i])
sk                732 net/phonet/socket.c 			return pnres.sk + i;
sk                745 net/phonet/socket.c 	struct sock **sk;
sk                748 net/phonet/socket.c 		sk = pn_res_get_idx(seq, 0);
sk                750 net/phonet/socket.c 		sk = pn_res_get_next(seq, v);
sk                752 net/phonet/socket.c 	return sk;
sk                768 net/phonet/socket.c 		struct sock *sk = *psk;
sk                771 net/phonet/socket.c 			   (int) (psk - pnres.sk),
sk                772 net/phonet/socket.c 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sk                773 net/phonet/socket.c 			   sock_i_ino(sk));
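The pnres.sk[] lines above follow the standard RCU publish/lookup/retire discipline for a table of socket pointers: publish with sock_hold() plus rcu_assign_pointer(), look up under rcu_read_lock() and take a reference before returning, retire with RCU_INIT_POINTER(..., NULL) plus sock_put() once readers can no longer see the old pointer. A hedged sketch with a hypothetical foo_res[] table (writer-side serialization, a mutex in the Phonet code, is elided here):

	#include <net/sock.h>
	#include <linux/rcupdate.h>

	static struct sock __rcu *foo_res[256];

	static int foo_bind_res(struct sock *sk, u8 res)
	{
		if (rcu_access_pointer(foo_res[res]))
			return -EADDRINUSE;
		sock_hold(sk);				/* the table owns one reference */
		rcu_assign_pointer(foo_res[res], sk);
		return 0;
	}

	static struct sock *foo_find_res(u8 res)
	{
		struct sock *sk;

		rcu_read_lock();
		sk = rcu_dereference(foo_res[res]);
		if (sk)
			sock_hold(sk);			/* caller must sock_put() when done */
		rcu_read_unlock();
		return sk;
	}

	static void foo_unbind_res(struct sock *sk, u8 res)
	{
		if (rcu_access_pointer(foo_res[res]) != sk)
			return;
		RCU_INIT_POINTER(foo_res[res], NULL);
		synchronize_rcu();			/* or rely on SOCK_RCU_FREE teardown */
		sock_put(sk);
	}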
sk                 78 net/psample/psample.c 		if (!net_eq(group->net, sock_net(msg->sk)))
sk                 86 net/qrtr/qrtr.c 	struct sock sk;
sk                 91 net/qrtr/qrtr.c static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
sk                 93 net/qrtr/qrtr.c 	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
sk                 94 net/qrtr/qrtr.c 	return container_of(sk, struct qrtr_sock, sk);
sk                388 net/qrtr/qrtr.c 			if (sock_queue_rcv_skb(&ipc->sk, skb))
sk                489 net/qrtr/qrtr.c 		sock_hold(&ipc->sk);
sk                498 net/qrtr/qrtr.c 	sock_put(&ipc->sk);
sk                519 net/qrtr/qrtr.c 		skb_set_owner_w(skb, &ipc->sk);
sk                527 net/qrtr/qrtr.c 	__sock_put(&ipc->sk);
sk                571 net/qrtr/qrtr.c 	sock_hold(&ipc->sk);
sk                588 net/qrtr/qrtr.c 		sock_hold(&ipc->sk);
sk                589 net/qrtr/qrtr.c 		ipc->sk.sk_err = ENETRESET;
sk                590 net/qrtr/qrtr.c 		ipc->sk.sk_error_report(&ipc->sk);
sk                591 net/qrtr/qrtr.c 		sock_put(&ipc->sk);
sk                603 net/qrtr/qrtr.c 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
sk                604 net/qrtr/qrtr.c 	struct sock *sk = sock->sk;
sk                622 net/qrtr/qrtr.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                634 net/qrtr/qrtr.c 	struct sock *sk = sock->sk;
sk                637 net/qrtr/qrtr.c 	if (!sock_flag(sk, SOCK_ZAPPED))
sk                651 net/qrtr/qrtr.c 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
sk                652 net/qrtr/qrtr.c 	struct sock *sk = sock->sk;
sk                661 net/qrtr/qrtr.c 	lock_sock(sk);
sk                662 net/qrtr/qrtr.c 	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
sk                663 net/qrtr/qrtr.c 	release_sock(sk);
sk                677 net/qrtr/qrtr.c 	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
sk                686 net/qrtr/qrtr.c 	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
sk                709 net/qrtr/qrtr.c 		skb_set_owner_w(skbn, skb->sk);
sk                725 net/qrtr/qrtr.c 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
sk                726 net/qrtr/qrtr.c 	struct sock *sk = sock->sk;
sk                739 net/qrtr/qrtr.c 	lock_sock(sk);
sk                743 net/qrtr/qrtr.c 			release_sock(sk);
sk                748 net/qrtr/qrtr.c 			release_sock(sk);
sk                754 net/qrtr/qrtr.c 			release_sock(sk);
sk                757 net/qrtr/qrtr.c 	} else if (sk->sk_state == TCP_ESTABLISHED) {
sk                760 net/qrtr/qrtr.c 		release_sock(sk);
sk                768 net/qrtr/qrtr.c 			release_sock(sk);
sk                777 net/qrtr/qrtr.c 			release_sock(sk);
sk                784 net/qrtr/qrtr.c 	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
sk                815 net/qrtr/qrtr.c 	release_sock(sk);
sk                824 net/qrtr/qrtr.c 	struct sock *sk = sock->sk;
sk                829 net/qrtr/qrtr.c 	lock_sock(sk);
sk                831 net/qrtr/qrtr.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk                832 net/qrtr/qrtr.c 		release_sock(sk);
sk                836 net/qrtr/qrtr.c 	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
sk                839 net/qrtr/qrtr.c 		release_sock(sk);
sk                863 net/qrtr/qrtr.c 	skb_free_datagram(sk, skb);
sk                864 net/qrtr/qrtr.c 	release_sock(sk);
sk                873 net/qrtr/qrtr.c 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
sk                874 net/qrtr/qrtr.c 	struct sock *sk = sock->sk;
sk                880 net/qrtr/qrtr.c 	lock_sock(sk);
sk                882 net/qrtr/qrtr.c 	sk->sk_state = TCP_CLOSE;
sk                887 net/qrtr/qrtr.c 		release_sock(sk);
sk                893 net/qrtr/qrtr.c 	sk->sk_state = TCP_ESTABLISHED;
sk                895 net/qrtr/qrtr.c 	release_sock(sk);
sk                903 net/qrtr/qrtr.c 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
sk                905 net/qrtr/qrtr.c 	struct sock *sk = sock->sk;
sk                907 net/qrtr/qrtr.c 	lock_sock(sk);
sk                909 net/qrtr/qrtr.c 		if (sk->sk_state != TCP_ESTABLISHED) {
sk                910 net/qrtr/qrtr.c 			release_sock(sk);
sk                918 net/qrtr/qrtr.c 	release_sock(sk);
sk                930 net/qrtr/qrtr.c 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
sk                931 net/qrtr/qrtr.c 	struct sock *sk = sock->sk;
sk                938 net/qrtr/qrtr.c 	lock_sock(sk);
sk                942 net/qrtr/qrtr.c 		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
sk                948 net/qrtr/qrtr.c 		skb = skb_peek(&sk->sk_receive_queue);
sk                982 net/qrtr/qrtr.c 	release_sock(sk);
sk                989 net/qrtr/qrtr.c 	struct sock *sk = sock->sk;
sk                992 net/qrtr/qrtr.c 	if (!sk)
sk                995 net/qrtr/qrtr.c 	lock_sock(sk);
sk                997 net/qrtr/qrtr.c 	ipc = qrtr_sk(sk);
sk                998 net/qrtr/qrtr.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk                999 net/qrtr/qrtr.c 	if (!sock_flag(sk, SOCK_DEAD))
sk               1000 net/qrtr/qrtr.c 		sk->sk_state_change(sk);
sk               1002 net/qrtr/qrtr.c 	sock_set_flag(sk, SOCK_DEAD);
sk               1003 net/qrtr/qrtr.c 	sock->sk = NULL;
sk               1005 net/qrtr/qrtr.c 	if (!sock_flag(sk, SOCK_ZAPPED))
sk               1008 net/qrtr/qrtr.c 	skb_queue_purge(&sk->sk_receive_queue);
sk               1010 net/qrtr/qrtr.c 	release_sock(sk);
sk               1011 net/qrtr/qrtr.c 	sock_put(sk);
sk               1048 net/qrtr/qrtr.c 	struct sock *sk;
sk               1053 net/qrtr/qrtr.c 	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
sk               1054 net/qrtr/qrtr.c 	if (!sk)
sk               1057 net/qrtr/qrtr.c 	sock_set_flag(sk, SOCK_ZAPPED);
sk               1059 net/qrtr/qrtr.c 	sock_init_data(sock, sk);
sk               1062 net/qrtr/qrtr.c 	ipc = qrtr_sk(sk);
sk                 61 net/rds/af_rds.c 	struct sock *sk = sock->sk;
sk                 64 net/rds/af_rds.c 	if (!sk)
sk                 67 net/rds/af_rds.c 	rs = rds_sk_to_rs(sk);
sk                 69 net/rds/af_rds.c 	sock_orphan(sk);
sk                 90 net/rds/af_rds.c 	sock->sk = NULL;
sk                 91 net/rds/af_rds.c 	sock_put(sk);
sk                117 net/rds/af_rds.c 	struct rds_sock *rs = rds_sk_to_rs(sock->sk);
sk                215 net/rds/af_rds.c 	struct sock *sk = sock->sk;
sk                216 net/rds/af_rds.c 	struct rds_sock *rs = rds_sk_to_rs(sk);
sk                220 net/rds/af_rds.c 	poll_wait(file, sk_sleep(sk), wait);
sk                244 net/rds/af_rds.c 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
sk                257 net/rds/af_rds.c 	struct rds_sock *rs = rds_sk_to_rs(sock->sk);
sk                383 net/rds/af_rds.c static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
sk                397 net/rds/af_rds.c 		sock_set_flag(sk, SOCK_TSTAMP_NEW);
sk                400 net/rds/af_rds.c 		sock_set_flag(sk, SOCK_RCVTSTAMP);
sk                402 net/rds/af_rds.c 		sock_reset_flag(sk, SOCK_RCVTSTAMP);
sk                437 net/rds/af_rds.c 	struct rds_sock *rs = rds_sk_to_rs(sock->sk);
sk                465 net/rds/af_rds.c 		lock_sock(sock->sk);
sk                467 net/rds/af_rds.c 		release_sock(sock->sk);
sk                471 net/rds/af_rds.c 		lock_sock(sock->sk);
sk                472 net/rds/af_rds.c 		ret = rds_enable_recvtstamp(sock->sk, optval, optlen, optname);
sk                473 net/rds/af_rds.c 		release_sock(sock->sk);
sk                488 net/rds/af_rds.c 	struct rds_sock *rs = rds_sk_to_rs(sock->sk);
sk                541 net/rds/af_rds.c 	struct sock *sk = sock->sk;
sk                543 net/rds/af_rds.c 	struct rds_sock *rs = rds_sk_to_rs(sk);
sk                549 net/rds/af_rds.c 	lock_sock(sk);
sk                630 net/rds/af_rds.c 	release_sock(sk);
sk                661 net/rds/af_rds.c static void rds_sock_destruct(struct sock *sk)
sk                663 net/rds/af_rds.c 	struct rds_sock *rs = rds_sk_to_rs(sk);
sk                669 net/rds/af_rds.c static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
sk                673 net/rds/af_rds.c 	sock_init_data(sock, sk);
sk                675 net/rds/af_rds.c 	sk->sk_protocol		= protocol;
sk                676 net/rds/af_rds.c 	sk->sk_destruct		= rds_sock_destruct;
sk                678 net/rds/af_rds.c 	rs = rds_sk_to_rs(sk);
sk                703 net/rds/af_rds.c 	struct sock *sk;
sk                708 net/rds/af_rds.c 	sk = sk_alloc(net, AF_RDS, GFP_KERNEL, &rds_proto, kern);
sk                709 net/rds/af_rds.c 	if (!sk)
sk                712 net/rds/af_rds.c 	return __rds_create(sock, sk, protocol);
sk                165 net/rds/bind.c 	struct sock *sk = sock->sk;
sk                166 net/rds/bind.c 	struct rds_sock *rs = rds_sk_to_rs(sk);
sk                224 net/rds/bind.c 	lock_sock(sk);
sk                248 net/rds/bind.c 		    trans->laddr_check(sock_net(sock->sk),
sk                254 net/rds/bind.c 		trans = rds_trans_get_preferred(sock_net(sock->sk),
sk                265 net/rds/bind.c 	sock_set_flag(sk, SOCK_RCU_FREE);
sk                271 net/rds/bind.c 	release_sock(sk);
sk                374 net/rds/ib.h   void rds_ib_state_change(struct sock *sk);
sk                666 net/rds/rds.h  static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
sk                668 net/rds/rds.h  	return container_of(sk, struct rds_sock, rs_sk);
sk                732 net/rds/rds.h  static inline void __rds_wake_sk_sleep(struct sock *sk)
sk                734 net/rds/rds.h  	wait_queue_head_t *waitq = sk_sleep(sk);
sk                736 net/rds/rds.h  	if (!sock_flag(sk, SOCK_DEAD) && waitq)
sk                 87 net/rds/recv.c static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
sk                287 net/rds/recv.c 	struct sock *sk;
sk                372 net/rds/recv.c 	sk = rds_rs_to_sk(rs);
sk                376 net/rds/recv.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                379 net/rds/recv.c 		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
sk                382 net/rds/recv.c 		if (sock_flag(sk, SOCK_RCVTSTAMP))
sk                387 net/rds/recv.c 		__rds_wake_sk_sleep(sk);
sk                424 net/rds/recv.c 	struct sock *sk = rds_rs_to_sk(rs);
sk                433 net/rds/recv.c 			rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
sk                635 net/rds/recv.c 	struct sock *sk = sock->sk;
sk                636 net/rds/recv.c 	struct rds_sock *rs = rds_sk_to_rs(sk);
sk                644 net/rds/recv.c 	timeo = sock_rcvtimeo(sk, nonblock);
sk                651 net/rds/recv.c 		return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR);
sk                673 net/rds/recv.c 			timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
sk                760 net/rds/recv.c 	struct sock *sk = rds_rs_to_sk(rs);
sk                766 net/rds/recv.c 		rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong,
sk               1103 net/rds/send.c 	struct sock *sk = sock->sk;
sk               1104 net/rds/send.c 	struct rds_sock *rs = rds_sk_to_rs(sk);
sk               1113 net/rds/send.c 	long timeo = sock_sndtimeo(sk, nonblock);
sk               1203 net/rds/send.c 		lock_sock(sk);
sk               1207 net/rds/send.c 		release_sock(sk);
sk               1210 net/rds/send.c 	lock_sock(sk);
sk               1212 net/rds/send.c 		release_sock(sk);
sk               1222 net/rds/send.c 			release_sock(sk);
sk               1234 net/rds/send.c 				release_sock(sk);
sk               1240 net/rds/send.c 	release_sock(sk);
sk               1296 net/rds/send.c 		conn = rds_conn_create_outgoing(sock_net(sock->sk),
sk               1299 net/rds/send.c 						sock->sk->sk_allocation,
sk               1359 net/rds/send.c 		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
sk                105 net/rds/tcp.c  	return tcp_sk(tc->t_sock->sk)->write_seq;
sk                110 net/rds/tcp.c  	return tcp_sk(tc->t_sock->sk)->snd_una;
sk                117 net/rds/tcp.c  	write_lock_bh(&sock->sk->sk_callback_lock);
sk                131 net/rds/tcp.c  	sock->sk->sk_write_space = tc->t_orig_write_space;
sk                132 net/rds/tcp.c  	sock->sk->sk_data_ready = tc->t_orig_data_ready;
sk                133 net/rds/tcp.c  	sock->sk->sk_state_change = tc->t_orig_state_change;
sk                134 net/rds/tcp.c  	sock->sk->sk_user_data = NULL;
sk                136 net/rds/tcp.c  	write_unlock_bh(&sock->sk->sk_callback_lock);
sk                179 net/rds/tcp.c  	lock_sock(osock->sk);
sk                190 net/rds/tcp.c  	release_sock(osock->sk);
sk                194 net/rds/tcp.c  	lock_sock(sock->sk);
sk                196 net/rds/tcp.c  	release_sock(sock->sk);
sk                208 net/rds/tcp.c  	write_lock_bh(&sock->sk->sk_callback_lock);
sk                221 net/rds/tcp.c  	if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
sk                222 net/rds/tcp.c  		sock->sk->sk_data_ready = sock->sk->sk_user_data;
sk                226 net/rds/tcp.c  	tc->t_orig_data_ready = sock->sk->sk_data_ready;
sk                227 net/rds/tcp.c  	tc->t_orig_write_space = sock->sk->sk_write_space;
sk                228 net/rds/tcp.c  	tc->t_orig_state_change = sock->sk->sk_state_change;
sk                230 net/rds/tcp.c  	sock->sk->sk_user_data = cp;
sk                231 net/rds/tcp.c  	sock->sk->sk_data_ready = rds_tcp_data_ready;
sk                232 net/rds/tcp.c  	sock->sk->sk_write_space = rds_tcp_write_space;
sk                233 net/rds/tcp.c  	sock->sk->sk_state_change = rds_tcp_state_change;
sk                235 net/rds/tcp.c  	write_unlock_bh(&sock->sk->sk_callback_lock);
sk                255 net/rds/tcp.c  		struct inet_sock *inet = inet_sk(tc->t_sock->sk);
sk                301 net/rds/tcp.c  		struct sock *sk = tc->t_sock->sk;
sk                302 net/rds/tcp.c  		struct inet_sock *inet = inet_sk(sk);
sk                304 net/rds/tcp.c  		tsinfo6.local_addr = sk->sk_v6_rcv_saddr;
sk                306 net/rds/tcp.c  		tsinfo6.peer_addr = sk->sk_v6_daddr;
sk                502 net/rds/tcp.c  	struct sock *sk = sock->sk;
sk                503 net/rds/tcp.c  	struct net *net = sock_net(sk);
sk                507 net/rds/tcp.c  	lock_sock(sk);
sk                509 net/rds/tcp.c  		sk->sk_sndbuf = rtn->sndbuf_size;
sk                510 net/rds/tcp.c  		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk                513 net/rds/tcp.c  		sk->sk_sndbuf = rtn->rcvbuf_size;
sk                514 net/rds/tcp.c  		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk                516 net/rds/tcp.c  	release_sock(sk);
sk                529 net/rds/tcp.c  void rds_tcp_accept_work(struct sock *sk)
sk                531 net/rds/tcp.c  	struct net *net = sock_net(sk);
sk                653 net/rds/tcp.c  	return lsock->sk->sk_user_data;
sk                 62 net/rds/tcp.h  void rds_tcp_accept_work(struct sock *sk);
sk                 67 net/rds/tcp.h  void rds_tcp_state_change(struct sock *sk);
sk                 72 net/rds/tcp.h  void rds_tcp_listen_data_ready(struct sock *sk);
sk                 81 net/rds/tcp.h  void rds_tcp_data_ready(struct sock *sk);
sk                 91 net/rds/tcp.h  void rds_tcp_write_space(struct sock *sk);
sk                 40 net/rds/tcp_connect.c void rds_tcp_state_change(struct sock *sk)
sk                 42 net/rds/tcp_connect.c 	void (*state_change)(struct sock *sk);
sk                 46 net/rds/tcp_connect.c 	read_lock_bh(&sk->sk_callback_lock);
sk                 47 net/rds/tcp_connect.c 	cp = sk->sk_user_data;
sk                 49 net/rds/tcp_connect.c 		state_change = sk->sk_state_change;
sk                 55 net/rds/tcp_connect.c 	rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
sk                 57 net/rds/tcp_connect.c 	switch (sk->sk_state) {
sk                 85 net/rds/tcp_connect.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                 86 net/rds/tcp_connect.c 	state_change(sk);
sk                212 net/rds/tcp_connect.c 		lock_sock(sock->sk);
sk                215 net/rds/tcp_connect.c 		release_sock(sock->sk);
sk                143 net/rds/tcp_listen.c 	ret = sock_create_lite(sock->sk->sk_family,
sk                144 net/rds/tcp_listen.c 			       sock->sk->sk_type, sock->sk->sk_protocol,
sk                169 net/rds/tcp_listen.c 	inet = inet_sk(new_sock->sk);
sk                172 net/rds/tcp_listen.c 	my_addr = &new_sock->sk->sk_v6_rcv_saddr;
sk                173 net/rds/tcp_listen.c 	peer_addr = &new_sock->sk->sk_v6_daddr;
sk                181 net/rds/tcp_listen.c 		 sock->sk->sk_family,
sk                194 net/rds/tcp_listen.c 		inet6 = inet6_sk(new_sock->sk);
sk                197 net/rds/tcp_listen.c 		dev_if = new_sock->sk->sk_bound_dev_if;
sk                201 net/rds/tcp_listen.c 	conn = rds_conn_create(sock_net(sock->sk),
sk                255 net/rds/tcp_listen.c void rds_tcp_listen_data_ready(struct sock *sk)
sk                257 net/rds/tcp_listen.c 	void (*ready)(struct sock *sk);
sk                259 net/rds/tcp_listen.c 	rdsdebug("listen data ready sk %p\n", sk);
sk                261 net/rds/tcp_listen.c 	read_lock_bh(&sk->sk_callback_lock);
sk                262 net/rds/tcp_listen.c 	ready = sk->sk_user_data;
sk                264 net/rds/tcp_listen.c 		ready = sk->sk_data_ready;
sk                277 net/rds/tcp_listen.c 	if (sk->sk_state == TCP_LISTEN)
sk                278 net/rds/tcp_listen.c 		rds_tcp_accept_work(sk);
sk                280 net/rds/tcp_listen.c 		ready = rds_tcp_listen_sock_def_readable(sock_net(sk));
sk                283 net/rds/tcp_listen.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                285 net/rds/tcp_listen.c 		ready(sk);
sk                305 net/rds/tcp_listen.c 	sock->sk->sk_reuse = SK_CAN_REUSE;
sk                308 net/rds/tcp_listen.c 	write_lock_bh(&sock->sk->sk_callback_lock);
sk                309 net/rds/tcp_listen.c 	sock->sk->sk_user_data = sock->sk->sk_data_ready;
sk                310 net/rds/tcp_listen.c 	sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
sk                311 net/rds/tcp_listen.c 	write_unlock_bh(&sock->sk->sk_callback_lock);
sk                349 net/rds/tcp_listen.c 	struct sock *sk;
sk                354 net/rds/tcp_listen.c 	sk = sock->sk;
sk                357 net/rds/tcp_listen.c 	lock_sock(sk);
sk                358 net/rds/tcp_listen.c 	write_lock_bh(&sk->sk_callback_lock);
sk                359 net/rds/tcp_listen.c 	if (sk->sk_user_data) {
sk                360 net/rds/tcp_listen.c 		sk->sk_data_ready = sk->sk_user_data;
sk                361 net/rds/tcp_listen.c 		sk->sk_user_data = NULL;
sk                363 net/rds/tcp_listen.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                364 net/rds/tcp_listen.c 	release_sock(sk);
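The rds_tcp_listen_* lines above show the listener-side variant of callback hijacking: when listening starts, the original sk_data_ready is parked in sk_user_data under write_lock_bh(&sk->sk_callback_lock); the replacement handler re-reads it under read_lock_bh() so it can kick the accept path and still fall back to (or later restore) the default handler. A hedged sketch of that shape, again with invented foo_* names:

	#include <net/sock.h>

	static void foo_listen_data_ready(struct sock *sk)
	{
		void (*ready)(struct sock *sk);

		read_lock_bh(&sk->sk_callback_lock);
		ready = sk->sk_user_data;	/* original handler parked here at setup */
		if (!ready) {			/* raced with teardown */
			ready = sk->sk_data_ready;
			goto out;
		}
		/* ... schedule the accept worker here ... */
	out:
		read_unlock_bh(&sk->sk_callback_lock);
		ready(sk);
	}

	static void foo_listen_hook(struct socket *sock)
	{
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);
		sk->sk_user_data  = sk->sk_data_ready;	/* remember the default */
		sk->sk_data_ready = foo_listen_data_ready;
		write_unlock_bh(&sk->sk_callback_lock);
	}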
sk                257 net/rds/tcp_recv.c 		 skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
sk                276 net/rds/tcp_recv.c 	tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
sk                299 net/rds/tcp_recv.c 	lock_sock(sock->sk);
sk                301 net/rds/tcp_recv.c 	release_sock(sock->sk);
sk                306 net/rds/tcp_recv.c void rds_tcp_data_ready(struct sock *sk)
sk                308 net/rds/tcp_recv.c 	void (*ready)(struct sock *sk);
sk                312 net/rds/tcp_recv.c 	rdsdebug("data ready sk %p\n", sk);
sk                314 net/rds/tcp_recv.c 	read_lock_bh(&sk->sk_callback_lock);
sk                315 net/rds/tcp_recv.c 	cp = sk->sk_user_data;
sk                317 net/rds/tcp_recv.c 		ready = sk->sk_data_ready;
sk                332 net/rds/tcp_recv.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                333 net/rds/tcp_recv.c 	ready(sk);
sk                107 net/rds/tcp_send.c 		set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);
sk                183 net/rds/tcp_send.c void rds_tcp_write_space(struct sock *sk)
sk                185 net/rds/tcp_send.c 	void (*write_space)(struct sock *sk);
sk                189 net/rds/tcp_send.c 	read_lock_bh(&sk->sk_callback_lock);
sk                190 net/rds/tcp_send.c 	cp = sk->sk_user_data;
sk                192 net/rds/tcp_send.c 		write_space = sk->sk_write_space;
sk                206 net/rds/tcp_send.c 	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
sk                212 net/rds/tcp_send.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                226 net/rds/tcp_send.c 	write_space(sk);
sk                228 net/rds/tcp_send.c 	if (sk->sk_socket)
sk                229 net/rds/tcp_send.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                129 net/rose/af_rose.c static void rose_remove_socket(struct sock *sk)
sk                132 net/rose/af_rose.c 	sk_del_node_init(sk);
sk                208 net/rose/af_rose.c static void rose_insert_socket(struct sock *sk)
sk                212 net/rose/af_rose.c 	sk_add_node(sk, &rose_list);
sk                298 net/rose/af_rose.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                300 net/rose/af_rose.c 	rose_destroy_socket(sk);
sk                309 net/rose/af_rose.c void rose_destroy_socket(struct sock *sk)
sk                313 net/rose/af_rose.c 	rose_remove_socket(sk);
sk                314 net/rose/af_rose.c 	rose_stop_heartbeat(sk);
sk                315 net/rose/af_rose.c 	rose_stop_idletimer(sk);
sk                316 net/rose/af_rose.c 	rose_stop_timer(sk);
sk                318 net/rose/af_rose.c 	rose_clear_queues(sk);		/* Flush the queues */
sk                320 net/rose/af_rose.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk                321 net/rose/af_rose.c 		if (skb->sk != sk) {	/* A pending connection */
sk                323 net/rose/af_rose.c 			sock_set_flag(skb->sk, SOCK_DEAD);
sk                324 net/rose/af_rose.c 			rose_start_heartbeat(skb->sk);
sk                325 net/rose/af_rose.c 			rose_sk(skb->sk)->state = ROSE_STATE_0;
sk                331 net/rose/af_rose.c 	if (sk_has_allocations(sk)) {
sk                333 net/rose/af_rose.c 		timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
sk                334 net/rose/af_rose.c 		sk->sk_timer.expires  = jiffies + 10 * HZ;
sk                335 net/rose/af_rose.c 		add_timer(&sk->sk_timer);
sk                337 net/rose/af_rose.c 		sock_put(sk);
sk                348 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk                349 net/rose/af_rose.c 	struct rose_sock *rose = rose_sk(sk);
sk                408 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk                409 net/rose/af_rose.c 	struct rose_sock *rose = rose_sk(sk);
sk                465 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk                467 net/rose/af_rose.c 	if (sk->sk_state != TCP_LISTEN) {
sk                468 net/rose/af_rose.c 		struct rose_sock *rose = rose_sk(sk);
sk                474 net/rose/af_rose.c 		sk->sk_max_ack_backlog = backlog;
sk                475 net/rose/af_rose.c 		sk->sk_state           = TCP_LISTEN;
sk                491 net/rose/af_rose.c 	struct sock *sk;
sk                500 net/rose/af_rose.c 	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
sk                501 net/rose/af_rose.c 	if (sk == NULL)
sk                504 net/rose/af_rose.c 	rose = rose_sk(sk);
sk                506 net/rose/af_rose.c 	sock_init_data(sock, sk);
sk                515 net/rose/af_rose.c 	sk->sk_protocol = protocol;
sk                533 net/rose/af_rose.c 	struct sock *sk;
sk                539 net/rose/af_rose.c 	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
sk                540 net/rose/af_rose.c 	if (sk == NULL)
sk                543 net/rose/af_rose.c 	rose = rose_sk(sk);
sk                545 net/rose/af_rose.c 	sock_init_data(NULL, sk);
sk                553 net/rose/af_rose.c 	sk->sk_type     = osk->sk_type;
sk                554 net/rose/af_rose.c 	sk->sk_priority = osk->sk_priority;
sk                555 net/rose/af_rose.c 	sk->sk_protocol = osk->sk_protocol;
sk                556 net/rose/af_rose.c 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
sk                557 net/rose/af_rose.c 	sk->sk_sndbuf   = osk->sk_sndbuf;
sk                558 net/rose/af_rose.c 	sk->sk_state    = TCP_ESTABLISHED;
sk                559 net/rose/af_rose.c 	sock_copy_flags(sk, osk);
sk                574 net/rose/af_rose.c 	return sk;
sk                579 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk                582 net/rose/af_rose.c 	if (sk == NULL) return 0;
sk                584 net/rose/af_rose.c 	sock_hold(sk);
sk                585 net/rose/af_rose.c 	sock_orphan(sk);
sk                586 net/rose/af_rose.c 	lock_sock(sk);
sk                587 net/rose/af_rose.c 	rose = rose_sk(sk);
sk                591 net/rose/af_rose.c 		release_sock(sk);
sk                592 net/rose/af_rose.c 		rose_disconnect(sk, 0, -1, -1);
sk                593 net/rose/af_rose.c 		lock_sock(sk);
sk                594 net/rose/af_rose.c 		rose_destroy_socket(sk);
sk                599 net/rose/af_rose.c 		release_sock(sk);
sk                600 net/rose/af_rose.c 		rose_disconnect(sk, 0, -1, -1);
sk                601 net/rose/af_rose.c 		lock_sock(sk);
sk                602 net/rose/af_rose.c 		rose_destroy_socket(sk);
sk                609 net/rose/af_rose.c 		rose_clear_queues(sk);
sk                610 net/rose/af_rose.c 		rose_stop_idletimer(sk);
sk                611 net/rose/af_rose.c 		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
sk                612 net/rose/af_rose.c 		rose_start_t3timer(sk);
sk                614 net/rose/af_rose.c 		sk->sk_state    = TCP_CLOSE;
sk                615 net/rose/af_rose.c 		sk->sk_shutdown |= SEND_SHUTDOWN;
sk                616 net/rose/af_rose.c 		sk->sk_state_change(sk);
sk                617 net/rose/af_rose.c 		sock_set_flag(sk, SOCK_DEAD);
sk                618 net/rose/af_rose.c 		sock_set_flag(sk, SOCK_DESTROY);
sk                625 net/rose/af_rose.c 	sock->sk = NULL;
sk                626 net/rose/af_rose.c 	release_sock(sk);
sk                627 net/rose/af_rose.c 	sock_put(sk);
sk                634 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk                635 net/rose/af_rose.c 	struct rose_sock *rose = rose_sk(sk);
sk                642 net/rose/af_rose.c 	if (!sock_flag(sk, SOCK_ZAPPED))
sk                688 net/rose/af_rose.c 	rose_insert_socket(sk);
sk                690 net/rose/af_rose.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                697 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk                698 net/rose/af_rose.c 	struct rose_sock *rose = rose_sk(sk);
sk                721 net/rose/af_rose.c 	lock_sock(sk);
sk                723 net/rose/af_rose.c 	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sk                729 net/rose/af_rose.c 	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sk                735 net/rose/af_rose.c 	if (sk->sk_state == TCP_ESTABLISHED) {
sk                741 net/rose/af_rose.c 	sk->sk_state   = TCP_CLOSE;
sk                757 net/rose/af_rose.c 	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
sk                758 net/rose/af_rose.c 		sock_reset_flag(sk, SOCK_ZAPPED);
sk                776 net/rose/af_rose.c 		rose_insert_socket(sk);		/* Finish the bind */
sk                795 net/rose/af_rose.c 	sk->sk_state     = TCP_SYN_SENT;
sk                801 net/rose/af_rose.c 	rose_write_internal(sk, ROSE_CALL_REQUEST);
sk                802 net/rose/af_rose.c 	rose_start_heartbeat(sk);
sk                803 net/rose/af_rose.c 	rose_start_t1timer(sk);
sk                806 net/rose/af_rose.c 	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
sk                815 net/rose/af_rose.c 	if (sk->sk_state == TCP_SYN_SENT) {
sk                819 net/rose/af_rose.c 			prepare_to_wait(sk_sleep(sk), &wait,
sk                821 net/rose/af_rose.c 			if (sk->sk_state != TCP_SYN_SENT)
sk                824 net/rose/af_rose.c 				release_sock(sk);
sk                826 net/rose/af_rose.c 				lock_sock(sk);
sk                832 net/rose/af_rose.c 		finish_wait(sk_sleep(sk), &wait);
sk                838 net/rose/af_rose.c 	if (sk->sk_state != TCP_ESTABLISHED) {
sk                840 net/rose/af_rose.c 		err = sock_error(sk);	/* Always set at this point */
sk                847 net/rose/af_rose.c 	release_sock(sk);
sk                858 net/rose/af_rose.c 	struct sock *sk;
sk                861 net/rose/af_rose.c 	if ((sk = sock->sk) == NULL)
sk                864 net/rose/af_rose.c 	lock_sock(sk);
sk                865 net/rose/af_rose.c 	if (sk->sk_type != SOCK_SEQPACKET) {
sk                870 net/rose/af_rose.c 	if (sk->sk_state != TCP_LISTEN) {
sk                880 net/rose/af_rose.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk                882 net/rose/af_rose.c 		skb = skb_dequeue(&sk->sk_receive_queue);
sk                891 net/rose/af_rose.c 			release_sock(sk);
sk                893 net/rose/af_rose.c 			lock_sock(sk);
sk                899 net/rose/af_rose.c 	finish_wait(sk_sleep(sk), &wait);
sk                903 net/rose/af_rose.c 	newsk = skb->sk;
sk                907 net/rose/af_rose.c 	skb->sk = NULL;
sk                909 net/rose/af_rose.c 	sk->sk_ack_backlog--;
sk                912 net/rose/af_rose.c 	release_sock(sk);
sk                921 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk                922 net/rose/af_rose.c 	struct rose_sock *rose = rose_sk(sk);
sk                927 net/rose/af_rose.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk                949 net/rose/af_rose.c 	struct sock *sk;
sk                955 net/rose/af_rose.c 	skb->sk = NULL;		/* Initially we don't know who it's for */
sk                969 net/rose/af_rose.c 	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
sk                974 net/rose/af_rose.c 	if (sk == NULL || sk_acceptq_is_full(sk) ||
sk                975 net/rose/af_rose.c 	    (make = rose_make_new(sk)) == NULL) {
sk                980 net/rose/af_rose.c 	skb->sk     = make;
sk               1001 net/rose/af_rose.c 	if (rose_sk(sk)->defer) {
sk               1014 net/rose/af_rose.c 	sk->sk_ack_backlog++;
sk               1018 net/rose/af_rose.c 	skb_queue_head(&sk->sk_receive_queue, skb);
sk               1022 net/rose/af_rose.c 	if (!sock_flag(sk, SOCK_DEAD))
sk               1023 net/rose/af_rose.c 		sk->sk_data_ready(sk);
sk               1030 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk               1031 net/rose/af_rose.c 	struct rose_sock *rose = rose_sk(sk);
sk               1042 net/rose/af_rose.c 	if (sock_flag(sk, SOCK_ZAPPED))
sk               1045 net/rose/af_rose.c 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
sk               1072 net/rose/af_rose.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1090 net/rose/af_rose.c 	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
sk               1130 net/rose/af_rose.c 	if (sk->sk_state != TCP_ESTABLISHED) {
sk               1150 net/rose/af_rose.c 			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
sk               1155 net/rose/af_rose.c 			skbn->sk   = sk;
sk               1174 net/rose/af_rose.c 			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
sk               1180 net/rose/af_rose.c 		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
sk               1183 net/rose/af_rose.c 	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
sk               1186 net/rose/af_rose.c 	rose_kick(sk);
sk               1195 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk               1196 net/rose/af_rose.c 	struct rose_sock *rose = rose_sk(sk);
sk               1206 net/rose/af_rose.c 	if (sk->sk_state != TCP_ESTABLISHED)
sk               1210 net/rose/af_rose.c 	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
sk               1248 net/rose/af_rose.c 	skb_free_datagram(sk, skb);
sk               1256 net/rose/af_rose.c 	struct sock *sk = sock->sk;
sk               1257 net/rose/af_rose.c 	struct rose_sock *rose = rose_sk(sk);
sk               1264 net/rose/af_rose.c 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
sk               1274 net/rose/af_rose.c 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
sk               1330 net/rose/af_rose.c 			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
sk               1331 net/rose/af_rose.c 			rose_start_idletimer(sk);
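Aside: the accept path listed above (the af_rose.c prepare_to_wait/finish_wait entries) uses the classic "sleep until a pending connection appears on sk_receive_queue" loop, releasing the socket lock across schedule() so bottom-half context can queue the new connection. A rough sketch of that loop is below, with hypothetical names; timeout and O_NONBLOCK handling are omitted.

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <net/sock.h>

/* Hypothetical helper: wait for a queued connection skb; lock held on entry and exit. */
static struct sk_buff *example_wait_for_connect(struct sock *sk, int *err)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;

	*err = 0;
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;			/* a pending connection arrived */
		if (signal_pending(current)) {
			*err = -ERESTARTSYS;	/* interrupted; let the caller restart */
			break;
		}
		release_sock(sk);		/* let softirq context queue new skbs */
		schedule();
		lock_sock(sk);
	}
	finish_wait(sk_sleep(sk), &wait);
	return skb;
}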
sk                 36 net/rose/rose_in.c static int rose_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
sk                 38 net/rose/rose_in.c 	struct rose_sock *rose = rose_sk(sk);
sk                 42 net/rose/rose_in.c 		rose_stop_timer(sk);
sk                 43 net/rose/rose_in.c 		rose_start_idletimer(sk);
sk                 50 net/rose/rose_in.c 		sk->sk_state	= TCP_ESTABLISHED;
sk                 51 net/rose/rose_in.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                 52 net/rose/rose_in.c 			sk->sk_state_change(sk);
sk                 56 net/rose/rose_in.c 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
sk                 57 net/rose/rose_in.c 		rose_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
sk                 73 net/rose/rose_in.c static int rose_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
sk                 75 net/rose/rose_in.c 	struct rose_sock *rose = rose_sk(sk);
sk                 79 net/rose/rose_in.c 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
sk                 80 net/rose/rose_in.c 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
sk                 85 net/rose/rose_in.c 		rose_disconnect(sk, 0, -1, -1);
sk                101 net/rose/rose_in.c static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
sk                103 net/rose/rose_in.c 	struct rose_sock *rose = rose_sk(sk);
sk                108 net/rose/rose_in.c 		rose_stop_timer(sk);
sk                109 net/rose/rose_in.c 		rose_start_idletimer(sk);
sk                110 net/rose/rose_in.c 		rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
sk                116 net/rose/rose_in.c 		rose_requeue_frames(sk);
sk                120 net/rose/rose_in.c 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
sk                121 net/rose/rose_in.c 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
sk                127 net/rose/rose_in.c 		if (!rose_validate_nr(sk, nr)) {
sk                128 net/rose/rose_in.c 			rose_write_internal(sk, ROSE_RESET_REQUEST);
sk                135 net/rose/rose_in.c 			rose_start_t2timer(sk);
sk                136 net/rose/rose_in.c 			rose_stop_idletimer(sk);
sk                138 net/rose/rose_in.c 			rose_frames_acked(sk, nr);
sk                149 net/rose/rose_in.c 		if (!rose_validate_nr(sk, nr)) {
sk                150 net/rose/rose_in.c 			rose_write_internal(sk, ROSE_RESET_REQUEST);
sk                157 net/rose/rose_in.c 			rose_start_t2timer(sk);
sk                158 net/rose/rose_in.c 			rose_stop_idletimer(sk);
sk                161 net/rose/rose_in.c 		rose_frames_acked(sk, nr);
sk                163 net/rose/rose_in.c 			rose_start_idletimer(sk);
sk                164 net/rose/rose_in.c 			if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
sk                165 net/rose/rose_in.c 			    __sock_queue_rcv_skb(sk, skb) == 0) {
sk                170 net/rose/rose_in.c 				rose_write_internal(sk, ROSE_RESET_REQUEST);
sk                177 net/rose/rose_in.c 				rose_start_t2timer(sk);
sk                178 net/rose/rose_in.c 				rose_stop_idletimer(sk);
sk                181 net/rose/rose_in.c 			if (atomic_read(&sk->sk_rmem_alloc) >
sk                182 net/rose/rose_in.c 			    (sk->sk_rcvbuf >> 1))
sk                191 net/rose/rose_in.c 			rose_stop_timer(sk);
sk                192 net/rose/rose_in.c 			rose_enquiry_response(sk);
sk                195 net/rose/rose_in.c 			rose_start_hbtimer(sk);
sk                212 net/rose/rose_in.c static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
sk                214 net/rose/rose_in.c 	struct rose_sock *rose = rose_sk(sk);
sk                218 net/rose/rose_in.c 		rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
sk                221 net/rose/rose_in.c 		rose_stop_timer(sk);
sk                222 net/rose/rose_in.c 		rose_start_idletimer(sk);
sk                229 net/rose/rose_in.c 		rose_requeue_frames(sk);
sk                233 net/rose/rose_in.c 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
sk                234 net/rose/rose_in.c 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
sk                250 net/rose/rose_in.c static int rose_state5_machine(struct sock *sk, struct sk_buff *skb, int frametype)
sk                253 net/rose/rose_in.c 		rose_write_internal(sk, ROSE_CLEAR_CONFIRMATION);
sk                254 net/rose/rose_in.c 		rose_disconnect(sk, 0, skb->data[3], skb->data[4]);
sk                255 net/rose/rose_in.c 		rose_sk(sk)->neighbour->use--;
sk                262 net/rose/rose_in.c int rose_process_rx_frame(struct sock *sk, struct sk_buff *skb)
sk                264 net/rose/rose_in.c 	struct rose_sock *rose = rose_sk(sk);
sk                274 net/rose/rose_in.c 		queued = rose_state1_machine(sk, skb, frametype);
sk                277 net/rose/rose_in.c 		queued = rose_state2_machine(sk, skb, frametype);
sk                280 net/rose/rose_in.c 		queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
sk                283 net/rose/rose_in.c 		queued = rose_state4_machine(sk, skb, frametype);
sk                286 net/rose/rose_in.c 		queued = rose_state5_machine(sk, skb, frametype);
sk                290 net/rose/rose_in.c 	rose_kick(sk);
sk                 64 net/rose/rose_loopback.c 	struct sock *sk;
sk                 91 net/rose/rose_loopback.c 		sk = rose_find_socket(lci_o, rose_loopback_neigh);
sk                 92 net/rose/rose_loopback.c 		if (sk) {
sk                 93 net/rose/rose_loopback.c 			if (rose_process_rx_frame(sk, skb) == 0)
sk                120 net/rose/rose_loopback.c 		skb->sk = NULL;
sk                 30 net/rose/rose_out.c static void rose_send_iframe(struct sock *sk, struct sk_buff *skb)
sk                 32 net/rose/rose_out.c 	struct rose_sock *rose = rose_sk(sk);
sk                 40 net/rose/rose_out.c 	rose_start_idletimer(sk);
sk                 45 net/rose/rose_out.c void rose_kick(struct sock *sk)
sk                 47 net/rose/rose_out.c 	struct rose_sock *rose = rose_sk(sk);
sk                 57 net/rose/rose_out.c 	if (!skb_peek(&sk->sk_write_queue))
sk                 73 net/rose/rose_out.c 	skb  = skb_dequeue(&sk->sk_write_queue);
sk                 77 net/rose/rose_out.c 			skb_queue_head(&sk->sk_write_queue, skb);
sk                 81 net/rose/rose_out.c 		skb_set_owner_w(skbn, sk);
sk                 86 net/rose/rose_out.c 		rose_send_iframe(sk, skbn);
sk                 96 net/rose/rose_out.c 		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
sk                101 net/rose/rose_out.c 	rose_stop_timer(sk);
sk                109 net/rose/rose_out.c void rose_enquiry_response(struct sock *sk)
sk                111 net/rose/rose_out.c 	struct rose_sock *rose = rose_sk(sk);
sk                114 net/rose/rose_out.c 		rose_write_internal(sk, ROSE_RNR);
sk                116 net/rose/rose_out.c 		rose_write_internal(sk, ROSE_RR);
sk                121 net/rose/rose_out.c 	rose_stop_timer(sk);
sk                858 net/rose/rose_route.c 	struct sock *sk;
sk                916 net/rose/rose_route.c 	if ((sk = rose_find_socket(lci, rose_neigh)) != NULL) {
sk                918 net/rose/rose_route.c 			struct rose_sock *rose = rose_sk(sk);
sk                921 net/rose/rose_route.c 			rose_clear_queues(sk);
sk                928 net/rose/rose_route.c 			sk->sk_state	 = TCP_CLOSE;
sk                929 net/rose/rose_route.c 			sk->sk_err	 = 0;
sk                930 net/rose/rose_route.c 			sk->sk_shutdown	 |= SEND_SHUTDOWN;
sk                931 net/rose/rose_route.c 			if (!sock_flag(sk, SOCK_DEAD)) {
sk                932 net/rose/rose_route.c 				sk->sk_state_change(sk);
sk                933 net/rose/rose_route.c 				sock_set_flag(sk, SOCK_DEAD);
sk                938 net/rose/rose_route.c 			res = rose_process_rx_frame(sk, skb);
sk                 32 net/rose/rose_subr.c void rose_clear_queues(struct sock *sk)
sk                 34 net/rose/rose_subr.c 	skb_queue_purge(&sk->sk_write_queue);
sk                 35 net/rose/rose_subr.c 	skb_queue_purge(&rose_sk(sk)->ack_queue);
sk                 43 net/rose/rose_subr.c void rose_frames_acked(struct sock *sk, unsigned short nr)
sk                 46 net/rose/rose_subr.c 	struct rose_sock *rose = rose_sk(sk);
sk                 60 net/rose/rose_subr.c void rose_requeue_frames(struct sock *sk)
sk                 69 net/rose/rose_subr.c 	while ((skb = skb_dequeue(&rose_sk(sk)->ack_queue)) != NULL) {
sk                 71 net/rose/rose_subr.c 			skb_queue_head(&sk->sk_write_queue, skb);
sk                 73 net/rose/rose_subr.c 			skb_append(skb_prev, skb, &sk->sk_write_queue);
sk                 82 net/rose/rose_subr.c int rose_validate_nr(struct sock *sk, unsigned short nr)
sk                 84 net/rose/rose_subr.c 	struct rose_sock *rose = rose_sk(sk);
sk                 99 net/rose/rose_subr.c void rose_write_internal(struct sock *sk, int frametype)
sk                101 net/rose/rose_subr.c 	struct rose_sock *rose = rose_sk(sk);
sk                530 net/rose/rose_subr.c void rose_disconnect(struct sock *sk, int reason, int cause, int diagnostic)
sk                532 net/rose/rose_subr.c 	struct rose_sock *rose = rose_sk(sk);
sk                534 net/rose/rose_subr.c 	rose_stop_timer(sk);
sk                535 net/rose/rose_subr.c 	rose_stop_idletimer(sk);
sk                537 net/rose/rose_subr.c 	rose_clear_queues(sk);
sk                548 net/rose/rose_subr.c 	sk->sk_state     = TCP_CLOSE;
sk                549 net/rose/rose_subr.c 	sk->sk_err       = reason;
sk                550 net/rose/rose_subr.c 	sk->sk_shutdown |= SEND_SHUTDOWN;
sk                552 net/rose/rose_subr.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                553 net/rose/rose_subr.c 		sk->sk_state_change(sk);
sk                554 net/rose/rose_subr.c 		sock_set_flag(sk, SOCK_DEAD);
sk                 32 net/rose/rose_timer.c void rose_start_heartbeat(struct sock *sk)
sk                 34 net/rose/rose_timer.c 	del_timer(&sk->sk_timer);
sk                 36 net/rose/rose_timer.c 	sk->sk_timer.function = rose_heartbeat_expiry;
sk                 37 net/rose/rose_timer.c 	sk->sk_timer.expires  = jiffies + 5 * HZ;
sk                 39 net/rose/rose_timer.c 	add_timer(&sk->sk_timer);
sk                 42 net/rose/rose_timer.c void rose_start_t1timer(struct sock *sk)
sk                 44 net/rose/rose_timer.c 	struct rose_sock *rose = rose_sk(sk);
sk                 54 net/rose/rose_timer.c void rose_start_t2timer(struct sock *sk)
sk                 56 net/rose/rose_timer.c 	struct rose_sock *rose = rose_sk(sk);
sk                 66 net/rose/rose_timer.c void rose_start_t3timer(struct sock *sk)
sk                 68 net/rose/rose_timer.c 	struct rose_sock *rose = rose_sk(sk);
sk                 78 net/rose/rose_timer.c void rose_start_hbtimer(struct sock *sk)
sk                 80 net/rose/rose_timer.c 	struct rose_sock *rose = rose_sk(sk);
sk                 90 net/rose/rose_timer.c void rose_start_idletimer(struct sock *sk)
sk                 92 net/rose/rose_timer.c 	struct rose_sock *rose = rose_sk(sk);
sk                104 net/rose/rose_timer.c void rose_stop_heartbeat(struct sock *sk)
sk                106 net/rose/rose_timer.c 	del_timer(&sk->sk_timer);
sk                109 net/rose/rose_timer.c void rose_stop_timer(struct sock *sk)
sk                111 net/rose/rose_timer.c 	del_timer(&rose_sk(sk)->timer);
sk                114 net/rose/rose_timer.c void rose_stop_idletimer(struct sock *sk)
sk                116 net/rose/rose_timer.c 	del_timer(&rose_sk(sk)->idletimer);
sk                121 net/rose/rose_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                122 net/rose/rose_timer.c 	struct rose_sock *rose = rose_sk(sk);
sk                124 net/rose/rose_timer.c 	bh_lock_sock(sk);
sk                129 net/rose/rose_timer.c 		if (sock_flag(sk, SOCK_DESTROY) ||
sk                130 net/rose/rose_timer.c 		    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
sk                131 net/rose/rose_timer.c 			bh_unlock_sock(sk);
sk                132 net/rose/rose_timer.c 			rose_destroy_socket(sk);
sk                141 net/rose/rose_timer.c 		if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf / 2) &&
sk                146 net/rose/rose_timer.c 			rose_write_internal(sk, ROSE_RR);
sk                147 net/rose/rose_timer.c 			rose_stop_timer(sk);	/* HB */
sk                153 net/rose/rose_timer.c 	rose_start_heartbeat(sk);
sk                154 net/rose/rose_timer.c 	bh_unlock_sock(sk);
sk                160 net/rose/rose_timer.c 	struct sock *sk = &rose->sock;
sk                162 net/rose/rose_timer.c 	bh_lock_sock(sk);
sk                166 net/rose/rose_timer.c 		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
sk                168 net/rose/rose_timer.c 		rose_start_t3timer(sk);
sk                173 net/rose/rose_timer.c 		rose_disconnect(sk, ETIMEDOUT, -1, -1);
sk                179 net/rose/rose_timer.c 			rose_enquiry_response(sk);
sk                183 net/rose/rose_timer.c 	bh_unlock_sock(sk);
sk                189 net/rose/rose_timer.c 	struct sock *sk = &rose->sock;
sk                191 net/rose/rose_timer.c 	bh_lock_sock(sk);
sk                192 net/rose/rose_timer.c 	rose_clear_queues(sk);
sk                194 net/rose/rose_timer.c 	rose_write_internal(sk, ROSE_CLEAR_REQUEST);
sk                195 net/rose/rose_timer.c 	rose_sk(sk)->state = ROSE_STATE_2;
sk                197 net/rose/rose_timer.c 	rose_start_t3timer(sk);
sk                199 net/rose/rose_timer.c 	sk->sk_state     = TCP_CLOSE;
sk                200 net/rose/rose_timer.c 	sk->sk_err       = 0;
sk                201 net/rose/rose_timer.c 	sk->sk_shutdown |= SEND_SHUTDOWN;
sk                203 net/rose/rose_timer.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                204 net/rose/rose_timer.c 		sk->sk_state_change(sk);
sk                205 net/rose/rose_timer.c 		sock_set_flag(sk, SOCK_DEAD);
sk                207 net/rose/rose_timer.c 	bh_unlock_sock(sk);
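Aside: taken together, the rose_destroy_socket(), rose_destroy_timer() and heartbeat entries above show ROSE's deferred-destruction scheme: if memory is still charged to the socket, destruction is retried from sk_timer roughly ten seconds later rather than freeing immediately. A compact sketch of that scheme, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <net/sock.h>

static void example_destroy_timer(struct timer_list *t);

/* Hypothetical teardown: free now if possible, otherwise retry from a timer. */
static void example_destroy_socket(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);	/* flush anything still queued */

	if (sk_has_allocations(sk)) {
		/* buffers are still charged to the socket: try again in ~10 s */
		timer_setup(&sk->sk_timer, example_destroy_timer, 0);
		sk->sk_timer.expires = jiffies + 10 * HZ;
		add_timer(&sk->sk_timer);
	} else {
		sock_put(sk);	/* drop the final reference */
	}
}

static void example_destroy_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	example_destroy_socket(sk);
}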
sk                 51 net/rxrpc/af_rxrpc.c static inline int rxrpc_writable(struct sock *sk)
sk                 53 net/rxrpc/af_rxrpc.c 	return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
sk                 59 net/rxrpc/af_rxrpc.c static void rxrpc_write_space(struct sock *sk)
sk                 61 net/rxrpc/af_rxrpc.c 	_enter("%p", sk);
sk                 63 net/rxrpc/af_rxrpc.c 	if (rxrpc_writable(sk)) {
sk                 64 net/rxrpc/af_rxrpc.c 		struct socket_wq *wq = rcu_dereference(sk->sk_wq);
sk                 68 net/rxrpc/af_rxrpc.c 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
sk                133 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
sk                144 net/rxrpc/af_rxrpc.c 	lock_sock(&rx->sk);
sk                146 net/rxrpc/af_rxrpc.c 	switch (rx->sk.sk_state) {
sk                149 net/rxrpc/af_rxrpc.c 		local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
sk                163 net/rxrpc/af_rxrpc.c 			rx->sk.sk_state = RXRPC_SERVER_BOUND;
sk                166 net/rxrpc/af_rxrpc.c 			rx->sk.sk_state = RXRPC_CLIENT_BOUND;
sk                182 net/rxrpc/af_rxrpc.c 		rx->sk.sk_state = RXRPC_SERVER_BOUND2;
sk                190 net/rxrpc/af_rxrpc.c 	release_sock(&rx->sk);
sk                200 net/rxrpc/af_rxrpc.c 	release_sock(&rx->sk);
sk                211 net/rxrpc/af_rxrpc.c 	struct sock *sk = sock->sk;
sk                212 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sk);
sk                218 net/rxrpc/af_rxrpc.c 	lock_sock(&rx->sk);
sk                220 net/rxrpc/af_rxrpc.c 	switch (rx->sk.sk_state) {
sk                233 net/rxrpc/af_rxrpc.c 		old = sk->sk_max_ack_backlog;
sk                234 net/rxrpc/af_rxrpc.c 		sk->sk_max_ack_backlog = backlog;
sk                237 net/rxrpc/af_rxrpc.c 			rx->sk.sk_state = RXRPC_SERVER_LISTENING;
sk                239 net/rxrpc/af_rxrpc.c 			sk->sk_max_ack_backlog = old;
sk                243 net/rxrpc/af_rxrpc.c 			rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
sk                244 net/rxrpc/af_rxrpc.c 			sk->sk_max_ack_backlog = 0;
sk                255 net/rxrpc/af_rxrpc.c 	release_sock(&rx->sk);
sk                294 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
sk                303 net/rxrpc/af_rxrpc.c 	lock_sock(&rx->sk);
sk                338 net/rxrpc/af_rxrpc.c static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
sk                356 net/rxrpc/af_rxrpc.c 	rxrpc_release_call(rxrpc_sk(sock->sk), call);
sk                412 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
sk                453 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
sk                464 net/rxrpc/af_rxrpc.c 	lock_sock(&rx->sk);
sk                470 net/rxrpc/af_rxrpc.c 	switch (rx->sk.sk_state) {
sk                472 net/rxrpc/af_rxrpc.c 		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
sk                486 net/rxrpc/af_rxrpc.c 	release_sock(&rx->sk);
sk                502 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
sk                505 net/rxrpc/af_rxrpc.c 	_enter(",{%d},,%zu", rx->sk.sk_state, len);
sk                518 net/rxrpc/af_rxrpc.c 	lock_sock(&rx->sk);
sk                520 net/rxrpc/af_rxrpc.c 	switch (rx->sk.sk_state) {
sk                540 net/rxrpc/af_rxrpc.c 		local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
sk                547 net/rxrpc/af_rxrpc.c 		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
sk                568 net/rxrpc/af_rxrpc.c 	release_sock(&rx->sk);
sk                580 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
sk                587 net/rxrpc/af_rxrpc.c 	lock_sock(&rx->sk);
sk                597 net/rxrpc/af_rxrpc.c 			if (rx->sk.sk_state != RXRPC_UNBOUND)
sk                607 net/rxrpc/af_rxrpc.c 			if (rx->sk.sk_state != RXRPC_UNBOUND)
sk                617 net/rxrpc/af_rxrpc.c 			if (rx->sk.sk_state != RXRPC_UNBOUND)
sk                627 net/rxrpc/af_rxrpc.c 			if (rx->sk.sk_state != RXRPC_UNBOUND)
sk                645 net/rxrpc/af_rxrpc.c 			if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
sk                669 net/rxrpc/af_rxrpc.c 	release_sock(&rx->sk);
sk                707 net/rxrpc/af_rxrpc.c 	struct sock *sk = sock->sk;
sk                708 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sk);
sk                722 net/rxrpc/af_rxrpc.c 	if (rxrpc_writable(sk))
sk                736 net/rxrpc/af_rxrpc.c 	struct sock *sk;
sk                751 net/rxrpc/af_rxrpc.c 	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
sk                752 net/rxrpc/af_rxrpc.c 	if (!sk)
sk                755 net/rxrpc/af_rxrpc.c 	sock_init_data(sock, sk);
sk                756 net/rxrpc/af_rxrpc.c 	sock_set_flag(sk, SOCK_RCU_FREE);
sk                757 net/rxrpc/af_rxrpc.c 	sk->sk_state		= RXRPC_UNBOUND;
sk                758 net/rxrpc/af_rxrpc.c 	sk->sk_write_space	= rxrpc_write_space;
sk                759 net/rxrpc/af_rxrpc.c 	sk->sk_max_ack_backlog	= 0;
sk                760 net/rxrpc/af_rxrpc.c 	sk->sk_destruct		= rxrpc_sock_destructor;
sk                762 net/rxrpc/af_rxrpc.c 	rx = rxrpc_sk(sk);
sk                774 net/rxrpc/af_rxrpc.c 	rxnet = rxrpc_net(sock_net(&rx->sk));
sk                786 net/rxrpc/af_rxrpc.c 	struct sock *sk = sock->sk;
sk                787 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sk);
sk                790 net/rxrpc/af_rxrpc.c 	_enter("%p,%d", sk, flags);
sk                794 net/rxrpc/af_rxrpc.c 	if (sk->sk_state == RXRPC_CLOSE)
sk                797 net/rxrpc/af_rxrpc.c 	lock_sock(sk);
sk                799 net/rxrpc/af_rxrpc.c 	spin_lock_bh(&sk->sk_receive_queue.lock);
sk                800 net/rxrpc/af_rxrpc.c 	if (sk->sk_state < RXRPC_CLOSE) {
sk                801 net/rxrpc/af_rxrpc.c 		sk->sk_state = RXRPC_CLOSE;
sk                802 net/rxrpc/af_rxrpc.c 		sk->sk_shutdown = SHUTDOWN_MASK;
sk                806 net/rxrpc/af_rxrpc.c 	spin_unlock_bh(&sk->sk_receive_queue.lock);
sk                810 net/rxrpc/af_rxrpc.c 	release_sock(sk);
sk                817 net/rxrpc/af_rxrpc.c static void rxrpc_sock_destructor(struct sock *sk)
sk                819 net/rxrpc/af_rxrpc.c 	_enter("%p", sk);
sk                821 net/rxrpc/af_rxrpc.c 	rxrpc_purge_queue(&sk->sk_receive_queue);
sk                823 net/rxrpc/af_rxrpc.c 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
sk                824 net/rxrpc/af_rxrpc.c 	WARN_ON(!sk_unhashed(sk));
sk                825 net/rxrpc/af_rxrpc.c 	WARN_ON(sk->sk_socket);
sk                827 net/rxrpc/af_rxrpc.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                828 net/rxrpc/af_rxrpc.c 		printk("Attempt to release alive rxrpc socket: %p\n", sk);
sk                836 net/rxrpc/af_rxrpc.c static int rxrpc_release_sock(struct sock *sk)
sk                838 net/rxrpc/af_rxrpc.c 	struct rxrpc_sock *rx = rxrpc_sk(sk);
sk                840 net/rxrpc/af_rxrpc.c 	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
sk                843 net/rxrpc/af_rxrpc.c 	sock_orphan(sk);
sk                844 net/rxrpc/af_rxrpc.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk                850 net/rxrpc/af_rxrpc.c 	switch (sk->sk_state) {
sk                859 net/rxrpc/af_rxrpc.c 	spin_lock_bh(&sk->sk_receive_queue.lock);
sk                860 net/rxrpc/af_rxrpc.c 	sk->sk_state = RXRPC_CLOSE;
sk                861 net/rxrpc/af_rxrpc.c 	spin_unlock_bh(&sk->sk_receive_queue.lock);
sk                873 net/rxrpc/af_rxrpc.c 	rxrpc_purge_queue(&sk->sk_receive_queue);
sk                882 net/rxrpc/af_rxrpc.c 	sock_put(sk);
sk                893 net/rxrpc/af_rxrpc.c 	struct sock *sk = sock->sk;
sk                895 net/rxrpc/af_rxrpc.c 	_enter("%p{%p}", sock, sk);
sk                897 net/rxrpc/af_rxrpc.c 	if (!sk)
sk                900 net/rxrpc/af_rxrpc.c 	sock->sk = NULL;
sk                902 net/rxrpc/af_rxrpc.c 	return rxrpc_release_sock(sk);
sk                132 net/rxrpc/ar-internal.h 	struct sock		sk;
sk                162 net/rxrpc/ar-internal.h #define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk)
sk                 38 net/rxrpc/call_accept.c 	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
sk                 43 net/rxrpc/call_accept.c 	max = rx->sk.sk_max_ack_backlog;
sk                 44 net/rxrpc/call_accept.c 	tmp = rx->sk.sk_ack_backlog;
sk                188 net/rxrpc/call_accept.c 	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
sk                363 net/rxrpc/call_accept.c 	if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
sk                364 net/rxrpc/call_accept.c 	    rx->sk.sk_state == RXRPC_CLOSE) {
sk                397 net/rxrpc/call_accept.c 		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
sk                399 net/rxrpc/call_accept.c 		sk_acceptq_added(&rx->sk);
sk                463 net/rxrpc/call_accept.c 	__releases(&rx->sk.sk_lock.slock)
sk                478 net/rxrpc/call_accept.c 		release_sock(&rx->sk);
sk                511 net/rxrpc/call_accept.c 		release_sock(&rx->sk);
sk                518 net/rxrpc/call_accept.c 	sk_acceptq_removed(&rx->sk);
sk                561 net/rxrpc/call_accept.c 	release_sock(&rx->sk);
sk                578 net/rxrpc/call_accept.c 	release_sock(&rx->sk);
sk                610 net/rxrpc/call_accept.c 	sk_acceptq_removed(&rx->sk);
sk                660 net/rxrpc/call_accept.c 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
sk                663 net/rxrpc/call_accept.c 	if (sock->sk->sk_state == RXRPC_CLOSE)
sk                102 net/rxrpc/call_object.c 	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
sk                123 net/rxrpc/call_object.c 	if (rx->sk.sk_kern_sock)
sk                222 net/rxrpc/call_object.c 	__releases(&rx->sk.sk_lock.slock)
sk                235 net/rxrpc/call_object.c 		release_sock(&rx->sk);
sk                284 net/rxrpc/call_object.c 	release_sock(&rx->sk);
sk                310 net/rxrpc/call_object.c 	release_sock(&rx->sk);
sk                913 net/rxrpc/key.c 	key = request_key_net(&key_type_rxrpc, description, sock_net(&rx->sk), NULL);
sk                944 net/rxrpc/key.c 	key = request_key_net(&key_type_keyring, description, sock_net(&rx->sk), NULL);
sk                124 net/rxrpc/local_object.c 	usk = local->socket->sk;
sk                210 net/rxrpc/local_object.c 	local->socket->sk->sk_user_data = NULL;
sk                430 net/rxrpc/local_object.c 		socket->sk->sk_user_data = NULL;
sk                146 net/rxrpc/peer_event.c void rxrpc_error_report(struct sock *sk)
sk                155 net/rxrpc/peer_event.c 	local = rcu_dereference_sk_user_data(sk);
sk                160 net/rxrpc/peer_event.c 	_enter("%p{%d}", sk, local->debug_id);
sk                165 net/rxrpc/peer_event.c 	sock_error(sk);
sk                167 net/rxrpc/peer_event.c 	skb = sock_dequeue_err_skb(sk);
sk                155 net/rxrpc/peer_object.c 	struct net *net = sock_net(&rx->sk);
sk                 26 net/rxrpc/recvmsg.c 	struct sock *sk;
sk                 36 net/rxrpc/recvmsg.c 	sk = &rx->sk;
sk                 37 net/rxrpc/recvmsg.c 	if (rx && sk->sk_state < RXRPC_CLOSE) {
sk                 40 net/rxrpc/recvmsg.c 			call->notify_rx(sk, call, call->user_call_ID);
sk                 50 net/rxrpc/recvmsg.c 			if (!sock_flag(sk, SOCK_DEAD)) {
sk                 51 net/rxrpc/recvmsg.c 				_debug("call %ps", sk->sk_data_ready);
sk                 52 net/rxrpc/recvmsg.c 				sk->sk_data_ready(sk);
sk                361 net/rxrpc/recvmsg.c 			sock_recv_timestamp(msg, sock->sk, skb);
sk                444 net/rxrpc/recvmsg.c 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
sk                457 net/rxrpc/recvmsg.c 	timeo = sock_rcvtimeo(&rx->sk, flags & MSG_DONTWAIT);
sk                460 net/rxrpc/recvmsg.c 	lock_sock(&rx->sk);
sk                465 net/rxrpc/recvmsg.c 	    rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
sk                466 net/rxrpc/recvmsg.c 		release_sock(&rx->sk);
sk                477 net/rxrpc/recvmsg.c 		release_sock(&rx->sk);
sk                480 net/rxrpc/recvmsg.c 		prepare_to_wait_exclusive(sk_sleep(&rx->sk), &wait,
sk                482 net/rxrpc/recvmsg.c 		ret = sock_error(&rx->sk);
sk                493 net/rxrpc/recvmsg.c 		finish_wait(sk_sleep(&rx->sk), &wait);
sk                523 net/rxrpc/recvmsg.c 	release_sock(&rx->sk);
sk                609 net/rxrpc/recvmsg.c 	release_sock(&rx->sk);
sk                617 net/rxrpc/recvmsg.c 	finish_wait(sk_sleep(&rx->sk), &wait);
sk                186 net/rxrpc/sendmsg.c 		notify_end_tx(&rx->sk, call, call->user_call_ID);
sk                299 net/rxrpc/sendmsg.c 	struct sock *sk = &rx->sk;
sk                304 net/rxrpc/sendmsg.c 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
sk                307 net/rxrpc/sendmsg.c 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                309 net/rxrpc/sendmsg.c 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
sk                364 net/rxrpc/sendmsg.c 				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
sk                597 net/rxrpc/sendmsg.c 	__releases(&rx->sk.sk_lock.slock)
sk                609 net/rxrpc/sendmsg.c 		release_sock(&rx->sk);
sk                639 net/rxrpc/sendmsg.c 	__releases(&rx->sk.sk_lock.slock)
sk                666 net/rxrpc/sendmsg.c 		if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
sk                701 net/rxrpc/sendmsg.c 		release_sock(&rx->sk);
sk                776 net/rxrpc/sendmsg.c 	release_sock(&rx->sk);
sk                813 net/rxrpc/sendmsg.c 		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
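Aside: the af_rxrpc.c entries near the top of this group (rxrpc_writable() and rxrpc_write_space()) illustrate the usual write-space wake-up: the socket counts as writable while sk_wmem_alloc stays below sk_sndbuf, and the write_space callback then wakes poll() sleepers and signals async waiters. A hedged sketch of the same idea, assuming skwq_has_sleeper() and sk_wake_async() as the wake-up primitives:

#include <net/sock.h>

static inline bool example_writable(const struct sock *sk)
{
	/* writable while queued write memory stays under the send buffer limit */
	return refcount_read(&sk->sk_wmem_alloc) < (size_t)sk->sk_sndbuf;
}

static void example_write_space(struct sock *sk)
{
	rcu_read_lock();
	if (example_writable(sk)) {
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		if (skwq_has_sleeper(wq))
			wake_up_interruptible(&wq->wait);	/* wake poll()ers */
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);	/* SIGIO/async users */
	}
	rcu_read_unlock();
}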
sk               1385 net/sched/act_api.c 	struct net *net = sock_net(skb->sk);
sk               1457 net/sched/act_api.c 	struct net *net = sock_net(skb->sk);
sk               1922 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
sk               2149 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
sk               2309 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
sk               2440 net/sched/cls_api.c 	struct net *net = sock_net(a->skb->sk);
sk               2452 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
sk               2512 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
sk               2762 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
sk               2896 net/sched/cls_api.c 	struct net *net = sock_net(skb->sk);
sk                 73 net/sched/cls_flow.c 	return addr_fold(skb->sk);
sk                 98 net/sched/cls_flow.c 	return addr_fold(skb->sk);
sk                204 net/sched/cls_flow.c 	struct sock *sk = skb_to_full_sk(skb);
sk                206 net/sched/cls_flow.c 	if (sk && sk->sk_socket && sk->sk_socket->file) {
sk                207 net/sched/cls_flow.c 		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;
sk                216 net/sched/cls_flow.c 	struct sock *sk = skb_to_full_sk(skb);
sk                218 net/sched/cls_flow.c 	if (sk && sk->sk_socket && sk->sk_socket->file) {
sk                219 net/sched/cls_flow.c 		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;
sk                429 net/sched/cls_flow.c 		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
sk                273 net/sched/em_meta.c 	(unlikely(skb->sk == NULL))
sk                281 net/sched/em_meta.c 	dst->value = skb->sk->sk_family;
sk                290 net/sched/em_meta.c 	dst->value = skb->sk->sk_state;
sk                299 net/sched/em_meta.c 	dst->value = skb->sk->sk_reuse;
sk                309 net/sched/em_meta.c 	dst->value = skb->sk->sk_bound_dev_if;
sk                319 net/sched/em_meta.c 	if (skb->sk->sk_bound_dev_if == 0) {
sk                326 net/sched/em_meta.c 		dev = dev_get_by_index_rcu(sock_net(skb->sk),
sk                327 net/sched/em_meta.c 					   skb->sk->sk_bound_dev_if);
sk                339 net/sched/em_meta.c 	dst->value = refcount_read(&skb->sk->sk_refcnt);
sk                344 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                346 net/sched/em_meta.c 	if (!sk) {
sk                350 net/sched/em_meta.c 	dst->value = sk->sk_rcvbuf;
sk                355 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                357 net/sched/em_meta.c 	if (!sk) {
sk                361 net/sched/em_meta.c 	dst->value = sk->sk_shutdown;
sk                366 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                368 net/sched/em_meta.c 	if (!sk) {
sk                372 net/sched/em_meta.c 	dst->value = sk->sk_protocol;
sk                377 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                379 net/sched/em_meta.c 	if (!sk) {
sk                383 net/sched/em_meta.c 	dst->value = sk->sk_type;
sk                388 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                390 net/sched/em_meta.c 	if (!sk) {
sk                394 net/sched/em_meta.c 	dst->value = sk_rmem_alloc_get(sk);
sk                399 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                401 net/sched/em_meta.c 	if (!sk) {
sk                405 net/sched/em_meta.c 	dst->value = sk_wmem_alloc_get(sk);
sk                410 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                412 net/sched/em_meta.c 	if (!sk) {
sk                416 net/sched/em_meta.c 	dst->value = atomic_read(&sk->sk_omem_alloc);
sk                421 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                423 net/sched/em_meta.c 	if (!sk) {
sk                427 net/sched/em_meta.c 	dst->value = sk->sk_receive_queue.qlen;
sk                432 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                434 net/sched/em_meta.c 	if (!sk) {
sk                438 net/sched/em_meta.c 	dst->value = sk->sk_write_queue.qlen;
sk                443 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                445 net/sched/em_meta.c 	if (!sk) {
sk                449 net/sched/em_meta.c 	dst->value = READ_ONCE(sk->sk_wmem_queued);
sk                454 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                456 net/sched/em_meta.c 	if (!sk) {
sk                460 net/sched/em_meta.c 	dst->value = sk->sk_forward_alloc;
sk                465 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                467 net/sched/em_meta.c 	if (!sk) {
sk                471 net/sched/em_meta.c 	dst->value = sk->sk_sndbuf;
sk                476 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                478 net/sched/em_meta.c 	if (!sk) {
sk                482 net/sched/em_meta.c 	dst->value = (__force int) sk->sk_allocation;
sk                491 net/sched/em_meta.c 	dst->value = skb->sk->sk_hash;
sk                496 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                498 net/sched/em_meta.c 	if (!sk) {
sk                502 net/sched/em_meta.c 	dst->value = sk->sk_lingertime / HZ;
sk                507 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                509 net/sched/em_meta.c 	if (!sk) {
sk                513 net/sched/em_meta.c 	dst->value = sk->sk_error_queue.qlen;
sk                518 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                520 net/sched/em_meta.c 	if (!sk) {
sk                524 net/sched/em_meta.c 	dst->value = sk->sk_ack_backlog;
sk                529 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                531 net/sched/em_meta.c 	if (!sk) {
sk                535 net/sched/em_meta.c 	dst->value = sk->sk_max_ack_backlog;
sk                540 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                542 net/sched/em_meta.c 	if (!sk) {
sk                546 net/sched/em_meta.c 	dst->value = sk->sk_priority;
sk                551 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                553 net/sched/em_meta.c 	if (!sk) {
sk                557 net/sched/em_meta.c 	dst->value = READ_ONCE(sk->sk_rcvlowat);
sk                562 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                564 net/sched/em_meta.c 	if (!sk) {
sk                568 net/sched/em_meta.c 	dst->value = sk->sk_rcvtimeo / HZ;
sk                573 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                575 net/sched/em_meta.c 	if (!sk) {
sk                579 net/sched/em_meta.c 	dst->value = sk->sk_sndtimeo / HZ;
sk                584 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                586 net/sched/em_meta.c 	if (!sk) {
sk                590 net/sched/em_meta.c 	dst->value = sk->sk_frag.offset;
sk                595 net/sched/em_meta.c 	const struct sock *sk = skb_to_full_sk(skb);
sk                597 net/sched/em_meta.c 	if (!sk) {
sk                601 net/sched/em_meta.c 	dst->value = sk->sk_write_pending;
sk               1410 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
sk               1495 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
sk               1729 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
sk               1978 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
sk               2191 net/sched/sch_api.c 	struct net *net = sock_net(skb->sk);
sk                 79 net/sched/sch_etf.c 	struct sock *sk = nskb->sk;
sk                 85 net/sched/sch_etf.c 	if (!sk || !sk_fullsock(sk))
sk                 88 net/sched/sch_etf.c 	if (!sock_flag(sk, SOCK_TXTIME))
sk                 94 net/sched/sch_etf.c 	if (sk->sk_clockid != q->clockid)
sk                 97 net/sched/sch_etf.c 	if (sk->sk_txtime_deadline_mode != q->deadline_mode)
sk                140 net/sched/sch_etf.c 	struct sock *sk = skb->sk;
sk                142 net/sched/sch_etf.c 	if (!sk || !sk_fullsock(sk) || !(sk->sk_txtime_report_errors))
sk                158 net/sched/sch_etf.c 	if (sock_queue_err_skb(sk, clone))
sk                 76 net/sched/sch_fq.c 	struct sock	*sk;
sk                203 net/sched/sch_fq.c 		  struct sock *sk)
sk                215 net/sched/sch_fq.c 		if (f->sk == sk)
sk                224 net/sched/sch_fq.c 		if (f->sk > sk)
sk                244 net/sched/sch_fq.c 	struct sock *sk = skb->sk;
sk                261 net/sched/sch_fq.c 	if (!sk || sk_listener(sk)) {
sk                267 net/sched/sch_fq.c 		sk = (struct sock *)((hash << 1) | 1UL);
sk                269 net/sched/sch_fq.c 	} else if (sk->sk_state == TCP_CLOSE) {
sk                279 net/sched/sch_fq.c 		sk = (struct sock *)((hash << 1) | 1UL);
sk                282 net/sched/sch_fq.c 	root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
sk                286 net/sched/sch_fq.c 		fq_gc(q, root, sk);
sk                294 net/sched/sch_fq.c 		if (f->sk == sk) {
sk                300 net/sched/sch_fq.c 			if (unlikely(skb->sk == sk &&
sk                301 net/sched/sch_fq.c 				     f->socket_hash != sk->sk_hash)) {
sk                303 net/sched/sch_fq.c 				f->socket_hash = sk->sk_hash;
sk                305 net/sched/sch_fq.c 					smp_store_release(&sk->sk_pacing_status,
sk                313 net/sched/sch_fq.c 		if (f->sk > sk)
sk                327 net/sched/sch_fq.c 	f->sk = sk;
sk                328 net/sched/sch_fq.c 	if (skb->sk == sk) {
sk                329 net/sched/sch_fq.c 		f->socket_hash = sk->sk_hash;
sk                331 net/sched/sch_fq.c 			smp_store_release(&sk->sk_pacing_status,
sk                565 net/sched/sch_fq.c 		if (skb->sk)
sk                566 net/sched/sch_fq.c 			rate = min(skb->sk->sk_pacing_rate, rate);
sk                673 net/sched/sch_fq.c 			nroot = &new_array[hash_ptr(of->sk, new_log)];
sk                681 net/sched/sch_fq.c 				BUG_ON(nf->sk == of->sk);
sk                683 net/sched/sch_fq.c 				if (nf->sk > of->sk)
sk                426 net/sched/sch_taprio.c 	if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
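Aside: almost every em_meta.c entry above follows the same guard: fetch a full socket with skb_to_full_sk() (request and timewait minisocks do not carry these fields), bail out if there is none, and only then read the sk_* member being matched on. A small illustrative sketch, with a hypothetical helper name:

#include <net/inet_sock.h>
#include <net/sock.h>

/* Hypothetical: read sk_sndbuf for a classifier match, or report "missing". */
static int example_meta_sndbuf(const struct sk_buff *skb, unsigned long *value)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk)
		return -ENOENT;	/* no full socket behind this skb */

	*value = sk->sk_sndbuf;
	return 0;
}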
sk                 54 net/sctp/associola.c 					const struct sock *sk,
sk                 62 net/sctp/associola.c 	sp = sctp_sk((struct sock *)sk);
sk                 66 net/sctp/associola.c 	asoc->base.sk = (struct sock *)sk;
sk                 67 net/sctp/associola.c 	asoc->base.net = sock_net(sk);
sk                 70 net/sctp/associola.c 	sock_hold(asoc->base.sk);
sk                157 net/sctp/associola.c 	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
sk                160 net/sctp/associola.c 		asoc->rwnd = sk->sk_rcvbuf/2;
sk                239 net/sctp/associola.c 	if (asoc->base.sk->sk_family == PF_INET6)
sk                277 net/sctp/associola.c 	sock_put(asoc->base.sk);
sk                284 net/sctp/associola.c 					      const struct sock *sk,
sk                293 net/sctp/associola.c 	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
sk                313 net/sctp/associola.c 	struct sock *sk = asoc->base.sk;
sk                327 net/sctp/associola.c 		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk                328 net/sctp/associola.c 			sk->sk_ack_backlog--;
sk                405 net/sctp/associola.c 	sock_put(asoc->base.sk);
sk                582 net/sctp/associola.c 	struct net *net = sock_net(asoc->base.sk);
sk                587 net/sctp/associola.c 	sp = sctp_sk(asoc->base.sk);
sk                981 net/sctp/associola.c 	struct net *net = sock_net(asoc->base.sk);
sk               1072 net/sctp/associola.c 	struct sock *oldsk = assoc->base.sk;
sk               1085 net/sctp/associola.c 	sock_put(assoc->base.sk);
sk               1092 net/sctp/associola.c 	assoc->base.sk = newsk;
sk               1093 net/sctp/associola.c 	sock_hold(assoc->base.sk);
sk               1398 net/sctp/associola.c 	int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
sk               1449 net/sctp/associola.c 	struct net *net = sock_net(asoc->base.sk);
sk               1457 net/sctp/associola.c 			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
sk               1540 net/sctp/associola.c 		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
sk               1547 net/sctp/associola.c 	if (rx_count >= asoc->base.sk->sk_rcvbuf)
sk               1577 net/sctp/associola.c 	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
sk               1583 net/sctp/associola.c 	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
sk               1610 net/sctp/associola.c 				 sctp_sk(asoc->base.sk)))
sk                476 net/sctp/bind_addr.c int sctp_is_any(struct sock *sk, const union sctp_addr *addr)
sk                484 net/sctp/bind_addr.c 	else if (sk)
sk                485 net/sctp/bind_addr.c 		fam = sk->sk_family;
sk                536 net/sctp/bind_addr.c int sctp_is_ep_boundall(struct sock *sk)
sk                541 net/sctp/bind_addr.c 	bp = &sctp_sk(sk)->ep->base.bind_addr;
sk                545 net/sctp/bind_addr.c 		if (sctp_is_any(sk, &addr->a))
sk                178 net/sctp/chunk.c 		max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
sk                230 net/sctp/chunk.c 			SCTP_INC_STATS(sock_net(asoc->base.sk),
sk                 22 net/sctp/diag.c static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
sk                 27 net/sctp/diag.c 					struct sock *sk,
sk                 39 net/sctp/diag.c 	r->idiag_family = sk->sk_family;
sk                 43 net/sctp/diag.c 	sock_diag_save_cookie(sk, r->id.idiag_cookie);
sk                 46 net/sctp/diag.c 	if (sk->sk_family == AF_INET6) {
sk                123 net/sctp/diag.c static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
sk                131 net/sctp/diag.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk                145 net/sctp/diag.c 	BUG_ON(!sk_fullsock(sk));
sk                148 net/sctp/diag.c 		inet_diag_msg_sctpasoc_fill(r, sk, asoc);
sk                150 net/sctp/diag.c 		inet_diag_msg_common_fill(r, sk);
sk                151 net/sctp/diag.c 		r->idiag_state = sk->sk_state;
sk                156 net/sctp/diag.c 	if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
sk                166 net/sctp/diag.c 			amt = sk_wmem_alloc_get(sk);
sk                171 net/sctp/diag.c 			amt = sk_rmem_alloc_get(sk);
sk                173 net/sctp/diag.c 		mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
sk                174 net/sctp/diag.c 		mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
sk                175 net/sctp/diag.c 		mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
sk                176 net/sctp/diag.c 		mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
sk                177 net/sctp/diag.c 		mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
sk                178 net/sctp/diag.c 		mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
sk                179 net/sctp/diag.c 		mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
sk                198 net/sctp/diag.c 	sctp_diag_get_info(sk, r, &infox);
sk                251 net/sctp/diag.c 	struct sock *sk = tsp->asoc->base.sk;
sk                256 net/sctp/diag.c 	struct net *net = sock_net(in_skb->sk);
sk                260 net/sctp/diag.c 	err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
sk                269 net/sctp/diag.c 	lock_sock(sk);
sk                270 net/sctp/diag.c 	if (sk != assoc->base.sk) {
sk                271 net/sctp/diag.c 		release_sock(sk);
sk                272 net/sctp/diag.c 		sk = assoc->base.sk;
sk                273 net/sctp/diag.c 		lock_sock(sk);
sk                275 net/sctp/diag.c 	err = inet_sctp_diag_fill(sk, assoc, rep, req,
sk                276 net/sctp/diag.c 				  sk_user_ns(NETLINK_CB(in_skb).sk),
sk                280 net/sctp/diag.c 	release_sock(sk);
sk                299 net/sctp/diag.c 	struct sock *sk = ep->base.sk;
sk                306 net/sctp/diag.c 	lock_sock(sk);
sk                319 net/sctp/diag.c 		    inet_sctp_diag_fill(sk, NULL, skb, r,
sk                320 net/sctp/diag.c 					sk_user_ns(NETLINK_CB(cb->skb).sk),
sk                330 net/sctp/diag.c 		if (inet_sctp_diag_fill(sk, assoc, skb, r,
sk                331 net/sctp/diag.c 					sk_user_ns(NETLINK_CB(cb->skb).sk),
sk                345 net/sctp/diag.c 	release_sock(sk);
sk                353 net/sctp/diag.c 	struct sock *sk = ep->base.sk;
sk                362 net/sctp/diag.c 	if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family)
sk                371 net/sctp/diag.c 	struct sock *sk = ep->base.sk;
sk                375 net/sctp/diag.c 	struct net *net = sock_net(skb->sk);
sk                376 net/sctp/diag.c 	struct inet_sock *inet = inet_sk(sk);
sk                379 net/sctp/diag.c 	if (!net_eq(sock_net(sk), net))
sk                389 net/sctp/diag.c 	    sk->sk_family != r->sdiag_family)
sk                400 net/sctp/diag.c 	if (inet_sctp_diag_fill(sk, NULL, skb, r,
sk                401 net/sctp/diag.c 				sk_user_ns(NETLINK_CB(cb->skb).sk),
sk                415 net/sctp/diag.c static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
sk                424 net/sctp/diag.c 		r->idiag_rqueue = sk->sk_ack_backlog;
sk                425 net/sctp/diag.c 		r->idiag_wqueue = sk->sk_max_ack_backlog;
sk                428 net/sctp/diag.c 		sctp_get_sctp_info(sk, infox->asoc, infox->sctpinfo);
sk                435 net/sctp/diag.c 	struct net *net = sock_net(in_skb->sk);
sk                472 net/sctp/diag.c 	struct net *net = sock_net(skb->sk);
sk                 42 net/sctp/endpointola.c 						struct sock *sk,
sk                 45 net/sctp/endpointola.c 	struct net *net = sock_net(sk);
sk                 86 net/sctp/endpointola.c 	sk->sk_data_ready = sctp_data_ready;
sk                 87 net/sctp/endpointola.c 	sk->sk_write_space = sctp_write_space;
sk                 88 net/sctp/endpointola.c 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
sk                112 net/sctp/endpointola.c 	ep->base.sk = sk;
sk                113 net/sctp/endpointola.c 	ep->base.net = sock_net(sk);
sk                114 net/sctp/endpointola.c 	sock_hold(ep->base.sk);
sk                129 net/sctp/endpointola.c struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
sk                138 net/sctp/endpointola.c 	if (!sctp_endpoint_init(ep, sk, gfp))
sk                154 net/sctp/endpointola.c 	struct sock *sk = ep->base.sk;
sk                167 net/sctp/endpointola.c 	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk                168 net/sctp/endpointola.c 		sk->sk_ack_backlog++;
sk                178 net/sctp/endpointola.c 	inet_sk_set_state(ep->base.sk, SCTP_SS_CLOSED);
sk                189 net/sctp/endpointola.c 	struct sock *sk;
sk                211 net/sctp/endpointola.c 	sk = ep->base.sk;
sk                213 net/sctp/endpointola.c 	if (sctp_sk(sk)->bind_hash)
sk                214 net/sctp/endpointola.c 		sctp_put_port(sk);
sk                216 net/sctp/endpointola.c 	sctp_sk(sk)->ep = NULL;
sk                218 net/sctp/endpointola.c 	sock_put(sk);
sk                247 net/sctp/endpointola.c 	    net_eq(sock_net(ep->base.sk), net)) {
sk                249 net/sctp/endpointola.c 					 sctp_sk(ep->base.sk)))
sk                296 net/sctp/endpointola.c 	struct net *net = sock_net(ep->base.sk);
sk                319 net/sctp/endpointola.c 	struct sock *sk;
sk                334 net/sctp/endpointola.c 	sk = ep->base.sk;
sk                335 net/sctp/endpointola.c 	net = sock_net(sk);
sk                387 net/sctp/endpointola.c 			SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS);
sk                404 net/sctp/endpointola.c 		if (!sctp_sk(sk)->ep)
sk                 64 net/sctp/input.c static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb);
sk                 87 net/sctp/input.c 	struct sock *sk;
sk                166 net/sctp/input.c 	sk = rcvr->sk;
sk                172 net/sctp/input.c 	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
sk                181 net/sctp/input.c 		sk = net->sctp.ctl_sock;
sk                182 net/sctp/input.c 		ep = sctp_sk(sk)->ep;
sk                202 net/sctp/input.c 	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
sk                206 net/sctp/input.c 	if (sk_filter(sk, skb))
sk                210 net/sctp/input.c 	chunk = sctp_chunkify(skb, asoc, sk, GFP_ATOMIC);
sk                231 net/sctp/input.c 	bh_lock_sock(sk);
sk                233 net/sctp/input.c 	if (sk != rcvr->sk) {
sk                241 net/sctp/input.c 		bh_unlock_sock(sk);
sk                242 net/sctp/input.c 		sk = rcvr->sk;
sk                243 net/sctp/input.c 		bh_lock_sock(sk);
sk                246 net/sctp/input.c 	if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
sk                247 net/sctp/input.c 		if (sctp_add_backlog(sk, skb)) {
sk                248 net/sctp/input.c 			bh_unlock_sock(sk);
sk                259 net/sctp/input.c 	bh_unlock_sock(sk);
sk                289 net/sctp/input.c int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk                308 net/sctp/input.c 	if (unlikely(rcvr->sk != sk)) {
sk                320 net/sctp/input.c 		sk = rcvr->sk;
sk                322 net/sctp/input.c 		bh_lock_sock(sk);
sk                324 net/sctp/input.c 		if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
sk                325 net/sctp/input.c 			if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
sk                332 net/sctp/input.c 		bh_unlock_sock(sk);
sk                339 net/sctp/input.c 		if (!sctp_newsk_ready(sk)) {
sk                340 net/sctp/input.c 			if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
sk                360 net/sctp/input.c static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
sk                367 net/sctp/input.c 	ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
sk                385 net/sctp/input.c void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
sk                391 net/sctp/input.c 	if (sock_owned_by_user(sk)) {
sk                419 net/sctp/input.c void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
sk                424 net/sctp/input.c 	if (sock_owned_by_user(sk) || !t)
sk                428 net/sctp/input.c 		dst->ops->redirect(dst, sk, skb);
sk                442 net/sctp/input.c void sctp_icmp_proto_unreachable(struct sock *sk,
sk                446 net/sctp/input.c 	if (sock_owned_by_user(sk)) {
sk                455 net/sctp/input.c 		struct net *net = sock_net(sk);
sk                480 net/sctp/input.c 	struct sock *sk = NULL;
sk                503 net/sctp/input.c 	sk = asoc->base.sk;
sk                532 net/sctp/input.c 	bh_lock_sock(sk);
sk                537 net/sctp/input.c 	if (sock_owned_by_user(sk))
sk                542 net/sctp/input.c 	return sk;
sk                550 net/sctp/input.c void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
sk                552 net/sctp/input.c 	bh_unlock_sock(sk);
sk                577 net/sctp/input.c 	struct sock *sk;
sk                590 net/sctp/input.c 	sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport);
sk                594 net/sctp/input.c 	if (!sk) {
sk                612 net/sctp/input.c 			sctp_icmp_frag_needed(sk, asoc, transport,
sk                617 net/sctp/input.c 				sctp_icmp_proto_unreachable(sk, asoc,
sk                634 net/sctp/input.c 		sctp_icmp_redirect(sk, transport, skb);
sk                640 net/sctp/input.c 	inet = inet_sk(sk);
sk                641 net/sctp/input.c 	if (!sock_owned_by_user(sk) && inet->recverr) {
sk                642 net/sctp/input.c 		sk->sk_err = err;
sk                643 net/sctp/input.c 		sk->sk_error_report(sk);
sk                645 net/sctp/input.c 		sk->sk_err_soft = err;
sk                649 net/sctp/input.c 	sctp_err_finish(sk, transport);
sk                720 net/sctp/input.c 	struct sock *sk = ep->base.sk;
sk                721 net/sctp/input.c 	struct net *net = sock_net(sk);
sk                729 net/sctp/input.c 	if (sk->sk_reuseport) {
sk                730 net/sctp/input.c 		bool any = sctp_is_ep_boundall(sk);
sk                739 net/sctp/input.c 			struct sock *sk2 = epb2->sk;
sk                741 net/sctp/input.c 			if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
sk                742 net/sctp/input.c 			    !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
sk                747 net/sctp/input.c 						    sctp_sk(sk), cnt);
sk                749 net/sctp/input.c 				err = reuseport_add_sock(sk, sk2, any);
sk                759 net/sctp/input.c 			err = reuseport_alloc(sk, any);
sk                786 net/sctp/input.c 	struct sock *sk = ep->base.sk;
sk                792 net/sctp/input.c 	epb->hashent = sctp_ep_hashfn(sock_net(sk), epb->bind_addr.port);
sk                796 net/sctp/input.c 	if (rcu_access_pointer(sk->sk_reuseport_cb))
sk                797 net/sctp/input.c 		reuseport_detach_sock(sk);
sk                835 net/sctp/input.c 	struct sock *sk;
sk                852 net/sctp/input.c 	sk = ep->base.sk;
sk                853 net/sctp/input.c 	if (sk->sk_reuseport) {
sk                856 net/sctp/input.c 		sk = reuseport_select_sock(sk, phash, skb,
sk                858 net/sctp/input.c 		if (sk)
sk                859 net/sctp/input.c 			ep = sctp_sk(sk)->ep;
sk                940 net/sctp/input.c 	arg.net   = sock_net(t->asoc->base.sk);
sk                994 net/sctp/input.c 					 laddr, sctp_sk(t->asoc->base.sk)))
sk               1007 net/sctp/input.c 	struct net *net = sock_net(ep->base.sk);
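
The net/sctp/input.c entries above include the ICMP error path (sctp_err_lookup/sctp_err_finish and sctp_v4_err), where an error is only surfaced as sk->sk_err when inet->recverr is set (input.c:641), otherwise it lands in sk_err_soft. A minimal userspace sketch of opting in to that reporting, assuming a Linux host with the standard IP_RECVERR option; the peer address and port are placeholders, not anything taken from this listing.

/* Minimal sketch: enable asynchronous ICMP error reporting on an SCTP
 * socket, so the sk->sk_err path shown around input.c:641 reports errors
 * to the application instead of only setting sk_err_soft.
 * Assumes Linux with SCTP support; address and port are placeholders. */
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0) {
		perror("socket(IPPROTO_SCTP)");
		return 1;
	}

	int one = 1;
	/* With IP_RECVERR set, queued ICMP errors are reported to the
	 * application (e.g. as failing connect()/send() calls). */
	if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &one, sizeof(one)) < 0)
		perror("setsockopt(IP_RECVERR)");

	struct sockaddr_in peer = {
		.sin_family = AF_INET,
		.sin_port   = htons(5000),	/* placeholder port */
	};
	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr); /* TEST-NET-1 placeholder */

	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
		fprintf(stderr, "connect: %s\n", strerror(errno));

	close(fd);
	return 0;
}
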
sk                177 net/sctp/inqueue.c 			sock_rps_save_rxhash(chunk->asoc->base.sk, chunk->skb);
sk                129 net/sctp/ipv6.c 	struct sock *sk;
sk                144 net/sctp/ipv6.c 	sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport);
sk                148 net/sctp/ipv6.c 	if (!sk) {
sk                160 net/sctp/ipv6.c 		if (ip6_sk_accept_pmtu(sk))
sk                161 net/sctp/ipv6.c 			sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info));
sk                165 net/sctp/ipv6.c 			sctp_icmp_proto_unreachable(sk, asoc, transport);
sk                170 net/sctp/ipv6.c 		sctp_icmp_redirect(sk, transport, skb);
sk                176 net/sctp/ipv6.c 	np = inet6_sk(sk);
sk                178 net/sctp/ipv6.c 	if (!sock_owned_by_user(sk) && np->recverr) {
sk                179 net/sctp/ipv6.c 		sk->sk_err = err;
sk                180 net/sctp/ipv6.c 		sk->sk_error_report(sk);
sk                182 net/sctp/ipv6.c 		sk->sk_err_soft = err;
sk                186 net/sctp/ipv6.c 	sctp_err_finish(sk, transport);
sk                196 net/sctp/ipv6.c 	struct sock *sk = skb->sk;
sk                197 net/sctp/ipv6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                209 net/sctp/ipv6.c 		IP6_ECN_flow_xmit(sk, fl6->flowlabel);
sk                214 net/sctp/ipv6.c 	SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
sk                217 net/sctp/ipv6.c 	res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
sk                218 net/sctp/ipv6.c 		       tclass, sk->sk_priority);
sk                227 net/sctp/ipv6.c 			    struct flowi *fl, struct sock *sk)
sk                234 net/sctp/ipv6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                249 net/sctp/ipv6.c 		fl6->flowi6_oif = asoc->base.sk->sk_bound_dev_if;
sk                256 net/sctp/ipv6.c 		flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
sk                279 net/sctp/ipv6.c 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
sk                337 net/sctp/ipv6.c 		bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
sk                394 net/sctp/ipv6.c static void sctp_v6_get_saddr(struct sctp_sock *sk,
sk                442 net/sctp/ipv6.c static void sctp_v6_copy_ip_options(struct sock *sk, struct sock *newsk)
sk                444 net/sctp/ipv6.c 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
sk                461 net/sctp/ipv6.c static int sctp_v6_ip_options_len(struct sock *sk)
sk                463 net/sctp/ipv6.c 	struct ipv6_pinfo *np = inet6_sk(sk);
sk                498 net/sctp/ipv6.c static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
sk                502 net/sctp/ipv6.c 	addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
sk                506 net/sctp/ipv6.c static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
sk                509 net/sctp/ipv6.c 		sk->sk_v6_rcv_saddr.s6_addr32[0] = 0;
sk                510 net/sctp/ipv6.c 		sk->sk_v6_rcv_saddr.s6_addr32[1] = 0;
sk                511 net/sctp/ipv6.c 		sk->sk_v6_rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
sk                512 net/sctp/ipv6.c 		sk->sk_v6_rcv_saddr.s6_addr32[3] =
sk                515 net/sctp/ipv6.c 		sk->sk_v6_rcv_saddr = addr->v6.sin6_addr;
sk                520 net/sctp/ipv6.c static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
sk                523 net/sctp/ipv6.c 		sk->sk_v6_daddr.s6_addr32[0] = 0;
sk                524 net/sctp/ipv6.c 		sk->sk_v6_daddr.s6_addr32[1] = 0;
sk                525 net/sctp/ipv6.c 		sk->sk_v6_daddr.s6_addr32[2] = htonl(0x0000ffff);
sk                526 net/sctp/ipv6.c 		sk->sk_v6_daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
sk                528 net/sctp/ipv6.c 		sk->sk_v6_daddr = addr->v6.sin6_addr;
sk                631 net/sctp/ipv6.c 	struct net *net = sock_net(&sp->inet.sk);
sk                711 net/sctp/ipv6.c static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
sk                716 net/sctp/ipv6.c 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
sk                719 net/sctp/ipv6.c 	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern);
sk                725 net/sctp/ipv6.c 	sctp_copy_sock(newsk, sk, asoc);
sk                726 net/sctp/ipv6.c 	sock_reset_flag(sk, SOCK_ZAPPED);
sk                731 net/sctp/ipv6.c 	sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped;
sk                740 net/sctp/ipv6.c 	sctp_v6_copy_ip_options(sk, newsk);
sk                747 net/sctp/ipv6.c 	newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
sk                799 net/sctp/ipv6.c static void sctp_v6_ecn_capable(struct sock *sk)
sk                801 net/sctp/ipv6.c 	inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
sk                834 net/sctp/ipv6.c 	*addrlen = sctp_v6_addr_to_user(sctp_sk(asoc->base.sk), addr);
sk                865 net/sctp/ipv6.c 	*addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr);
sk                892 net/sctp/ipv6.c 	struct sock *sk = sctp_opt2sk(opt);
sk                902 net/sctp/ipv6.c 	if (__ipv6_only_sock(sk) && af1 != af2)
sk                906 net/sctp/ipv6.c 	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
sk                933 net/sctp/ipv6.c 			net = sock_net(&opt->inet.sk);
sk                969 net/sctp/ipv6.c 			dev = dev_get_by_index_rcu(sock_net(&opt->inet.sk),
sk               1009 net/sctp/ipv6.c 	rc = sctp_v6_addr_to_user(sctp_sk(sock->sk),
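
The net/sctp/ipv6.c entries above (sctp_v6_to_sk_saddr and sctp_v6_to_sk_daddr) store IPv4 peers on an AF_INET6 SCTP socket in the ::ffff:a.b.c.d v4-mapped form. A small sketch of what that looks like from userspace, assuming IPv6 SCTP support and the default IPV6_V6ONLY=0; the loopback address and port are illustrative only.

/* Minimal sketch: bind an AF_INET6 SCTP socket to a v4-mapped address,
 * the ::ffff:a.b.c.d form that ipv6.c builds in sctp_v6_to_sk_saddr().
 * Assumes Linux with SCTP and IPv6 support; address/port are illustrative. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0) {
		perror("socket(AF_INET6, IPPROTO_SCTP)");
		return 1;
	}

	/* v4-mapped addresses need a dual-stack socket, so make sure
	 * IPV6_V6ONLY is off (it usually is by default). */
	int off = 0;
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off));

	struct sockaddr_in6 addr = {
		.sin6_family = AF_INET6,
		.sin6_port   = htons(5000),	/* illustrative port */
	};
	inet_pton(AF_INET6, "::ffff:127.0.0.1", &addr.sin6_addr);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");
	else
		puts("bound to v4-mapped ::ffff:127.0.0.1");

	close(fd);
	return 0;
}
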
sk                 79 net/sctp/output.c 	struct sock *sk;
sk                 92 net/sctp/output.c 		sk = asoc->base.sk;
sk                 93 net/sctp/output.c 		sp = sctp_sk(sk);
sk                132 net/sctp/output.c 	if (__sk_dst_get(sk) != tp->dst) {
sk                134 net/sctp/output.c 		sk_setup_caps(sk, tp->dst);
sk                136 net/sctp/output.c 	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
sk                194 net/sctp/output.c 				chunk->skb->sk->sk_err = -error;
sk                285 net/sctp/output.c 				SCTP_INC_STATS(sock_net(asoc->base.sk),
sk                401 net/sctp/output.c 	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
sk                413 net/sctp/output.c 	struct sock *sk = head->sk;
sk                418 net/sctp/output.c 		skb_shinfo(head)->gso_type = sk->sk_gso_type;
sk                514 net/sctp/output.c 			sk_setup_caps(sk, tp->dst);
sk                554 net/sctp/output.c 	struct sock *sk;
sk                560 net/sctp/output.c 	sk = chunk->skb->sk;
sk                564 net/sctp/output.c 		if (!sk_can_gso(sk)) {
sk                577 net/sctp/output.c 	skb_set_owner_w(head, sk);
sk                590 net/sctp/output.c 		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
sk                617 net/sctp/output.c 	tp->af_specific->ecn_capable(sk);
sk                703 net/sctp/output.c 	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
sk                200 net/sctp/outqueue.c 	sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss);
sk                282 net/sctp/outqueue.c 	struct net *net = sock_net(q->asoc->base.sk);
sk                536 net/sctp/outqueue.c 	struct net *net = sock_net(q->asoc->base.sk);
sk                906 net/sctp/outqueue.c 				ctx->asoc->base.sk->sk_err = -error;
sk                995 net/sctp/outqueue.c 		ctx->asoc->base.sk->sk_err = -error;
sk               1148 net/sctp/outqueue.c 				ctx->q->asoc->base.sk->sk_err = -error;
sk               1887 net/sctp/outqueue.c 		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
sk                166 net/sctp/proc.c 	struct sock *sk;
sk                176 net/sctp/proc.c 		sk = epb->sk;
sk                177 net/sctp/proc.c 		if (!net_eq(sock_net(sk), seq_file_net(seq)))
sk                179 net/sctp/proc.c 		seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
sk                180 net/sctp/proc.c 			   sctp_sk(sk)->type, sk->sk_state, hash,
sk                182 net/sctp/proc.c 			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sk                183 net/sctp/proc.c 			   sock_i_ino(sk));
sk                236 net/sctp/proc.c 	struct sock *sk;
sk                250 net/sctp/proc.c 	sk = epb->sk;
sk                255 net/sctp/proc.c 		   assoc, sk, sctp_sk(sk)->type, sk->sk_state,
sk                260 net/sctp/proc.c 		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sk                261 net/sctp/proc.c 		   sock_i_ino(sk),
sk                274 net/sctp/proc.c 		refcount_read(&sk->sk_wmem_alloc),
sk                275 net/sctp/proc.c 		sk->sk_wmem_queued,
sk                276 net/sctp/proc.c 		sk->sk_sndbuf,
sk                277 net/sctp/proc.c 		sk->sk_rcvbuf);
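
The net/sctp/proc.c entries above are the seq_file callbacks that print the per-endpoint and per-association rows (socket pointer, type, state, port, uid, inode, and the sk_wmem_alloc/sk_wmem_queued/sk_sndbuf/sk_rcvbuf counters). A tiny sketch for reading them from userspace, assuming the usual /proc/net/sctp/eps and /proc/net/sctp/assocs files that this code populates.

/* Minimal sketch: dump the SCTP proc files whose rows come from the
 * seq_printf() calls listed above. Assumes the usual /proc/net/sctp layout. */
#include <stdio.h>

static void dump(const char *path)
{
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	printf("== %s ==\n", path);
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump("/proc/net/sctp/eps");	/* endpoints: sk, type, state, port, uid, inode */
	dump("/proc/net/sctp/assocs");	/* associations: include wmem_alloc, sndbuf, rcvbuf */
	return 0;
}
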
sk                175 net/sctp/protocol.c static void sctp_v4_copy_ip_options(struct sock *sk, struct sock *newsk)
sk                177 net/sctp/protocol.c 	struct inet_sock *newinet, *inet = inet_sk(sk);
sk                198 net/sctp/protocol.c static int sctp_v4_ip_options_len(struct sock *sk)
sk                200 net/sctp/protocol.c 	struct inet_sock *inet = inet_sk(sk);
sk                234 net/sctp/protocol.c static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
sk                238 net/sctp/protocol.c 	addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
sk                243 net/sctp/protocol.c static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
sk                245 net/sctp/protocol.c 	inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr;
sk                249 net/sctp/protocol.c static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
sk                251 net/sctp/protocol.c 	inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr;
sk                348 net/sctp/protocol.c 	struct net *net = sock_net(&sp->inet.sk);
sk                408 net/sctp/protocol.c 				struct flowi *fl, struct sock *sk)
sk                419 net/sctp/protocol.c 	__u8 tos = inet_sk(sk)->tos;
sk                428 net/sctp/protocol.c 		fl4->flowi4_tos = RT_CONN_FLAGS_TOS(asoc->base.sk, tos);
sk                429 net/sctp/protocol.c 		fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
sk                441 net/sctp/protocol.c 	rt = ip_route_output_key(sock_net(sk), fl4);
sk                494 net/sctp/protocol.c 				     asoc->base.sk->sk_bound_dev_if,
sk                495 net/sctp/protocol.c 				     RT_CONN_FLAGS_TOS(asoc->base.sk, tos),
sk                499 net/sctp/protocol.c 		rt = ip_route_output_key(sock_net(sk), fl4);
sk                506 net/sctp/protocol.c 		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
sk                541 net/sctp/protocol.c static void sctp_v4_get_saddr(struct sctp_sock *sk,
sk                567 net/sctp/protocol.c static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
sk                571 net/sctp/protocol.c 	struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
sk                572 net/sctp/protocol.c 			sk->sk_prot, kern);
sk                580 net/sctp/protocol.c 	sctp_copy_sock(newsk, sk, asoc);
sk                583 net/sctp/protocol.c 	sctp_v4_copy_ip_options(sk, newsk);
sk                613 net/sctp/protocol.c static void sctp_v4_ecn_capable(struct sock *sk)
sk                615 net/sctp/protocol.c 	INET_ECN_xmit(sk);
sk                658 net/sctp/protocol.c 			struct sock *sk;
sk                660 net/sctp/protocol.c 			sk = sctp_opt2sk(sp);
sk                662 net/sctp/protocol.c 			if (!sctp_is_ep_boundall(sk))
sk                664 net/sctp/protocol.c 			bh_lock_sock(sk);
sk                667 net/sctp/protocol.c 			bh_unlock_sock(sk);
sk                976 net/sctp/protocol.c 	struct inet_sock *inet = inet_sk(skb->sk);
sk                989 net/sctp/protocol.c 	SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS);
sk                991 net/sctp/protocol.c 	return __ip_queue_xmit(&inet->sk, skb, &transport->fl, dscp);
sk                113 net/sctp/sm_make_chunk.c 	skb->sk = asoc ? asoc->base.sk : NULL;
sk                242 net/sctp/sm_make_chunk.c 	sp = sctp_sk(asoc->base.sk);
sk                419 net/sctp/sm_make_chunk.c 	sp = sctp_sk(asoc->base.sk);
sk               1252 net/sctp/sm_make_chunk.c 		sp = sctp_sk(asoc->base.sk);
sk               1332 net/sctp/sm_make_chunk.c 				 struct sock *sk, gfp_t gfp)
sk               1340 net/sctp/sm_make_chunk.c 	if (!sk)
sk               1390 net/sctp/sm_make_chunk.c 	struct sock *sk;
sk               1408 net/sctp/sm_make_chunk.c 	sk = asoc ? asoc->base.sk : NULL;
sk               1409 net/sctp/sm_make_chunk.c 	retval = sctp_chunkify(skb, asoc, sk, gfp);
sk               1593 net/sctp/sm_make_chunk.c 	asoc = sctp_association_new(ep, ep->base.sk, scope, gfp);
sk               1672 net/sctp/sm_make_chunk.c 	if (sctp_sk(ep->base.sk)->hmac) {
sk               1673 net/sctp/sm_make_chunk.c 		SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac);
sk               1677 net/sctp/sm_make_chunk.c 		desc->tfm = sctp_sk(ep->base.sk)->hmac;
sk               1739 net/sctp/sm_make_chunk.c 	if (!sctp_sk(ep->base.sk)->hmac)
sk               1744 net/sctp/sm_make_chunk.c 		SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac);
sk               1747 net/sctp/sm_make_chunk.c 		desc->tfm = sctp_sk(ep->base.sk)->hmac;
sk               1793 net/sctp/sm_make_chunk.c 	if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
sk               1823 net/sctp/sm_make_chunk.c 	retval = sctp_association_new(ep, ep->base.sk, scope, gfp);
sk               2314 net/sctp/sm_make_chunk.c 	struct net *net = sock_net(asoc->base.sk);
sk               2498 net/sctp/sm_make_chunk.c 	struct net *net = sock_net(asoc->base.sk);
sk               2515 net/sctp/sm_make_chunk.c 		if (PF_INET6 != asoc->base.sk->sk_family)
sk               2521 net/sctp/sm_make_chunk.c 		if (ipv6_only_sock(asoc->base.sk))
sk               2575 net/sctp/sm_make_chunk.c 				if (PF_INET6 == asoc->base.sk->sk_family)
sk               3067 net/sctp/sm_make_chunk.c 		if (security_sctp_bind_connect(asoc->ep->base.sk,
sk               3139 net/sctp/sm_make_chunk.c 		if (security_sctp_bind_connect(asoc->ep->base.sk,
sk                236 net/sctp/sm_sideeffect.c 	struct sock *sk = asoc->base.sk;
sk                237 net/sctp/sm_sideeffect.c 	struct net *net = sock_net(sk);
sk                242 net/sctp/sm_sideeffect.c 	bh_lock_sock(sk);
sk                243 net/sctp/sm_sideeffect.c 	if (sock_owned_by_user(sk)) {
sk                260 net/sctp/sm_sideeffect.c 		sk->sk_err = -error;
sk                263 net/sctp/sm_sideeffect.c 	bh_unlock_sock(sk);
sk                273 net/sctp/sm_sideeffect.c 	struct sock *sk = asoc->base.sk;
sk                274 net/sctp/sm_sideeffect.c 	struct net *net = sock_net(sk);
sk                277 net/sctp/sm_sideeffect.c 	bh_lock_sock(sk);
sk                278 net/sctp/sm_sideeffect.c 	if (sock_owned_by_user(sk)) {
sk                301 net/sctp/sm_sideeffect.c 		sk->sk_err = -error;
sk                304 net/sctp/sm_sideeffect.c 	bh_unlock_sock(sk);
sk                366 net/sctp/sm_sideeffect.c 	struct sock *sk = asoc->base.sk;
sk                367 net/sctp/sm_sideeffect.c 	struct net *net = sock_net(sk);
sk                371 net/sctp/sm_sideeffect.c 	bh_lock_sock(sk);
sk                372 net/sctp/sm_sideeffect.c 	if (sock_owned_by_user(sk)) {
sk                397 net/sctp/sm_sideeffect.c 		sk->sk_err = -error;
sk                400 net/sctp/sm_sideeffect.c 	bh_unlock_sock(sk);
sk                412 net/sctp/sm_sideeffect.c 	struct sock *sk = asoc->base.sk;
sk                413 net/sctp/sm_sideeffect.c 	struct net *net = sock_net(sk);
sk                415 net/sctp/sm_sideeffect.c 	bh_lock_sock(sk);
sk                416 net/sctp/sm_sideeffect.c 	if (sock_owned_by_user(sk)) {
sk                437 net/sctp/sm_sideeffect.c 	bh_unlock_sock(sk);
sk                447 net/sctp/sm_sideeffect.c 	struct sock *sk = asoc->base.sk;
sk                448 net/sctp/sm_sideeffect.c 	struct net *net = sock_net(sk);
sk                451 net/sctp/sm_sideeffect.c 	bh_lock_sock(sk);
sk                452 net/sctp/sm_sideeffect.c 	if (sock_owned_by_user(sk)) {
sk                467 net/sctp/sm_sideeffect.c 		sk->sk_err = -error;
sk                470 net/sctp/sm_sideeffect.c 	bh_unlock_sock(sk);
sk                519 net/sctp/sm_sideeffect.c 	struct net *net = sock_net(asoc->base.sk);
sk                796 net/sctp/sm_sideeffect.c 		struct net *net = sock_net(asoc->base.sk);
sk                832 net/sctp/sm_sideeffect.c 	struct net *net = sock_net(asoc->base.sk);
sk                855 net/sctp/sm_sideeffect.c 	struct sock *sk = asoc->base.sk;
sk                861 net/sctp/sm_sideeffect.c 	if (sctp_style(sk, TCP)) {
sk                865 net/sctp/sm_sideeffect.c 		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
sk                866 net/sctp/sm_sideeffect.c 			inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
sk                870 net/sctp/sm_sideeffect.c 		    sctp_sstate(sk, ESTABLISHED)) {
sk                871 net/sctp/sm_sideeffect.c 			inet_sk_set_state(sk, SCTP_SS_CLOSING);
sk                872 net/sctp/sm_sideeffect.c 			sk->sk_shutdown |= RCV_SHUTDOWN;
sk                906 net/sctp/sm_sideeffect.c 		if (!sctp_style(sk, UDP))
sk                907 net/sctp/sm_sideeffect.c 			sk->sk_state_change(sk);
sk                919 net/sctp/sm_sideeffect.c 	struct sock *sk = asoc->base.sk;
sk                925 net/sctp/sm_sideeffect.c 	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
sk                926 net/sctp/sm_sideeffect.c 	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
sk               1021 net/sctp/sm_sideeffect.c 	struct sock *sk = asoc->base.sk;
sk               1023 net/sctp/sm_sideeffect.c 	if (!sctp_style(sk, UDP))
sk               1024 net/sctp/sm_sideeffect.c 		sk->sk_err = error;
sk               1120 net/sctp/sm_sideeffect.c 		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
sk               1262 net/sctp/sm_sideeffect.c 	struct sctp_sock *sp = sctp_sk(ep->base.sk);
sk                361 net/sctp/sm_statefuns.c 	if (sctp_sstate(ep->base.sk, CLOSING))
sk                697 net/sctp/sm_statefuns.c 	struct sock *sk;
sk                720 net/sctp/sm_statefuns.c 	sk = ep->base.sk;
sk                721 net/sctp/sm_statefuns.c 	if (!sctp_sstate(sk, LISTENING) ||
sk                722 net/sctp/sm_statefuns.c 	    (sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
sk                927 net/sctp/sm_statefuns.c 	security_inet_conn_established(ep->base.sk, chunk->skb);
sk               1333 net/sctp/sm_statefuns.c 	struct net *net = sock_net(new_asoc->base.sk);
sk               1861 net/sctp/sm_statefuns.c 	    (sctp_sstate(asoc->base.sk, CLOSING) ||
sk               1862 net/sctp/sm_statefuns.c 	     sock_flag(asoc->base.sk, SOCK_DEAD))) {
sk               3391 net/sctp/sm_statefuns.c 	abort->skb->sk = ep->base.sk;
sk               3655 net/sctp/sm_statefuns.c 	shut->skb->sk = ep->base.sk;
sk               4616 net/sctp/sm_statefuns.c 		abort->skb->sk = ep->base.sk;
sk               6203 net/sctp/sm_statefuns.c 		abort->skb->sk = ep->base.sk;
sk               6313 net/sctp/sm_statefuns.c 			err_chunk->skb->sk = ep->base.sk;
sk               6330 net/sctp/sm_statefuns.c 	struct sock *sk = asoc->base.sk;
sk               6331 net/sctp/sm_statefuns.c 	struct net *net = sock_net(sk);
sk               6435 net/sctp/sm_statefuns.c 	if (sk_under_memory_pressure(sk)) {
sk               6442 net/sctp/sm_statefuns.c 			sk_mem_reclaim(sk);
sk                 71 net/sctp/socket.c static bool sctp_writeable(struct sock *sk);
sk                 75 net/sctp/socket.c static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
sk                 77 net/sctp/socket.c static int sctp_wait_for_accept(struct sock *sk, long timeo);
sk                 78 net/sctp/socket.c static void sctp_wait_for_close(struct sock *sk, long timeo);
sk                 79 net/sctp/socket.c static void sctp_destruct_sock(struct sock *sk);
sk                 89 net/sctp/socket.c static int sctp_autobind(struct sock *sk);
sk                 98 net/sctp/socket.c static void sctp_enter_memory_pressure(struct sock *sk)
sk                107 net/sctp/socket.c 	struct sock *sk = asoc->base.sk;
sk                109 net/sctp/socket.c 	return asoc->ep->sndbuf_policy ? sk->sk_sndbuf - asoc->sndbuf_used
sk                110 net/sctp/socket.c 				       : sk_stream_wspace(sk);
sk                125 net/sctp/socket.c 	struct sock *sk = asoc->base.sk;
sk                133 net/sctp/socket.c 	skb_set_owner_w(chunk->skb, sk);
sk                139 net/sctp/socket.c 	refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
sk                141 net/sctp/socket.c 	sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
sk                142 net/sctp/socket.c 	sk_mem_charge(sk, chunk->skb->truesize);
sk                156 net/sctp/socket.c 		if ((clear && asoc->base.sk == c->skb->sk) ||	\
sk                157 net/sctp/socket.c 		    (!clear && asoc->base.sk != c->skb->sk))	\
sk                190 net/sctp/socket.c static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
sk                197 net/sctp/socket.c 		cb(skb, sk);
sk                200 net/sctp/socket.c 		cb(skb, sk);
sk                203 net/sctp/socket.c 		cb(skb, sk);
sk                207 net/sctp/socket.c static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
sk                213 net/sctp/socket.c 	af = sctp_sockaddr_af(sctp_sk(sk), addr, len);
sk                218 net/sctp/socket.c 	if (!af->addr_valid(addr, sctp_sk(sk), NULL))
sk                221 net/sctp/socket.c 	if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr)))
sk                230 net/sctp/socket.c struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
sk                235 net/sctp/socket.c 	if (!sctp_style(sk, UDP)) {
sk                240 net/sctp/socket.c 		if (!sctp_sstate(sk, ESTABLISHED) && !sctp_sstate(sk, CLOSING))
sk                244 net/sctp/socket.c 		if (!list_empty(&sctp_sk(sk)->ep->asocs))
sk                245 net/sctp/socket.c 			asoc = list_entry(sctp_sk(sk)->ep->asocs.next,
sk                256 net/sctp/socket.c 	if (asoc && (asoc->base.sk != sk || asoc->base.dead))
sk                267 net/sctp/socket.c static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
sk                276 net/sctp/socket.c 	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
sk                279 net/sctp/socket.c 	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
sk                286 net/sctp/socket.c 	id_asoc = sctp_id2assoc(sk, id);
sk                290 net/sctp/socket.c 	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
sk                306 net/sctp/socket.c static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
sk                310 net/sctp/socket.c 	lock_sock(sk);
sk                312 net/sctp/socket.c 	pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk,
sk                316 net/sctp/socket.c 	if (!sctp_sk(sk)->ep->base.bind_addr.port)
sk                317 net/sctp/socket.c 		retval = sctp_do_bind(sk, (union sctp_addr *)addr,
sk                322 net/sctp/socket.c 	release_sock(sk);
sk                361 net/sctp/socket.c static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
sk                363 net/sctp/socket.c 	struct net *net = sock_net(sk);
sk                364 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk                375 net/sctp/socket.c 			 __func__, sk, addr, len);
sk                382 net/sctp/socket.c 		 __func__, sk, &addr->sa, bp->port, snum, len);
sk                417 net/sctp/socket.c 	if (sctp_get_port_local(sk, addr))
sk                422 net/sctp/socket.c 		bp->port = inet_sk(sk)->inet_num;
sk                431 net/sctp/socket.c 		sctp_put_port(sk);
sk                435 net/sctp/socket.c 	inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
sk                436 net/sctp/socket.c 	sp->pf->to_sk_saddr(addr, sk);
sk                454 net/sctp/socket.c 	struct net 	*net = sock_net(asoc->base.sk);
sk                489 net/sctp/socket.c static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
sk                497 net/sctp/socket.c 	pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk,
sk                512 net/sctp/socket.c 		retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr,
sk                521 net/sctp/socket.c 				sctp_bindx_rem(sk, addrs, cnt);
sk                539 net/sctp/socket.c static int sctp_send_asconf_add_ip(struct sock		*sk,
sk                557 net/sctp/socket.c 	sp = sctp_sk(sk);
sk                564 net/sctp/socket.c 		 __func__, sk, addrs, addrcnt);
sk                637 net/sctp/socket.c 						     sctp_sk(asoc->base.sk));
sk                662 net/sctp/socket.c static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
sk                664 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk                674 net/sctp/socket.c 		 __func__, sk, addrs, addrcnt);
sk                723 net/sctp/socket.c 				sctp_bindx_add(sk, addrs, cnt);
sk                741 net/sctp/socket.c static int sctp_send_asconf_del_ip(struct sock		*sk,
sk                760 net/sctp/socket.c 	sp = sctp_sk(sk);
sk                767 net/sctp/socket.c 		 __func__, sk, addrs, addrcnt);
sk                880 net/sctp/socket.c 					     sctp_sk(asoc->base.sk));
sk                895 net/sctp/socket.c 	struct sock *sk = sctp_opt2sk(sp);
sk                905 net/sctp/socket.c 	if (sctp_verify_addr(sk, addr, af->sockaddr_len))
sk                909 net/sctp/socket.c 		return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
sk                911 net/sctp/socket.c 		return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
sk                983 net/sctp/socket.c static int sctp_setsockopt_bindx(struct sock *sk,
sk                996 net/sctp/socket.c 		 __func__, sk, addrs, addrs_size, op);
sk               1032 net/sctp/socket.c 		err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_BINDX_ADD,
sk               1037 net/sctp/socket.c 		err = sctp_bindx_add(sk, kaddrs, addrcnt);
sk               1040 net/sctp/socket.c 		err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt);
sk               1044 net/sctp/socket.c 		err = sctp_bindx_rem(sk, kaddrs, addrcnt);
sk               1047 net/sctp/socket.c 		err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt);
sk               1067 net/sctp/socket.c 	struct sock *sk = ep->base.sk;
sk               1068 net/sctp/socket.c 	struct net *net = sock_net(sk);
sk               1076 net/sctp/socket.c 		if (sctp_autobind(sk))
sk               1085 net/sctp/socket.c 	asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL);
sk               1136 net/sctp/socket.c 	err = sctp_verify_addr(ep->base.sk, daddr, addr_len);
sk               1160 net/sctp/socket.c static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs,
sk               1163 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               1173 net/sctp/socket.c 	if (sctp_sstate(sk, ESTABLISHED) || sctp_sstate(sk, CLOSING) ||
sk               1174 net/sctp/socket.c 	    (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)))
sk               1182 net/sctp/socket.c 	err = sctp_verify_addr(sk, daddr, af->sockaddr_len);
sk               1228 net/sctp/socket.c 	err = sctp_primitive_ASSOCIATE(sock_net(sk), asoc, NULL);
sk               1233 net/sctp/socket.c 	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
sk               1234 net/sctp/socket.c 	sp->pf->to_sk_daddr(daddr, sk);
sk               1235 net/sctp/socket.c 	sk->sk_err = 0;
sk               1240 net/sctp/socket.c 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
sk               1305 net/sctp/socket.c static int __sctp_setsockopt_connectx(struct sock *sk,
sk               1314 net/sctp/socket.c 		 __func__, sk, addrs, addrs_size);
sk               1325 net/sctp/socket.c 	err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX,
sk               1334 net/sctp/socket.c 	if (sk->sk_socket->file)
sk               1335 net/sctp/socket.c 		flags = sk->sk_socket->file->f_flags;
sk               1337 net/sctp/socket.c 	err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
sk               1349 net/sctp/socket.c static int sctp_setsockopt_connectx_old(struct sock *sk,
sk               1353 net/sctp/socket.c 	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
sk               1362 net/sctp/socket.c static int sctp_setsockopt_connectx(struct sock *sk,
sk               1369 net/sctp/socket.c 	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
sk               1393 net/sctp/socket.c static int sctp_getsockopt_connectx3(struct sock *sk, int len,
sk               1422 net/sctp/socket.c 	err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
sk               1485 net/sctp/socket.c static void sctp_close(struct sock *sk, long timeout)
sk               1487 net/sctp/socket.c 	struct net *net = sock_net(sk);
sk               1493 net/sctp/socket.c 	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
sk               1495 net/sctp/socket.c 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk               1496 net/sctp/socket.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk               1497 net/sctp/socket.c 	inet_sk_set_state(sk, SCTP_SS_CLOSING);
sk               1499 net/sctp/socket.c 	ep = sctp_sk(sk)->ep;
sk               1502 net/sctp/socket.c 	data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
sk               1503 net/sctp/socket.c 	data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
sk               1509 net/sctp/socket.c 		if (sctp_style(sk, TCP)) {
sk               1524 net/sctp/socket.c 		    (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
sk               1534 net/sctp/socket.c 	if (sctp_style(sk, TCP) && timeout)
sk               1535 net/sctp/socket.c 		sctp_wait_for_close(sk, timeout);
sk               1538 net/sctp/socket.c 	release_sock(sk);
sk               1546 net/sctp/socket.c 	bh_lock_sock_nested(sk);
sk               1551 net/sctp/socket.c 	sock_hold(sk);
sk               1552 net/sctp/socket.c 	sk_common_release(sk);
sk               1554 net/sctp/socket.c 	bh_unlock_sock(sk);
sk               1557 net/sctp/socket.c 	sock_put(sk);
sk               1563 net/sctp/socket.c static int sctp_error(struct sock *sk, int flags, int err)
sk               1566 net/sctp/socket.c 		err = sock_error(sk) ? : -EPIPE;
sk               1598 net/sctp/socket.c static int sctp_sendmsg_parse(struct sock *sk, struct sctp_cmsgs *cmsgs,
sk               1605 net/sctp/socket.c 	if (sctp_sstate(sk, LISTENING) && sctp_style(sk, TCP))
sk               1608 net/sctp/socket.c 	if (msg_len > sk->sk_sndbuf)
sk               1646 net/sctp/socket.c 	if (sctp_style(sk, TCP) && (sflags & (SCTP_EOF | SCTP_ABORT)))
sk               1659 net/sctp/socket.c static int sctp_sendmsg_new_asoc(struct sock *sk, __u16 sflags,
sk               1664 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               1676 net/sctp/socket.c 	if (sctp_style(sk, TCP) && (sctp_sstate(sk, ESTABLISHED) ||
sk               1677 net/sctp/socket.c 				    sctp_sstate(sk, CLOSING)))
sk               1689 net/sctp/socket.c 	err = security_sctp_bind_connect(sk, SCTP_SENDMSG_CONNECT,
sk               1758 net/sctp/socket.c 	struct sock *sk = asoc->base.sk;
sk               1759 net/sctp/socket.c 	struct net *net = sock_net(sk);
sk               1761 net/sctp/socket.c 	if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP))
sk               1764 net/sctp/socket.c 	if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP) &&
sk               1797 net/sctp/socket.c 	struct sock *sk = asoc->base.sk;
sk               1798 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               1799 net/sctp/socket.c 	struct net *net = sock_net(sk);
sk               1831 net/sctp/socket.c 	if (sk_under_memory_pressure(sk))
sk               1832 net/sctp/socket.c 		sk_mem_reclaim(sk);
sk               1834 net/sctp/socket.c 	if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) {
sk               1835 net/sctp/socket.c 		timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
sk               1847 net/sctp/socket.c 			timeo = sock_sndtimeo(sk, 0);
sk               1885 net/sctp/socket.c 		timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
sk               1895 net/sctp/socket.c static union sctp_addr *sctp_sendmsg_get_daddr(struct sock *sk,
sk               1902 net/sctp/socket.c 	if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) {
sk               1910 net/sctp/socket.c 		err = sctp_verify_addr(sk, daddr, len);
sk               1944 net/sctp/socket.c static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
sk               1946 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               1957 net/sctp/socket.c 	err = sctp_sendmsg_parse(sk, &cmsgs, &_sinfo, msg, msg_len);
sk               1965 net/sctp/socket.c 	daddr = sctp_sendmsg_get_daddr(sk, msg, &cmsgs);
sk               1971 net/sctp/socket.c 	lock_sock(sk);
sk               1974 net/sctp/socket.c 	if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
sk               2005 net/sctp/socket.c 			err = sctp_sendmsg_new_asoc(sk, sflags, &cmsgs, daddr,
sk               2014 net/sctp/socket.c 		if (!sctp_style(sk, TCP) && !(sflags & SCTP_ADDR_OVER))
sk               2017 net/sctp/socket.c 		asoc = sctp_id2assoc(sk, sinfo->sinfo_assoc_id);
sk               2037 net/sctp/socket.c 	release_sock(sk);
sk               2039 net/sctp/socket.c 	return sctp_error(sk, msg->msg_flags, err);
sk               2091 net/sctp/socket.c static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk               2095 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               2102 net/sctp/socket.c 		 "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags,
sk               2105 net/sctp/socket.c 	lock_sock(sk);
sk               2107 net/sctp/socket.c 	if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED) &&
sk               2108 net/sctp/socket.c 	    !sctp_sstate(sk, CLOSING) && !sctp_sstate(sk, CLOSED)) {
sk               2113 net/sctp/socket.c 	skb = sctp_skb_recv_datagram(sk, flags, noblock, &err);
sk               2137 net/sctp/socket.c 	sock_recv_ts_and_drops(msg, sk, head_skb);
sk               2147 net/sctp/socket.c 		sctp_ulpevent_read_nxtinfo(event, msg, sk);
sk               2166 net/sctp/socket.c 		skb_queue_head(&sk->sk_receive_queue, skb);
sk               2195 net/sctp/socket.c 	release_sock(sk);
sk               2206 net/sctp/socket.c static int sctp_setsockopt_disable_fragments(struct sock *sk,
sk               2218 net/sctp/socket.c 	sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
sk               2223 net/sctp/socket.c static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
sk               2228 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               2243 net/sctp/socket.c 		asoc->subscribe = sctp_sk(sk)->subscribe;
sk               2252 net/sctp/socket.c 		asoc = sctp_id2assoc(sk, 0);
sk               2277 net/sctp/socket.c static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
sk               2280 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               2281 net/sctp/socket.c 	struct net *net = sock_net(sk);
sk               2284 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               2446 net/sctp/socket.c 		struct net *net = sock_net(trans->asoc->base.sk);
sk               2625 net/sctp/socket.c static int sctp_setsockopt_peer_addr_params(struct sock *sk,
sk               2632 net/sctp/socket.c 	struct sctp_sock        *sp = sctp_sk(sk);
sk               2665 net/sctp/socket.c 	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
sk               2666 net/sctp/socket.c 		trans = sctp_addr_id2transport(sk, &params.spp_address,
sk               2676 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
sk               2678 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               2793 net/sctp/socket.c static int sctp_setsockopt_delayed_ack(struct sock *sk,
sk               2796 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               2830 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
sk               2832 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               2841 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               2880 net/sctp/socket.c static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
sk               2883 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               2916 net/sctp/socket.c static int sctp_setsockopt_default_send_param(struct sock *sk,
sk               2920 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               2933 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
sk               2935 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               2948 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               2977 net/sctp/socket.c static int sctp_setsockopt_default_sndinfo(struct sock *sk,
sk               2981 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               2994 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, info.snd_assoc_id);
sk               2996 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3008 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               3038 net/sctp/socket.c static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
sk               3057 net/sctp/socket.c 	err = security_sctp_bind_connect(sk, SCTP_PRIMARY_ADDR,
sk               3063 net/sctp/socket.c 	trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
sk               3080 net/sctp/socket.c static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
sk               3090 net/sctp/socket.c 	sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
sk               3106 net/sctp/socket.c static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
sk               3111 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               3119 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
sk               3123 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3172 net/sctp/socket.c static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
sk               3183 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
sk               3186 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3218 net/sctp/socket.c 		struct sctp_sock *sp = sctp_sk(sk);
sk               3240 net/sctp/socket.c static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
sk               3243 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               3284 net/sctp/socket.c static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
sk               3286 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               3308 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               3310 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3344 net/sctp/socket.c static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
sk               3354 net/sctp/socket.c 	sp = sctp_sk(sk);
sk               3365 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
sk               3389 net/sctp/socket.c 	err = security_sctp_bind_connect(sk, SCTP_SET_PEER_PRIMARY_ADDR,
sk               3408 net/sctp/socket.c static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
sk               3418 net/sctp/socket.c 	sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
sk               3437 net/sctp/socket.c static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
sk               3440 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               3449 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               3451 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3460 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               3499 net/sctp/socket.c static int sctp_setsockopt_fragment_interleave(struct sock *sk,
sk               3510 net/sctp/socket.c 	sctp_sk(sk)->frag_interleave = !!val;
sk               3512 net/sctp/socket.c 	if (!sctp_sk(sk)->frag_interleave)
sk               3513 net/sctp/socket.c 		sctp_sk(sk)->ep->intl_enable = 0;
sk               3535 net/sctp/socket.c static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
sk               3549 net/sctp/socket.c 	if (val > (sk->sk_rcvbuf >> 1))
sk               3552 net/sctp/socket.c 	sctp_sk(sk)->pd_point = val;
sk               3568 net/sctp/socket.c static int sctp_setsockopt_maxburst(struct sock *sk,
sk               3572 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               3591 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               3593 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3602 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               3624 net/sctp/socket.c static int sctp_setsockopt_auth_chunk(struct sock *sk,
sk               3628 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               3657 net/sctp/socket.c static int sctp_setsockopt_hmac_ident(struct sock *sk,
sk               3661 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               3697 net/sctp/socket.c static int sctp_setsockopt_auth_key(struct sock *sk,
sk               3701 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               3720 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
sk               3722 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3730 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               3763 net/sctp/socket.c static int sctp_setsockopt_active_key(struct sock *sk,
sk               3767 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               3777 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
sk               3779 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3785 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               3814 net/sctp/socket.c static int sctp_setsockopt_del_key(struct sock *sk,
sk               3818 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               3828 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
sk               3830 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3836 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               3865 net/sctp/socket.c static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
sk               3868 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               3878 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
sk               3880 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               3886 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               3924 net/sctp/socket.c static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
sk               3928 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               3934 net/sctp/socket.c 	if (!sctp_is_ep_boundall(sk) && val)
sk               3939 net/sctp/socket.c 	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
sk               3945 net/sctp/socket.c 		    &sock_net(sk)->sctp.auto_asconf_splist);
sk               3948 net/sctp/socket.c 	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
sk               3959 net/sctp/socket.c static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
sk               3973 net/sctp/socket.c 	if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
sk               3974 net/sctp/socket.c 		trans = sctp_addr_id2transport(sk, &val.spt_address,
sk               3986 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, val.spt_assoc_id);
sk               3988 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4003 net/sctp/socket.c 		struct sctp_sock *sp = sctp_sk(sk);
sk               4013 net/sctp/socket.c static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
sk               4024 net/sctp/socket.c 	sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
sk               4029 net/sctp/socket.c static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
sk               4040 net/sctp/socket.c 	sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
sk               4045 net/sctp/socket.c static int sctp_setsockopt_pr_supported(struct sock *sk,
sk               4058 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               4060 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4063 net/sctp/socket.c 	sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
sk               4068 net/sctp/socket.c static int sctp_setsockopt_default_prinfo(struct sock *sk,
sk               4072 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               4091 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, info.pr_assoc_id);
sk               4093 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4104 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               4125 net/sctp/socket.c static int sctp_setsockopt_reconfig_supported(struct sock *sk,
sk               4141 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               4143 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4146 net/sctp/socket.c 	sctp_sk(sk)->ep->reconf_enable = !!params.assoc_value;
sk               4154 net/sctp/socket.c static int sctp_setsockopt_enable_strreset(struct sock *sk,
sk               4158 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               4174 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               4176 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4186 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               4202 net/sctp/socket.c static int sctp_setsockopt_reset_streams(struct sock *sk,
sk               4224 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params->srs_assoc_id);
sk               4235 net/sctp/socket.c static int sctp_setsockopt_reset_assoc(struct sock *sk,
sk               4251 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, associd);
sk               4261 net/sctp/socket.c static int sctp_setsockopt_add_streams(struct sock *sk,
sk               4277 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.sas_assoc_id);
sk               4287 net/sctp/socket.c static int sctp_setsockopt_scheduler(struct sock *sk,
sk               4291 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               4306 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               4308 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4314 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               4335 net/sctp/socket.c static int sctp_setsockopt_scheduler_value(struct sock *sk,
sk               4352 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               4354 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4365 net/sctp/socket.c 	list_for_each_entry(asoc, &sctp_sk(sk)->ep->asocs, asocs) {
sk               4376 net/sctp/socket.c static int sctp_setsockopt_interleaving_supported(struct sock *sk,
sk               4380 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               4394 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               4396 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4399 net/sctp/socket.c 	if (!sock_net(sk)->sctp.intl_enable || !sp->frag_interleave) {
sk               4412 net/sctp/socket.c static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
sk               4417 net/sctp/socket.c 	if (!sctp_style(sk, TCP))
sk               4420 net/sctp/socket.c 	if (sctp_sk(sk)->ep->base.bind_addr.port)
sk               4429 net/sctp/socket.c 	sctp_sk(sk)->reuse = !!val;
sk               4455 net/sctp/socket.c static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
sk               4458 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               4474 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, param.se_assoc_id);
sk               4476 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4482 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               4503 net/sctp/socket.c static int sctp_setsockopt_asconf_supported(struct sock *sk,
sk               4520 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               4522 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4525 net/sctp/socket.c 	ep = sctp_sk(sk)->ep;
sk               4539 net/sctp/socket.c static int sctp_setsockopt_auth_supported(struct sock *sk,
sk               4556 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               4558 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4561 net/sctp/socket.c 	ep = sctp_sk(sk)->ep;
sk               4579 net/sctp/socket.c static int sctp_setsockopt_ecn_supported(struct sock *sk,
sk               4595 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               4597 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               4600 net/sctp/socket.c 	sctp_sk(sk)->ep->ecn_enable = !!params.assoc_value;
sk               4626 net/sctp/socket.c static int sctp_setsockopt(struct sock *sk, int level, int optname,
sk               4631 net/sctp/socket.c 	pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
sk               4640 net/sctp/socket.c 		struct sctp_af *af = sctp_sk(sk)->pf->af;
sk               4641 net/sctp/socket.c 		retval = af->setsockopt(sk, level, optname, optval, optlen);
sk               4645 net/sctp/socket.c 	lock_sock(sk);
sk               4650 net/sctp/socket.c 		retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
sk               4656 net/sctp/socket.c 		retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
sk               4662 net/sctp/socket.c 		retval = sctp_setsockopt_connectx_old(sk,
sk               4669 net/sctp/socket.c 		retval = sctp_setsockopt_connectx(sk,
sk               4675 net/sctp/socket.c 		retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
sk               4679 net/sctp/socket.c 		retval = sctp_setsockopt_events(sk, optval, optlen);
sk               4683 net/sctp/socket.c 		retval = sctp_setsockopt_autoclose(sk, optval, optlen);
sk               4687 net/sctp/socket.c 		retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
sk               4691 net/sctp/socket.c 		retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
sk               4694 net/sctp/socket.c 		retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
sk               4698 net/sctp/socket.c 		retval = sctp_setsockopt_initmsg(sk, optval, optlen);
sk               4701 net/sctp/socket.c 		retval = sctp_setsockopt_default_send_param(sk, optval,
sk               4705 net/sctp/socket.c 		retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
sk               4708 net/sctp/socket.c 		retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
sk               4711 net/sctp/socket.c 		retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
sk               4714 net/sctp/socket.c 		retval = sctp_setsockopt_nodelay(sk, optval, optlen);
sk               4717 net/sctp/socket.c 		retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
sk               4720 net/sctp/socket.c 		retval = sctp_setsockopt_associnfo(sk, optval, optlen);
sk               4723 net/sctp/socket.c 		retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
sk               4726 net/sctp/socket.c 		retval = sctp_setsockopt_maxseg(sk, optval, optlen);
sk               4729 net/sctp/socket.c 		retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
sk               4732 net/sctp/socket.c 		retval = sctp_setsockopt_context(sk, optval, optlen);
sk               4735 net/sctp/socket.c 		retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
sk               4738 net/sctp/socket.c 		retval = sctp_setsockopt_maxburst(sk, optval, optlen);
sk               4741 net/sctp/socket.c 		retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
sk               4744 net/sctp/socket.c 		retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
sk               4747 net/sctp/socket.c 		retval = sctp_setsockopt_auth_key(sk, optval, optlen);
sk               4750 net/sctp/socket.c 		retval = sctp_setsockopt_active_key(sk, optval, optlen);
sk               4753 net/sctp/socket.c 		retval = sctp_setsockopt_del_key(sk, optval, optlen);
sk               4756 net/sctp/socket.c 		retval = sctp_setsockopt_deactivate_key(sk, optval, optlen);
sk               4759 net/sctp/socket.c 		retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
sk               4762 net/sctp/socket.c 		retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
sk               4765 net/sctp/socket.c 		retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
sk               4768 net/sctp/socket.c 		retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
sk               4771 net/sctp/socket.c 		retval = sctp_setsockopt_pr_supported(sk, optval, optlen);
sk               4774 net/sctp/socket.c 		retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
sk               4777 net/sctp/socket.c 		retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
sk               4780 net/sctp/socket.c 		retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
sk               4783 net/sctp/socket.c 		retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
sk               4786 net/sctp/socket.c 		retval = sctp_setsockopt_reset_assoc(sk, optval, optlen);
sk               4789 net/sctp/socket.c 		retval = sctp_setsockopt_add_streams(sk, optval, optlen);
sk               4792 net/sctp/socket.c 		retval = sctp_setsockopt_scheduler(sk, optval, optlen);
sk               4795 net/sctp/socket.c 		retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
sk               4798 net/sctp/socket.c 		retval = sctp_setsockopt_interleaving_supported(sk, optval,
sk               4802 net/sctp/socket.c 		retval = sctp_setsockopt_reuse_port(sk, optval, optlen);
sk               4805 net/sctp/socket.c 		retval = sctp_setsockopt_event(sk, optval, optlen);
sk               4808 net/sctp/socket.c 		retval = sctp_setsockopt_asconf_supported(sk, optval, optlen);
sk               4811 net/sctp/socket.c 		retval = sctp_setsockopt_auth_supported(sk, optval, optlen);
sk               4814 net/sctp/socket.c 		retval = sctp_setsockopt_ecn_supported(sk, optval, optlen);
sk               4821 net/sctp/socket.c 	release_sock(sk);
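
The run above is the sctp_setsockopt() dispatcher: every SOL_SCTP option is routed to its handler with the socket lock held (lock_sock()/release_sock() bracket the switch), while options at other levels are handed to the address family via the af->setsockopt call shown earlier. A minimal userspace sketch of exercising one of these handlers, assuming the lksctp-tools <netinet/sctp.h> header is available; SCTP_NODELAY is one of the options dispatched above:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	int one = 1;

	if (fd < 0) {
		perror("socket(IPPROTO_SCTP)");
		return 1;
	}
	/* Level IPPROTO_SCTP (== SOL_SCTP) routes the call into the
	 * dispatcher above; SCTP_NODELAY lands in
	 * sctp_setsockopt_nodelay() under lock_sock(). */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &one, sizeof(one)) < 0)
		perror("setsockopt(SCTP_NODELAY)");
	return 0;
}
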
sk               4843 net/sctp/socket.c static int sctp_connect(struct sock *sk, struct sockaddr *addr,
sk               4849 net/sctp/socket.c 	lock_sock(sk);
sk               4850 net/sctp/socket.c 	pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
sk               4856 net/sctp/socket.c 		err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
sk               4858 net/sctp/socket.c 	release_sock(sk);
sk               4871 net/sctp/socket.c 	return sctp_connect(sock->sk, uaddr, addr_len, flags);
sk               4875 net/sctp/socket.c static int sctp_disconnect(struct sock *sk, int flags)
sk               4887 net/sctp/socket.c static struct sock *sctp_accept(struct sock *sk, int flags, int *err, bool kern)
sk               4896 net/sctp/socket.c 	lock_sock(sk);
sk               4898 net/sctp/socket.c 	sp = sctp_sk(sk);
sk               4901 net/sctp/socket.c 	if (!sctp_style(sk, TCP)) {
sk               4906 net/sctp/socket.c 	if (!sctp_sstate(sk, LISTENING)) {
sk               4911 net/sctp/socket.c 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk               4913 net/sctp/socket.c 	error = sctp_wait_for_accept(sk, timeo);
sk               4922 net/sctp/socket.c 	newsk = sp->pf->create_accept_sk(sk, asoc, kern);
sk               4931 net/sctp/socket.c 	error = sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP);
sk               4938 net/sctp/socket.c 	release_sock(sk);
sk               4944 net/sctp/socket.c static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
sk               4948 net/sctp/socket.c 	lock_sock(sk);
sk               4954 net/sctp/socket.c 	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk               4962 net/sctp/socket.c 		skb = skb_peek(&sk->sk_receive_queue);
sk               4978 net/sctp/socket.c 	release_sock(sk);
sk               4986 net/sctp/socket.c static int sctp_init_sock(struct sock *sk)
sk               4988 net/sctp/socket.c 	struct net *net = sock_net(sk);
sk               4991 net/sctp/socket.c 	pr_debug("%s: sk:%p\n", __func__, sk);
sk               4993 net/sctp/socket.c 	sp = sctp_sk(sk);
sk               4996 net/sctp/socket.c 	switch (sk->sk_type) {
sk               5007 net/sctp/socket.c 	sk->sk_gso_type = SKB_GSO_SCTP;
sk               5093 net/sctp/socket.c 	sp->pf = sctp_get_pf_specific(sk->sk_family);
sk               5104 net/sctp/socket.c 	sp->ep = sctp_endpoint_new(sk, GFP_KERNEL);
sk               5110 net/sctp/socket.c 	sk->sk_destruct = sctp_destruct_sock;
sk               5115 net/sctp/socket.c 	sk_sockets_allocated_inc(sk);
sk               5116 net/sctp/socket.c 	sock_prot_inuse_add(net, sk->sk_prot, 1);
sk               5122 net/sctp/socket.c 		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
sk               5126 net/sctp/socket.c 		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
sk               5139 net/sctp/socket.c static void sctp_destroy_sock(struct sock *sk)
sk               5143 net/sctp/socket.c 	pr_debug("%s: sk:%p\n", __func__, sk);
sk               5146 net/sctp/socket.c 	sp = sctp_sk(sk);
sk               5159 net/sctp/socket.c 	sk_sockets_allocated_dec(sk);
sk               5160 net/sctp/socket.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk               5165 net/sctp/socket.c static void sctp_destruct_sock(struct sock *sk)
sk               5167 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               5172 net/sctp/socket.c 	inet_sock_destruct(sk);
sk               5191 net/sctp/socket.c static void sctp_shutdown(struct sock *sk, int how)
sk               5193 net/sctp/socket.c 	struct net *net = sock_net(sk);
sk               5196 net/sctp/socket.c 	if (!sctp_style(sk, TCP))
sk               5199 net/sctp/socket.c 	ep = sctp_sk(sk)->ep;
sk               5203 net/sctp/socket.c 		inet_sk_set_state(sk, SCTP_SS_CLOSING);
sk               5210 net/sctp/socket.c int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
sk               5219 net/sctp/socket.c 		struct sctp_sock *sp = sctp_sk(sk);
sk               5326 net/sctp/socket.c 		if (net_eq(sock_net(t->asoc->base.sk), net) &&
sk               5438 net/sctp/socket.c static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
sk               5460 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, associd);
sk               5481 net/sctp/socket.c 	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
sk               5518 net/sctp/socket.c static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
sk               5537 net/sctp/socket.c 	transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address,
sk               5573 net/sctp/socket.c static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
sk               5582 net/sctp/socket.c 	val = (sctp_sk(sk)->disable_fragments == 1);
sk               5595 net/sctp/socket.c static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
sk               5610 net/sctp/socket.c 		sn_type[i] = sctp_ulpevent_type_enabled(sctp_sk(sk)->subscribe,
sk               5630 net/sctp/socket.c static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
sk               5633 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               5640 net/sctp/socket.c 	if (put_user(sctp_sk(sk)->autoclose, (int __user *)optval))
sk               5646 net/sctp/socket.c int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
sk               5648 net/sctp/socket.c 	struct sctp_association *asoc = sctp_id2assoc(sk, id);
sk               5649 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               5654 net/sctp/socket.c 	if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
sk               5663 net/sctp/socket.c 	if (!sctp_style(sk, UDP))
sk               5667 net/sctp/socket.c 	err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock);
sk               5671 net/sctp/socket.c 	sctp_copy_sock(sock->sk, sk, asoc);
sk               5677 net/sctp/socket.c 	sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
sk               5678 net/sctp/socket.c 	sp->pf->copy_ip_options(sk, sock->sk);
sk               5683 net/sctp/socket.c 	err = sctp_sock_migrate(sk, sock->sk, asoc,
sk               5696 net/sctp/socket.c static int sctp_getsockopt_peeloff_common(struct sock *sk, sctp_peeloff_arg_t *peeloff,
sk               5702 net/sctp/socket.c 	retval = sctp_do_peeloff(sk, peeloff->associd, &newsock);
sk               5721 net/sctp/socket.c 	pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk,
sk               5732 net/sctp/socket.c static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen)
sk               5744 net/sctp/socket.c 	retval = sctp_getsockopt_peeloff_common(sk, &peeloff, &newfile, 0);
sk               5765 net/sctp/socket.c static int sctp_getsockopt_peeloff_flags(struct sock *sk, int len,
sk               5778 net/sctp/socket.c 	retval = sctp_getsockopt_peeloff_common(sk, &peeloff.p_arg,
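
sctp_do_peeloff() and the SCTP_SOCKOPT_PEELOFF getsockopt wrappers above detach a single association from a one-to-many (UDP-style) socket and migrate it onto a freshly created one-to-one socket. A hedged sketch using the lksctp-tools sctp_peeloff() helper, which wraps this getsockopt (link with -lsctp); the helper name peel_one and the caller-supplied assoc_id are illustrative:

#include <netinet/in.h>
#include <netinet/sctp.h>

/* Peel one association off a one-to-many SCTP socket.  assoc_id would
 * normally be taken from an SCTP_ASSOC_CHANGE notification. */
int peel_one(int seqpacket_fd, sctp_assoc_t assoc_id)
{
	int peeled = sctp_peeloff(seqpacket_fd, assoc_id);

	/* On success the new fd behaves like a one-to-one (TCP-style)
	 * socket bound to that single association. */
	return peeled;	/* < 0 on error with errno set */
}
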
sk               5932 net/sctp/socket.c static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
sk               5938 net/sctp/socket.c 	struct sctp_sock        *sp = sctp_sk(sk);
sk               5955 net/sctp/socket.c 	if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
sk               5956 net/sctp/socket.c 		trans = sctp_addr_id2transport(sk, &params.spp_address,
sk               5968 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.spp_assoc_id);
sk               5970 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               6075 net/sctp/socket.c static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
sk               6081 net/sctp/socket.c 	struct sctp_sock        *sp = sctp_sk(sk);
sk               6103 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.sack_assoc_id);
sk               6105 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               6149 net/sctp/socket.c static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
sk               6156 net/sctp/socket.c 	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
sk               6162 net/sctp/socket.c static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
sk               6171 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               6183 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
sk               6193 net/sctp/socket.c 		addrlen = sctp_get_pf_specific(sk->sk_family)
sk               6213 net/sctp/socket.c static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
sk               6220 net/sctp/socket.c 	struct net *net = sock_net(sk);
sk               6227 net/sctp/socket.c 		if ((PF_INET == sk->sk_family) &&
sk               6230 net/sctp/socket.c 		if ((PF_INET6 == sk->sk_family) &&
sk               6231 net/sctp/socket.c 		    inet_v6_ipv6only(sk) &&
sk               6238 net/sctp/socket.c 		addrlen = sctp_get_pf_specific(sk->sk_family)
sk               6239 net/sctp/socket.c 			      ->addr_to_user(sctp_sk(sk), &temp);
sk               6258 net/sctp/socket.c static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
sk               6268 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               6289 net/sctp/socket.c 		bp = &sctp_sk(sk)->ep->base.bind_addr;
sk               6291 net/sctp/socket.c 		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
sk               6310 net/sctp/socket.c 		if (sctp_is_any(sk, &addr->a)) {
sk               6311 net/sctp/socket.c 			cnt = sctp_copy_laddrs(sk, bp->port, addrs,
sk               6328 net/sctp/socket.c 		addrlen = sctp_get_pf_specific(sk->sk_family)
sk               6366 net/sctp/socket.c static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
sk               6371 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               6381 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
sk               6391 net/sctp/socket.c 	sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
sk               6408 net/sctp/socket.c static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len,
sk               6418 net/sctp/socket.c 	adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
sk               6447 net/sctp/socket.c static int sctp_getsockopt_default_send_param(struct sock *sk,
sk               6451 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               6463 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
sk               6465 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               6493 net/sctp/socket.c static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
sk               6497 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               6509 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, info.snd_assoc_id);
sk               6511 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               6544 net/sctp/socket.c static int sctp_getsockopt_nodelay(struct sock *sk, int len,
sk               6553 net/sctp/socket.c 	val = (sctp_sk(sk)->nodelay == 1);
sk               6573 net/sctp/socket.c static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
sk               6587 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
sk               6590 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               6600 net/sctp/socket.c 		struct sctp_sock *sp = sctp_sk(sk);
sk               6627 net/sctp/socket.c static int sctp_getsockopt_associnfo(struct sock *sk, int len,
sk               6645 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
sk               6648 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               6665 net/sctp/socket.c 		struct sctp_sock *sp = sctp_sk(sk);
sk               6696 net/sctp/socket.c static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
sk               6700 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               6719 net/sctp/socket.c static int sctp_getsockopt_context(struct sock *sk, int len,
sk               6733 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               6735 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               6739 net/sctp/socket.c 				  : sctp_sk(sk)->default_rcv_context;
sk               6776 net/sctp/socket.c static int sctp_getsockopt_maxseg(struct sock *sk, int len,
sk               6796 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               6798 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               6804 net/sctp/socket.c 		params.assoc_value = sctp_sk(sk)->user_frag;
sk               6823 net/sctp/socket.c static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
sk               6833 net/sctp/socket.c 	val = sctp_sk(sk)->frag_interleave;
sk               6846 net/sctp/socket.c static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
sk               6857 net/sctp/socket.c 	val = sctp_sk(sk)->pd_point;
sk               6870 net/sctp/socket.c static int sctp_getsockopt_maxburst(struct sock *sk, int len,
sk               6891 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               6893 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               6896 net/sctp/socket.c 	params.assoc_value = asoc ? asoc->max_burst : sctp_sk(sk)->max_burst;
sk               6910 net/sctp/socket.c static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
sk               6913 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               6946 net/sctp/socket.c static int sctp_getsockopt_active_key(struct sock *sk, int len,
sk               6949 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               6960 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
sk               6961 net/sctp/socket.c 	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
sk               6982 net/sctp/socket.c static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
sk               6999 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
sk               7026 net/sctp/socket.c static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
sk               7029 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               7044 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
sk               7046 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               7082 net/sctp/socket.c static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
sk               7085 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               7089 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               7113 net/sctp/socket.c static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
sk               7122 net/sctp/socket.c 	if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
sk               7138 net/sctp/socket.c static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
sk               7141 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               7146 net/sctp/socket.c 	if (sctp_style(sk, TCP))
sk               7187 net/sctp/socket.c static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
sk               7202 net/sctp/socket.c 	if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
sk               7203 net/sctp/socket.c 		trans = sctp_addr_id2transport(sk, &val.spt_address,
sk               7214 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, val.spt_assoc_id);
sk               7216 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               7223 net/sctp/socket.c 		struct sctp_sock *sp = sctp_sk(sk);
sk               7242 net/sctp/socket.c static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
sk               7259 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, sas.sas_assoc_id);
sk               7300 net/sctp/socket.c static int sctp_getsockopt_recvrcvinfo(struct sock *sk,	int len,
sk               7310 net/sctp/socket.c 	if (sctp_sk(sk)->recvrcvinfo)
sk               7320 net/sctp/socket.c static int sctp_getsockopt_recvnxtinfo(struct sock *sk,	int len,
sk               7330 net/sctp/socket.c 	if (sctp_sk(sk)->recvnxtinfo)
sk               7340 net/sctp/socket.c static int sctp_getsockopt_pr_supported(struct sock *sk, int len,
sk               7357 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               7359 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               7365 net/sctp/socket.c 				  : sctp_sk(sk)->ep->prsctp_enable;
sk               7379 net/sctp/socket.c static int sctp_getsockopt_default_prinfo(struct sock *sk, int len,
sk               7396 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, info.pr_assoc_id);
sk               7398 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               7407 net/sctp/socket.c 		struct sctp_sock *sp = sctp_sk(sk);
sk               7425 net/sctp/socket.c static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
sk               7448 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
sk               7484 net/sctp/socket.c static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
sk               7508 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
sk               7548 net/sctp/socket.c static int sctp_getsockopt_reconfig_supported(struct sock *sk, int len,
sk               7565 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               7567 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               7573 net/sctp/socket.c 				  : sctp_sk(sk)->ep->reconf_enable;
sk               7587 net/sctp/socket.c static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
sk               7604 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               7606 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               7612 net/sctp/socket.c 				  : sctp_sk(sk)->ep->strreset_enable;
sk               7626 net/sctp/socket.c static int sctp_getsockopt_scheduler(struct sock *sk, int len,
sk               7643 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               7645 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               7651 net/sctp/socket.c 				  : sctp_sk(sk)->default_ss;
sk               7665 net/sctp/socket.c static int sctp_getsockopt_scheduler_value(struct sock *sk, int len,
sk               7682 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               7707 net/sctp/socket.c static int sctp_getsockopt_interleaving_supported(struct sock *sk, int len,
sk               7724 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               7726 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               7732 net/sctp/socket.c 				  : sctp_sk(sk)->ep->intl_enable;
sk               7746 net/sctp/socket.c static int sctp_getsockopt_reuse_port(struct sock *sk, int len,
sk               7756 net/sctp/socket.c 	val = sctp_sk(sk)->reuse;
sk               7766 net/sctp/socket.c static int sctp_getsockopt_event(struct sock *sk, int len, char __user *optval,
sk               7784 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, param.se_assoc_id);
sk               7786 net/sctp/socket.c 	    sctp_style(sk, UDP))
sk               7789 net/sctp/socket.c 	subscribe = asoc ? asoc->subscribe : sctp_sk(sk)->subscribe;
sk               7801 net/sctp/socket.c static int sctp_getsockopt_asconf_supported(struct sock *sk, int len,
sk               7818 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               7820 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               7826 net/sctp/socket.c 				  : sctp_sk(sk)->ep->asconf_enable;
sk               7840 net/sctp/socket.c static int sctp_getsockopt_auth_supported(struct sock *sk, int len,
sk               7857 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               7859 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               7865 net/sctp/socket.c 				  : sctp_sk(sk)->ep->auth_enable;
sk               7879 net/sctp/socket.c static int sctp_getsockopt_ecn_supported(struct sock *sk, int len,
sk               7896 net/sctp/socket.c 	asoc = sctp_id2assoc(sk, params.assoc_id);
sk               7898 net/sctp/socket.c 	    sctp_style(sk, UDP)) {
sk               7904 net/sctp/socket.c 				  : sctp_sk(sk)->ep->ecn_enable;
sk               7918 net/sctp/socket.c static int sctp_getsockopt(struct sock *sk, int level, int optname,
sk               7924 net/sctp/socket.c 	pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
sk               7933 net/sctp/socket.c 		struct sctp_af *af = sctp_sk(sk)->pf->af;
sk               7935 net/sctp/socket.c 		retval = af->getsockopt(sk, level, optname, optval, optlen);
sk               7945 net/sctp/socket.c 	lock_sock(sk);
sk               7949 net/sctp/socket.c 		retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen);
sk               7952 net/sctp/socket.c 		retval = sctp_getsockopt_disable_fragments(sk, len, optval,
sk               7956 net/sctp/socket.c 		retval = sctp_getsockopt_events(sk, len, optval, optlen);
sk               7959 net/sctp/socket.c 		retval = sctp_getsockopt_autoclose(sk, len, optval, optlen);
sk               7962 net/sctp/socket.c 		retval = sctp_getsockopt_peeloff(sk, len, optval, optlen);
sk               7965 net/sctp/socket.c 		retval = sctp_getsockopt_peeloff_flags(sk, len, optval, optlen);
sk               7968 net/sctp/socket.c 		retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
sk               7972 net/sctp/socket.c 		retval = sctp_getsockopt_delayed_ack(sk, len, optval,
sk               7976 net/sctp/socket.c 		retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
sk               7979 net/sctp/socket.c 		retval = sctp_getsockopt_peer_addrs(sk, len, optval,
sk               7983 net/sctp/socket.c 		retval = sctp_getsockopt_local_addrs(sk, len, optval,
sk               7987 net/sctp/socket.c 		retval = sctp_getsockopt_connectx3(sk, len, optval, optlen);
sk               7990 net/sctp/socket.c 		retval = sctp_getsockopt_default_send_param(sk, len,
sk               7994 net/sctp/socket.c 		retval = sctp_getsockopt_default_sndinfo(sk, len,
sk               7998 net/sctp/socket.c 		retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
sk               8001 net/sctp/socket.c 		retval = sctp_getsockopt_nodelay(sk, len, optval, optlen);
sk               8004 net/sctp/socket.c 		retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen);
sk               8007 net/sctp/socket.c 		retval = sctp_getsockopt_associnfo(sk, len, optval, optlen);
sk               8010 net/sctp/socket.c 		retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen);
sk               8013 net/sctp/socket.c 		retval = sctp_getsockopt_maxseg(sk, len, optval, optlen);
sk               8016 net/sctp/socket.c 		retval = sctp_getsockopt_peer_addr_info(sk, len, optval,
sk               8020 net/sctp/socket.c 		retval = sctp_getsockopt_adaptation_layer(sk, len, optval,
sk               8024 net/sctp/socket.c 		retval = sctp_getsockopt_context(sk, len, optval, optlen);
sk               8027 net/sctp/socket.c 		retval = sctp_getsockopt_fragment_interleave(sk, len, optval,
sk               8031 net/sctp/socket.c 		retval = sctp_getsockopt_partial_delivery_point(sk, len, optval,
sk               8035 net/sctp/socket.c 		retval = sctp_getsockopt_maxburst(sk, len, optval, optlen);
sk               8044 net/sctp/socket.c 		retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen);
sk               8047 net/sctp/socket.c 		retval = sctp_getsockopt_active_key(sk, len, optval, optlen);
sk               8050 net/sctp/socket.c 		retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval,
sk               8054 net/sctp/socket.c 		retval = sctp_getsockopt_local_auth_chunks(sk, len, optval,
sk               8058 net/sctp/socket.c 		retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
sk               8061 net/sctp/socket.c 		retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
sk               8064 net/sctp/socket.c 		retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
sk               8067 net/sctp/socket.c 		retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
sk               8070 net/sctp/socket.c 		retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
sk               8073 net/sctp/socket.c 		retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
sk               8076 net/sctp/socket.c 		retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
sk               8079 net/sctp/socket.c 		retval = sctp_getsockopt_pr_supported(sk, len, optval, optlen);
sk               8082 net/sctp/socket.c 		retval = sctp_getsockopt_default_prinfo(sk, len, optval,
sk               8086 net/sctp/socket.c 		retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
sk               8090 net/sctp/socket.c 		retval = sctp_getsockopt_pr_streamstatus(sk, len, optval,
sk               8094 net/sctp/socket.c 		retval = sctp_getsockopt_reconfig_supported(sk, len, optval,
sk               8098 net/sctp/socket.c 		retval = sctp_getsockopt_enable_strreset(sk, len, optval,
sk               8102 net/sctp/socket.c 		retval = sctp_getsockopt_scheduler(sk, len, optval,
sk               8106 net/sctp/socket.c 		retval = sctp_getsockopt_scheduler_value(sk, len, optval,
sk               8110 net/sctp/socket.c 		retval = sctp_getsockopt_interleaving_supported(sk, len, optval,
sk               8114 net/sctp/socket.c 		retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen);
sk               8117 net/sctp/socket.c 		retval = sctp_getsockopt_event(sk, len, optval, optlen);
sk               8120 net/sctp/socket.c 		retval = sctp_getsockopt_asconf_supported(sk, len, optval,
sk               8124 net/sctp/socket.c 		retval = sctp_getsockopt_auth_supported(sk, len, optval,
sk               8128 net/sctp/socket.c 		retval = sctp_getsockopt_ecn_supported(sk, len, optval, optlen);
sk               8135 net/sctp/socket.c 	release_sock(sk);
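
Mirroring the setsockopt side, sctp_getsockopt() above takes the socket lock and dispatches each SOL_SCTP option to its reader. A small sketch querying one of them from userspace, assuming lksctp-tools headers; SCTP_STATUS is served by sctp_getsockopt_sctp_status() in the switch above, and the field names follow the lksctp definition of struct sctp_status:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int print_status(int fd)
{
	struct sctp_status status;
	socklen_t len = sizeof(status);

	memset(&status, 0, sizeof(status));
	/* sstat_assoc_id == 0 is fine on a one-to-one socket; a
	 * one-to-many socket needs a specific association id here. */
	if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) < 0) {
		perror("getsockopt(SCTP_STATUS)");
		return -1;
	}
	printf("state=%d rwnd=%u in-streams=%u out-streams=%u\n",
	       status.sstat_state, status.sstat_rwnd,
	       status.sstat_instrms, status.sstat_outstrms);
	return 0;
}
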
sk               8139 net/sctp/socket.c static int sctp_hash(struct sock *sk)
sk               8145 net/sctp/socket.c static void sctp_unhash(struct sock *sk)
sk               8165 net/sctp/socket.c static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
sk               8167 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               8168 net/sctp/socket.c 	bool reuse = (sk->sk_reuse || sp->reuse);
sk               8170 net/sctp/socket.c 	kuid_t uid = sock_i_uid(sk);
sk               8185 net/sctp/socket.c 		struct net *net = sock_net(sk);
sk               8197 net/sctp/socket.c 			index = sctp_phashfn(sock_net(sk), rover);
sk               8202 net/sctp/socket.c 				    net_eq(sock_net(sk), pp->net))
sk               8226 net/sctp/socket.c 		head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
sk               8229 net/sctp/socket.c 			if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
sk               8247 net/sctp/socket.c 		     sk->sk_state != SCTP_SS_LISTENING) ||
sk               8248 net/sctp/socket.c 		    (pp->fastreuseport && sk->sk_reuseport &&
sk               8266 net/sctp/socket.c 			if (sk == sk2 ||
sk               8269 net/sctp/socket.c 			    (sk->sk_reuseport && sk2->sk_reuseport &&
sk               8285 net/sctp/socket.c 	if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
sk               8293 net/sctp/socket.c 		if (reuse && sk->sk_state != SCTP_SS_LISTENING)
sk               8298 net/sctp/socket.c 		if (sk->sk_reuseport) {
sk               8306 net/sctp/socket.c 		    (!reuse || sk->sk_state == SCTP_SS_LISTENING))
sk               8310 net/sctp/socket.c 		    (!sk->sk_reuseport || !uid_eq(pp->fastuid, uid)))
sk               8320 net/sctp/socket.c 		inet_sk(sk)->inet_num = snum;
sk               8321 net/sctp/socket.c 		sk_add_bind_node(sk, &pp->owner);
sk               8337 net/sctp/socket.c static int sctp_get_port(struct sock *sk, unsigned short snum)
sk               8340 net/sctp/socket.c 	struct sctp_af *af = sctp_sk(sk)->pf->af;
sk               8343 net/sctp/socket.c 	af->from_sk(&addr, sk);
sk               8347 net/sctp/socket.c 	return sctp_get_port_local(sk, &addr);
sk               8353 net/sctp/socket.c static int sctp_listen_start(struct sock *sk, int backlog)
sk               8355 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               8369 net/sctp/socket.c 		sctp_sk(sk)->hmac = tfm;
sk               8383 net/sctp/socket.c 	inet_sk_set_state(sk, SCTP_SS_LISTENING);
sk               8385 net/sctp/socket.c 		if (sctp_autobind(sk))
sk               8388 net/sctp/socket.c 		if (sctp_get_port(sk, inet_sk(sk)->inet_num)) {
sk               8389 net/sctp/socket.c 			inet_sk_set_state(sk, SCTP_SS_CLOSED);
sk               8394 net/sctp/socket.c 	sk->sk_max_ack_backlog = backlog;
sk               8414 net/sctp/socket.c 	struct sock *sk = sock->sk;
sk               8415 net/sctp/socket.c 	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
sk               8421 net/sctp/socket.c 	lock_sock(sk);
sk               8424 net/sctp/socket.c 	if (sctp_style(sk, UDP_HIGH_BANDWIDTH))
sk               8430 net/sctp/socket.c 	if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
sk               8435 net/sctp/socket.c 		if (sctp_sstate(sk, CLOSED))
sk               8440 net/sctp/socket.c 		sk->sk_state = SCTP_SS_CLOSED;
sk               8441 net/sctp/socket.c 		if (sk->sk_reuse || sctp_sk(sk)->reuse)
sk               8442 net/sctp/socket.c 			sctp_sk(sk)->bind_hash->fastreuse = 1;
sk               8447 net/sctp/socket.c 	if (sctp_sstate(sk, LISTENING))
sk               8448 net/sctp/socket.c 		sk->sk_max_ack_backlog = backlog;
sk               8450 net/sctp/socket.c 		err = sctp_listen_start(sk, backlog);
sk               8457 net/sctp/socket.c 	release_sock(sk);
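
sctp_inet_listen() above accepts both socket styles (rejecting only UDP_HIGH_BANDWIDTH), autobinds an ephemeral port if none is bound, and moves the endpoint to SCTP_SS_LISTENING via sctp_listen_start(). From userspace the same path is reached with an ordinary bind()+listen(); a minimal sketch that works for either a TCP-style (SOCK_STREAM) or UDP-style (SOCK_SEQPACKET) listener:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

int sctp_listener(unsigned short port, int type /* SOCK_STREAM or SOCK_SEQPACKET */)
{
	struct sockaddr_in addr;
	int fd = socket(AF_INET, type, IPPROTO_SCTP);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 8) < 0) {
		perror("bind/listen");
		return -1;
	}
	return fd;	/* ready for accept() (TCP-style) or recvmsg() */
}
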
sk               8476 net/sctp/socket.c 	struct sock *sk = sock->sk;
sk               8477 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               8480 net/sctp/socket.c 	poll_wait(file, sk_sleep(sk), wait);
sk               8482 net/sctp/socket.c 	sock_rps_record_flow(sk);
sk               8487 net/sctp/socket.c 	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk               8494 net/sctp/socket.c 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
sk               8496 net/sctp/socket.c 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
sk               8497 net/sctp/socket.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               8499 net/sctp/socket.c 	if (sk->sk_shutdown == SHUTDOWN_MASK)
sk               8503 net/sctp/socket.c 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk               8507 net/sctp/socket.c 	if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
sk               8511 net/sctp/socket.c 	if (sctp_writeable(sk)) {
sk               8514 net/sctp/socket.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk               8523 net/sctp/socket.c 		if (sctp_writeable(sk))
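
sctp_poll() above assembles the readiness mask from sk_err, the error queue, sk_shutdown, the receive queue and sctp_writeable(); userspace sees it through the usual poll()/epoll interface. A trivial sketch (wait_readable is an illustrative helper):

#include <poll.h>

int wait_readable(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int n = poll(&pfd, 1, timeout_ms);

	if (n > 0 && (pfd.revents & POLLIN))
		return 1;	/* data or a new association is queued */
	return n;		/* 0 on timeout, < 0 on error */
}
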
sk               8561 net/sctp/socket.c static inline void __sctp_put_port(struct sock *sk)
sk               8564 net/sctp/socket.c 		&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
sk               8565 net/sctp/socket.c 						  inet_sk(sk)->inet_num)];
sk               8569 net/sctp/socket.c 	pp = sctp_sk(sk)->bind_hash;
sk               8570 net/sctp/socket.c 	__sk_del_bind_node(sk);
sk               8571 net/sctp/socket.c 	sctp_sk(sk)->bind_hash = NULL;
sk               8572 net/sctp/socket.c 	inet_sk(sk)->inet_num = 0;
sk               8577 net/sctp/socket.c void sctp_put_port(struct sock *sk)
sk               8580 net/sctp/socket.c 	__sctp_put_port(sk);
sk               8590 net/sctp/socket.c static int sctp_autobind(struct sock *sk)
sk               8597 net/sctp/socket.c 	af = sctp_sk(sk)->pf->af;
sk               8599 net/sctp/socket.c 	port = htons(inet_sk(sk)->inet_num);
sk               8602 net/sctp/socket.c 	return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
sk               8789 net/sctp/socket.c static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p)
sk               8794 net/sctp/socket.c 	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               8797 net/sctp/socket.c 	error = sock_error(sk);
sk               8801 net/sctp/socket.c 	if (!skb_queue_empty(&sk->sk_receive_queue))
sk               8805 net/sctp/socket.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               8814 net/sctp/socket.c 	if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING))
sk               8826 net/sctp/socket.c 	release_sock(sk);
sk               8828 net/sctp/socket.c 	lock_sock(sk);
sk               8831 net/sctp/socket.c 	finish_wait(sk_sleep(sk), &wait);
sk               8838 net/sctp/socket.c 	finish_wait(sk_sleep(sk), &wait);
sk               8847 net/sctp/socket.c struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
sk               8854 net/sctp/socket.c 	timeo = sock_rcvtimeo(sk, noblock);
sk               8868 net/sctp/socket.c 			skb = skb_peek(&sk->sk_receive_queue);
sk               8872 net/sctp/socket.c 			skb = __skb_dequeue(&sk->sk_receive_queue);
sk               8879 net/sctp/socket.c 		error = sock_error(sk);
sk               8883 net/sctp/socket.c 		if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               8886 net/sctp/socket.c 		if (sk_can_busy_loop(sk)) {
sk               8887 net/sctp/socket.c 			sk_busy_loop(sk, noblock);
sk               8889 net/sctp/socket.c 			if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk               8897 net/sctp/socket.c 	} while (sctp_wait_for_packet(sk, err, &timeo) == 0);
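
sctp_skb_recv_datagram() above peeks or dequeues from sk_receive_queue, busy-polls when enabled, and otherwise blocks in sctp_wait_for_packet(), which drops the socket lock around the wait. Userspace reaches it through recvmsg(); a hedged sketch using the lksctp-tools sctp_recvmsg() wrapper (link with -lsctp), which also returns the per-message sndrcvinfo:

#include <sys/types.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Receive one SCTP message plus its sndrcvinfo; recv_one is an
 * illustrative helper name. */
ssize_t recv_one(int fd, char *buf, size_t len)
{
	struct sctp_sndrcvinfo sinfo;
	int flags = 0;
	int n = sctp_recvmsg(fd, buf, len, NULL, NULL, &sinfo, &flags);

	/* flags carries MSG_EOR once a complete message was delivered. */
	return n;
}
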
sk               8909 net/sctp/socket.c 	struct sock *sk = asoc->base.sk;
sk               8917 net/sctp/socket.c 	if (sctp_writeable(sk)) {
sk               8921 net/sctp/socket.c 		wq = rcu_dereference(sk->sk_wq);
sk               8930 net/sctp/socket.c 			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
sk               8937 net/sctp/socket.c static void sctp_wake_up_waiters(struct sock *sk,
sk               8952 net/sctp/socket.c 		return sctp_write_space(sk);
sk               8967 net/sctp/socket.c 		if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs))
sk               8985 net/sctp/socket.c 	struct sock *sk = asoc->base.sk;
sk               8987 net/sctp/socket.c 	sk_mem_uncharge(sk, skb->truesize);
sk               8988 net/sctp/socket.c 	sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
sk               8991 net/sctp/socket.c 				      &sk->sk_wmem_alloc));
sk               9014 net/sctp/socket.c 	sctp_wake_up_waiters(sk, asoc);
sk               9026 net/sctp/socket.c 	struct sock *sk = skb->sk;
sk               9029 net/sctp/socket.c 	atomic_sub(event->rmem_len, &sk->sk_rmem_alloc);
sk               9034 net/sctp/socket.c 	sk_mem_uncharge(sk, event->rmem_len);
sk               9042 net/sctp/socket.c 	struct sock *sk = asoc->base.sk;
sk               9061 net/sctp/socket.c 		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
sk               9065 net/sctp/socket.c 		if (sk_under_memory_pressure(sk))
sk               9066 net/sctp/socket.c 			sk_mem_reclaim(sk);
sk               9068 net/sctp/socket.c 		    sk_wmem_schedule(sk, msg_len))
sk               9074 net/sctp/socket.c 		release_sock(sk);
sk               9076 net/sctp/socket.c 		lock_sock(sk);
sk               9077 net/sctp/socket.c 		if (sk != asoc->base.sk)
sk               9108 net/sctp/socket.c void sctp_data_ready(struct sock *sk)
sk               9113 net/sctp/socket.c 	wq = rcu_dereference(sk->sk_wq);
sk               9117 net/sctp/socket.c 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
sk               9122 net/sctp/socket.c void sctp_write_space(struct sock *sk)
sk               9127 net/sctp/socket.c 	list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) {
sk               9143 net/sctp/socket.c static bool sctp_writeable(struct sock *sk)
sk               9145 net/sctp/socket.c 	return sk->sk_sndbuf > sk->sk_wmem_queued;
sk               9153 net/sctp/socket.c 	struct sock *sk = asoc->base.sk;
sk               9168 net/sctp/socket.c 		if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               9170 net/sctp/socket.c 		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
sk               9182 net/sctp/socket.c 		release_sock(sk);
sk               9184 net/sctp/socket.c 		lock_sock(sk);
sk               9213 net/sctp/socket.c static int sctp_wait_for_accept(struct sock *sk, long timeo)
sk               9219 net/sctp/socket.c 	ep = sctp_sk(sk)->ep;
sk               9223 net/sctp/socket.c 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
sk               9227 net/sctp/socket.c 			release_sock(sk);
sk               9229 net/sctp/socket.c 			lock_sock(sk);
sk               9233 net/sctp/socket.c 		if (!sctp_sstate(sk, LISTENING))
sk               9249 net/sctp/socket.c 	finish_wait(sk_sleep(sk), &wait);
sk               9254 net/sctp/socket.c static void sctp_wait_for_close(struct sock *sk, long timeout)
sk               9259 net/sctp/socket.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               9260 net/sctp/socket.c 		if (list_empty(&sctp_sk(sk)->ep->asocs))
sk               9262 net/sctp/socket.c 		release_sock(sk);
sk               9264 net/sctp/socket.c 		lock_sock(sk);
sk               9267 net/sctp/socket.c 	finish_wait(sk_sleep(sk), &wait);
sk               9270 net/sctp/socket.c static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
sk               9279 net/sctp/socket.c 		sctp_skb_set_owner_r_frag(frag, sk);
sk               9282 net/sctp/socket.c 	sctp_skb_set_owner_r(skb, sk);
sk               9285 net/sctp/socket.c void sctp_copy_sock(struct sock *newsk, struct sock *sk,
sk               9288 net/sctp/socket.c 	struct inet_sock *inet = inet_sk(sk);
sk               9290 net/sctp/socket.c 	struct sctp_sock *sp = sctp_sk(sk);
sk               9293 net/sctp/socket.c 	newsk->sk_type = sk->sk_type;
sk               9294 net/sctp/socket.c 	newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
sk               9295 net/sctp/socket.c 	newsk->sk_flags = sk->sk_flags;
sk               9296 net/sctp/socket.c 	newsk->sk_tsflags = sk->sk_tsflags;
sk               9297 net/sctp/socket.c 	newsk->sk_no_check_tx = sk->sk_no_check_tx;
sk               9298 net/sctp/socket.c 	newsk->sk_no_check_rx = sk->sk_no_check_rx;
sk               9299 net/sctp/socket.c 	newsk->sk_reuse = sk->sk_reuse;
sk               9302 net/sctp/socket.c 	newsk->sk_shutdown = sk->sk_shutdown;
sk               9304 net/sctp/socket.c 	newsk->sk_family = sk->sk_family;
sk               9306 net/sctp/socket.c 	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
sk               9307 net/sctp/socket.c 	newsk->sk_sndbuf = sk->sk_sndbuf;
sk               9308 net/sctp/socket.c 	newsk->sk_rcvbuf = sk->sk_rcvbuf;
sk               9309 net/sctp/socket.c 	newsk->sk_lingertime = sk->sk_lingertime;
sk               9310 net/sctp/socket.c 	newsk->sk_rcvtimeo = sk->sk_rcvtimeo;
sk               9311 net/sctp/socket.c 	newsk->sk_sndtimeo = sk->sk_sndtimeo;
sk               9312 net/sctp/socket.c 	newsk->sk_rxhash = sk->sk_rxhash;
sk               9338 net/sctp/socket.c 	security_sctp_sk_clone(ep, sk, newsk);
sk               9536 net/sctp/socket.c static void sctp_v6_destroy_sock(struct sock *sk)
sk               9538 net/sctp/socket.c 	sctp_destroy_sock(sk);
sk               9539 net/sctp/socket.c 	inet6_destroy_sock(sk);
sk                225 net/sctp/stream.c 	struct net *net = sock_net(asoc->base.sk);
sk                244 net/sctp/stream_interleave.c 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
sk                327 net/sctp/stream_interleave.c 	pd_point = sctp_sk(asoc->base.sk)->pd_point;
sk                329 net/sctp/stream_interleave.c 		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
sk                340 net/sctp/stream_interleave.c 	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
sk                474 net/sctp/stream_interleave.c 	struct sock *sk = ulpq->asoc->base.sk;
sk                475 net/sctp/stream_interleave.c 	struct sctp_sock *sp = sctp_sk(sk);
sk                482 net/sctp/stream_interleave.c 	if (sk->sk_shutdown & RCV_SHUTDOWN &&
sk                483 net/sctp/stream_interleave.c 	    (sk->sk_shutdown & SEND_SHUTDOWN ||
sk                488 net/sctp/stream_interleave.c 		sk_mark_napi_id(sk, skb);
sk                489 net/sctp/stream_interleave.c 		sk_incoming_cpu_update(sk);
sk                497 net/sctp/stream_interleave.c 					   &sk->sk_receive_queue);
sk                499 net/sctp/stream_interleave.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
sk                503 net/sctp/stream_interleave.c 		sk->sk_data_ready(sk);
sk                633 net/sctp/stream_interleave.c 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
sk                717 net/sctp/stream_interleave.c 	pd_point = sctp_sk(asoc->base.sk)->pd_point;
sk                719 net/sctp/stream_interleave.c 		retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
sk                730 net/sctp/stream_interleave.c 	retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
sk                817 net/sctp/stream_interleave.c 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
sk                924 net/sctp/stream_interleave.c 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
sk                973 net/sctp/stream_interleave.c 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
sk                986 net/sctp/stream_interleave.c 	sk_mem_reclaim(asoc->base.sk);
sk                992 net/sctp/stream_interleave.c 	struct sock *sk = ulpq->asoc->base.sk;
sk               1002 net/sctp/stream_interleave.c 		struct sctp_sock *sp = sctp_sk(sk);
sk               1004 net/sctp/stream_interleave.c 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
sk               1008 net/sctp/stream_interleave.c 			sk->sk_data_ready(sk);
sk               1162 net/sctp/stream_interleave.c 		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
sk                458 net/sctp/sysctl.c 		struct sock *sk = net->sctp.ctl_sock;
sk                462 net/sctp/sysctl.c 		lock_sock(sk);
sk                463 net/sctp/sysctl.c 		sctp_sk(sk)->ep->auth_enable = new_value;
sk                464 net/sctp/sysctl.c 		release_sock(sk);
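
The net/sctp/sysctl.c lines show the auth_enable sysctl handler copying the new value onto the per-namespace control socket's endpoint under lock_sock(). A sketch of flipping it from userspace via procfs, equivalent to sysctl -w net.sctp.auth_enable=1 (enable_sctp_auth is an illustrative helper):

#include <stdio.h>

int enable_sctp_auth(void)
{
	FILE *f = fopen("/proc/sys/net/sctp/auth_enable", "w");

	if (!f)
		return -1;	/* needs CAP_SYS_ADMIN / root */
	fputs("1\n", f);
	return fclose(f);
}
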
sk                222 net/sctp/transport.c void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
sk                228 net/sctp/transport.c 						&transport->fl, sk);
sk                249 net/sctp/transport.c 	struct sock *sk = t->asoc->base.sk;
sk                264 net/sctp/transport.c 		pf->af->from_sk(&addr, sk);
sk                265 net/sctp/transport.c 		pf->to_sk_daddr(&t->ipaddr, sk);
sk                266 net/sctp/transport.c 		dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
sk                267 net/sctp/transport.c 		pf->to_sk_daddr(&addr, sk);
sk                273 net/sctp/transport.c 		t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
sk                311 net/sctp/transport.c 		opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk);
sk                337 net/sctp/transport.c 		struct net *net = sock_net(tp->asoc->base.sk);
sk                 89 net/sctp/ulpevent.c 	sctp_skb_set_owner_r(skb, asoc->base.sk);
sk                 90 net/sctp/ulpevent.c 	if (chunk && chunk->head_skb && !chunk->head_skb->sk)
sk                 91 net/sctp/ulpevent.c 		chunk->head_skb->sk = asoc->base.sk;
sk                329 net/sctp/ulpevent.c 	sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_to_user(
sk                330 net/sctp/ulpevent.c 					sctp_sk(asoc->base.sk),
sk                623 net/sctp/ulpevent.c 	struct sock *sk = asoc->base.sk;
sk                635 net/sctp/ulpevent.c 		rx_count = atomic_read(&sk->sk_rmem_alloc);
sk                639 net/sctp/ulpevent.c 	if (rx_count >= sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, datalen))
sk               1003 net/sctp/ulpevent.c 				struct sock *sk)
sk               1008 net/sctp/ulpevent.c 	skb = sctp_skb_recv_datagram(sk, MSG_PEEK, 1, &err);
sk                128 net/sctp/ulpqueue.c int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
sk                130 net/sctp/ulpqueue.c 	struct sctp_sock *sp = sctp_sk(sk);
sk                138 net/sctp/ulpqueue.c 						   &sk->sk_receive_queue);
sk                155 net/sctp/ulpqueue.c 					__skb_queue_tail(&sk->sk_receive_queue,
sk                168 net/sctp/ulpqueue.c 	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
sk                179 net/sctp/ulpqueue.c 	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
sk                184 net/sctp/ulpqueue.c 	struct sock *sk = ulpq->asoc->base.sk;
sk                185 net/sctp/ulpqueue.c 	struct sctp_sock *sp = sctp_sk(sk);
sk                197 net/sctp/ulpqueue.c 	if (sk->sk_shutdown & RCV_SHUTDOWN &&
sk                198 net/sctp/ulpqueue.c 	    (sk->sk_shutdown & SEND_SHUTDOWN ||
sk                203 net/sctp/ulpqueue.c 		sk_mark_napi_id(sk, skb);
sk                204 net/sctp/ulpqueue.c 		sk_incoming_cpu_update(sk);
sk                216 net/sctp/ulpqueue.c 		queue = &sk->sk_receive_queue;
sk                230 net/sctp/ulpqueue.c 				queue = &sk->sk_receive_queue;
sk                239 net/sctp/ulpqueue.c 				queue = &sk->sk_receive_queue;
sk                254 net/sctp/ulpqueue.c 	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
sk                255 net/sctp/ulpqueue.c 		if (!sock_owned_by_user(sk))
sk                257 net/sctp/ulpqueue.c 		sk->sk_data_ready(sk);
sk                355 net/sctp/ulpqueue.c 			sctp_skb_set_owner_r(new, f_frag->sk);
sk                482 net/sctp/ulpqueue.c 		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
sk                483 net/sctp/ulpqueue.c 		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
sk                487 net/sctp/ulpqueue.c 		pd_point = sctp_sk(asoc->base.sk)->pd_point;
sk                489 net/sctp/ulpqueue.c 			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
sk                500 net/sctp/ulpqueue.c 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
sk                566 net/sctp/ulpqueue.c 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
sk                667 net/sctp/ulpqueue.c 	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
sk               1036 net/sctp/ulpqueue.c 	sp = sctp_sk(asoc->base.sk);
sk               1086 net/sctp/ulpqueue.c 	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
sk               1092 net/sctp/ulpqueue.c 	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
sk               1105 net/sctp/ulpqueue.c 	sk_mem_reclaim(asoc->base.sk);
sk               1117 net/sctp/ulpqueue.c 	struct sock *sk;
sk               1122 net/sctp/ulpqueue.c 	sk = ulpq->asoc->base.sk;
sk               1123 net/sctp/ulpqueue.c 	sp = sctp_sk(sk);
sk               1130 net/sctp/ulpqueue.c 		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
sk               1135 net/sctp/ulpqueue.c 		sk->sk_data_ready(sk);
sk                 60 net/smc/af_smc.c static void smc_set_keepalive(struct sock *sk, int val)
sk                 62 net/smc/af_smc.c 	struct smc_sock *smc = smc_sk(sk);
sk                 64 net/smc/af_smc.c 	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
sk                 75 net/smc/af_smc.c int smc_hash_sk(struct sock *sk)
sk                 77 net/smc/af_smc.c 	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
sk                 83 net/smc/af_smc.c 	sk_add_node(sk, head);
sk                 84 net/smc/af_smc.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                 91 net/smc/af_smc.c void smc_unhash_sk(struct sock *sk)
sk                 93 net/smc/af_smc.c 	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
sk                 96 net/smc/af_smc.c 	if (sk_del_node_init(sk))
sk                 97 net/smc/af_smc.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk                128 net/smc/af_smc.c 	smc->clcsock->file->private_data = smc->sk.sk_socket;
sk                134 net/smc/af_smc.c 	struct sock *sk = &smc->sk;
sk                139 net/smc/af_smc.c 		sock_set_flag(sk, SOCK_DEAD);
sk                140 net/smc/af_smc.c 		sk->sk_shutdown |= SHUTDOWN_MASK;
sk                142 net/smc/af_smc.c 		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
sk                143 net/smc/af_smc.c 			sock_put(sk); /* passive closing */
sk                144 net/smc/af_smc.c 		if (sk->sk_state == SMC_LISTEN) {
sk                148 net/smc/af_smc.c 		sk->sk_state = SMC_CLOSED;
sk                149 net/smc/af_smc.c 		sk->sk_state_change(sk);
sk                153 net/smc/af_smc.c 	sk->sk_prot->unhash(sk);
sk                155 net/smc/af_smc.c 	if (sk->sk_state == SMC_CLOSED) {
sk                157 net/smc/af_smc.c 			release_sock(sk);
sk                159 net/smc/af_smc.c 			lock_sock(sk);
sk                170 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk                174 net/smc/af_smc.c 	if (!sk)
sk                177 net/smc/af_smc.c 	smc = smc_sk(sk);
sk                180 net/smc/af_smc.c 	if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
sk                181 net/smc/af_smc.c 		tcp_abort(smc->clcsock->sk, ECONNABORTED);
sk                184 net/smc/af_smc.c 	if (sk->sk_state == SMC_LISTEN)
sk                188 net/smc/af_smc.c 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                190 net/smc/af_smc.c 		lock_sock(sk);
sk                195 net/smc/af_smc.c 	sock_orphan(sk);
sk                196 net/smc/af_smc.c 	sock->sk = NULL;
sk                197 net/smc/af_smc.c 	release_sock(sk);
sk                199 net/smc/af_smc.c 	sock_put(sk); /* final sock_put */
sk                204 net/smc/af_smc.c static void smc_destruct(struct sock *sk)
sk                206 net/smc/af_smc.c 	if (sk->sk_state != SMC_CLOSED)
sk                208 net/smc/af_smc.c 	if (!sock_flag(sk, SOCK_DEAD))
sk                211 net/smc/af_smc.c 	sk_refcnt_debug_dec(sk);
sk                219 net/smc/af_smc.c 	struct sock *sk;
sk                222 net/smc/af_smc.c 	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
sk                223 net/smc/af_smc.c 	if (!sk)
sk                226 net/smc/af_smc.c 	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
sk                227 net/smc/af_smc.c 	sk->sk_state = SMC_INIT;
sk                228 net/smc/af_smc.c 	sk->sk_destruct = smc_destruct;
sk                229 net/smc/af_smc.c 	sk->sk_protocol = protocol;
sk                230 net/smc/af_smc.c 	smc = smc_sk(sk);
sk                237 net/smc/af_smc.c 	sk->sk_prot->hash(sk);
sk                238 net/smc/af_smc.c 	sk_refcnt_debug_inc(sk);
sk                241 net/smc/af_smc.c 	return sk;
sk                248 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk                252 net/smc/af_smc.c 	smc = smc_sk(sk);
sk                269 net/smc/af_smc.c 	lock_sock(sk);
sk                273 net/smc/af_smc.c 	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
sk                276 net/smc/af_smc.c 	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
sk                280 net/smc/af_smc.c 	release_sock(sk);
sk                324 net/smc/af_smc.c 	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
sk                334 net/smc/af_smc.c 	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
sk                361 net/smc/af_smc.c 	struct net *net = sock_net(smc->clcsock->sk);
sk                467 net/smc/af_smc.c 	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
sk                468 net/smc/af_smc.c 		smc->clcsock->file = smc->sk.sk_socket->file;
sk                471 net/smc/af_smc.c 			smc->sk.sk_socket->wq.fasync_list;
sk                482 net/smc/af_smc.c 	if (smc->sk.sk_state == SMC_INIT)
sk                483 net/smc/af_smc.c 		smc->sk.sk_state = SMC_ACTIVE;
sk                493 net/smc/af_smc.c 		if (smc->sk.sk_state == SMC_INIT)
sk                494 net/smc/af_smc.c 			sock_put(&smc->sk); /* passive closing */
sk                500 net/smc/af_smc.c 			if (smc->sk.sk_state == SMC_INIT)
sk                501 net/smc/af_smc.c 				sock_put(&smc->sk); /* passive closing */
sk                533 net/smc/af_smc.c 	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
sk                544 net/smc/af_smc.c 	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
sk                655 net/smc/af_smc.c 	if (smc->sk.sk_state == SMC_INIT)
sk                656 net/smc/af_smc.c 		smc->sk.sk_state = SMC_ACTIVE;
sk                697 net/smc/af_smc.c 	if (smc->sk.sk_state == SMC_INIT)
sk                698 net/smc/af_smc.c 		smc->sk.sk_state = SMC_ACTIVE;
sk                716 net/smc/af_smc.c 	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
sk                777 net/smc/af_smc.c 	long timeo = smc->sk.sk_sndtimeo;
sk                782 net/smc/af_smc.c 	lock_sock(smc->clcsock->sk);
sk                783 net/smc/af_smc.c 	if (smc->clcsock->sk->sk_err) {
sk                784 net/smc/af_smc.c 		smc->sk.sk_err = smc->clcsock->sk->sk_err;
sk                785 net/smc/af_smc.c 	} else if ((1 << smc->clcsock->sk->sk_state) &
sk                787 net/smc/af_smc.c 		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
sk                789 net/smc/af_smc.c 		    ((1 << smc->clcsock->sk->sk_state) &
sk                793 net/smc/af_smc.c 	release_sock(smc->clcsock->sk);
sk                794 net/smc/af_smc.c 	lock_sock(&smc->sk);
sk                795 net/smc/af_smc.c 	if (rc != 0 || smc->sk.sk_err) {
sk                796 net/smc/af_smc.c 		smc->sk.sk_state = SMC_CLOSED;
sk                798 net/smc/af_smc.c 			smc->sk.sk_err = EPIPE;
sk                800 net/smc/af_smc.c 			smc->sk.sk_err = -sock_intr_errno(timeo);
sk                801 net/smc/af_smc.c 		sock_put(&smc->sk); /* passive closing */
sk                807 net/smc/af_smc.c 		smc->sk.sk_err = -rc;
sk                810 net/smc/af_smc.c 	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
sk                811 net/smc/af_smc.c 		if (smc->sk.sk_err) {
sk                812 net/smc/af_smc.c 			smc->sk.sk_state_change(&smc->sk);
sk                814 net/smc/af_smc.c 			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
sk                815 net/smc/af_smc.c 			smc->sk.sk_write_space(&smc->sk);
sk                818 net/smc/af_smc.c 	release_sock(&smc->sk);
sk                824 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk                828 net/smc/af_smc.c 	smc = smc_sk(sk);
sk                836 net/smc/af_smc.c 	lock_sock(sk);
sk                837 net/smc/af_smc.c 	switch (sk->sk_state) {
sk                849 net/smc/af_smc.c 	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
sk                858 net/smc/af_smc.c 	sock_hold(&smc->sk); /* sock put in passive closing */
sk                874 net/smc/af_smc.c 	release_sock(sk);
sk                882 net/smc/af_smc.c 	struct sock *lsk = &lsmc->sk;
sk                923 net/smc/af_smc.c static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
sk                927 net/smc/af_smc.c 	sock_hold(sk); /* sock_put in smc_accept_unlink () */
sk                929 net/smc/af_smc.c 	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
sk                935 net/smc/af_smc.c static void smc_accept_unlink(struct sock *sk)
sk                937 net/smc/af_smc.c 	struct smc_sock *par = smc_sk(sk)->listen_smc;
sk                940 net/smc/af_smc.c 	list_del_init(&smc_sk(sk)->accept_q);
sk                942 net/smc/af_smc.c 	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
sk                943 net/smc/af_smc.c 	sock_put(sk); /* sock_hold in smc_accept_enqueue */
sk                981 net/smc/af_smc.c void smc_close_non_accepted(struct sock *sk)
sk                983 net/smc/af_smc.c 	struct smc_sock *smc = smc_sk(sk);
sk                985 net/smc/af_smc.c 	lock_sock(sk);
sk                986 net/smc/af_smc.c 	if (!sk->sk_lingertime)
sk                988 net/smc/af_smc.c 		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
sk                990 net/smc/af_smc.c 	release_sock(sk);
sk                991 net/smc/af_smc.c 	sock_put(sk); /* final sock_put */
sk                996 net/smc/af_smc.c 	struct net *net = sock_net(smc->clcsock->sk);
sk               1054 net/smc/af_smc.c 	struct sock *newsmcsk = &new_smc->sk;
sk               1056 net/smc/af_smc.c 	if (lsmc->sk.sk_state == SMC_LISTEN) {
sk               1057 net/smc/af_smc.c 		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
sk               1058 net/smc/af_smc.c 		smc_accept_enqueue(&lsmc->sk, newsmcsk);
sk               1059 net/smc/af_smc.c 		release_sock(&lsmc->sk);
sk               1065 net/smc/af_smc.c 	lsmc->sk.sk_data_ready(&lsmc->sk);
sk               1066 net/smc/af_smc.c 	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
sk               1072 net/smc/af_smc.c 	struct sock *newsmcsk = &new_smc->sk;
sk               1084 net/smc/af_smc.c 	struct sock *newsmcsk = &new_smc->sk;
sk               1087 net/smc/af_smc.c 		sock_put(&new_smc->sk); /* passive closing */
sk               1244 net/smc/af_smc.c 	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
sk               1253 net/smc/af_smc.c 	if (!tcp_sk(newclcsock->sk)->syn_smc) {
sk               1365 net/smc/af_smc.c 	struct sock *lsk = &lsmc->sk;
sk               1383 net/smc/af_smc.c 		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
sk               1384 net/smc/af_smc.c 		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
sk               1385 net/smc/af_smc.c 		sock_hold(&new_smc->sk); /* sock_put in passive closing */
sk               1387 net/smc/af_smc.c 			sock_put(&new_smc->sk);
sk               1392 net/smc/af_smc.c 	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
sk               1397 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk               1401 net/smc/af_smc.c 	smc = smc_sk(sk);
sk               1402 net/smc/af_smc.c 	lock_sock(sk);
sk               1405 net/smc/af_smc.c 	if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
sk               1410 net/smc/af_smc.c 	if (sk->sk_state == SMC_LISTEN) {
sk               1411 net/smc/af_smc.c 		sk->sk_max_ack_backlog = backlog;
sk               1419 net/smc/af_smc.c 		tcp_sk(smc->clcsock->sk)->syn_smc = 1;
sk               1424 net/smc/af_smc.c 	sk->sk_max_ack_backlog = backlog;
sk               1425 net/smc/af_smc.c 	sk->sk_ack_backlog = 0;
sk               1426 net/smc/af_smc.c 	sk->sk_state = SMC_LISTEN;
sk               1427 net/smc/af_smc.c 	sock_hold(sk); /* sock_hold in tcp_listen_worker */
sk               1429 net/smc/af_smc.c 		sock_put(sk);
sk               1432 net/smc/af_smc.c 	release_sock(sk);
sk               1439 net/smc/af_smc.c 	struct sock *sk = sock->sk, *nsk;
sk               1445 net/smc/af_smc.c 	lsmc = smc_sk(sk);
sk               1446 net/smc/af_smc.c 	sock_hold(sk); /* sock_put below */
sk               1447 net/smc/af_smc.c 	lock_sock(sk);
sk               1449 net/smc/af_smc.c 	if (lsmc->sk.sk_state != SMC_LISTEN) {
sk               1451 net/smc/af_smc.c 		release_sock(sk);
sk               1456 net/smc/af_smc.c 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk               1457 net/smc/af_smc.c 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
sk               1458 net/smc/af_smc.c 	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
sk               1464 net/smc/af_smc.c 		release_sock(sk);
sk               1468 net/smc/af_smc.c 		lock_sock(sk);
sk               1475 net/smc/af_smc.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk               1479 net/smc/af_smc.c 	release_sock(sk);
sk               1488 net/smc/af_smc.c 			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
sk               1502 net/smc/af_smc.c 	sock_put(sk); /* sock_hold above */
sk               1511 net/smc/af_smc.c 	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
sk               1512 net/smc/af_smc.c 	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
sk               1515 net/smc/af_smc.c 	smc = smc_sk(sock->sk);
sk               1522 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk               1526 net/smc/af_smc.c 	smc = smc_sk(sk);
sk               1527 net/smc/af_smc.c 	lock_sock(sk);
sk               1528 net/smc/af_smc.c 	if ((sk->sk_state != SMC_ACTIVE) &&
sk               1529 net/smc/af_smc.c 	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
sk               1530 net/smc/af_smc.c 	    (sk->sk_state != SMC_INIT))
sk               1534 net/smc/af_smc.c 		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
sk               1548 net/smc/af_smc.c 	release_sock(sk);
sk               1555 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk               1559 net/smc/af_smc.c 	smc = smc_sk(sk);
sk               1560 net/smc/af_smc.c 	lock_sock(sk);
sk               1561 net/smc/af_smc.c 	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
sk               1566 net/smc/af_smc.c 	if ((sk->sk_state == SMC_INIT) ||
sk               1567 net/smc/af_smc.c 	    (sk->sk_state == SMC_LISTEN) ||
sk               1568 net/smc/af_smc.c 	    (sk->sk_state == SMC_CLOSED))
sk               1571 net/smc/af_smc.c 	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
sk               1584 net/smc/af_smc.c 	release_sock(sk);
sk               1604 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk               1608 net/smc/af_smc.c 	if (!sk)
sk               1611 net/smc/af_smc.c 	smc = smc_sk(sock->sk);
sk               1615 net/smc/af_smc.c 		sk->sk_err = smc->clcsock->sk->sk_err;
sk               1617 net/smc/af_smc.c 		if (sk->sk_state != SMC_CLOSED)
sk               1619 net/smc/af_smc.c 		if (sk->sk_err)
sk               1621 net/smc/af_smc.c 		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
sk               1622 net/smc/af_smc.c 		    (sk->sk_state == SMC_CLOSED))
sk               1624 net/smc/af_smc.c 		if (sk->sk_state == SMC_LISTEN) {
sk               1626 net/smc/af_smc.c 			mask |= smc_accept_poll(sk);
sk               1630 net/smc/af_smc.c 			sk->sk_err = smc->clcsock->sk->sk_err;
sk               1632 net/smc/af_smc.c 			if ((sk->sk_state != SMC_INIT &&
sk               1634 net/smc/af_smc.c 			    sk->sk_shutdown & SEND_SHUTDOWN) {
sk               1637 net/smc/af_smc.c 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk               1638 net/smc/af_smc.c 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               1642 net/smc/af_smc.c 			if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               1644 net/smc/af_smc.c 			if (sk->sk_state == SMC_APPCLOSEWAIT1)
sk               1656 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk               1661 net/smc/af_smc.c 	smc = smc_sk(sk);
sk               1666 net/smc/af_smc.c 	lock_sock(sk);
sk               1669 net/smc/af_smc.c 	if ((sk->sk_state != SMC_ACTIVE) &&
sk               1670 net/smc/af_smc.c 	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
sk               1671 net/smc/af_smc.c 	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
sk               1672 net/smc/af_smc.c 	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
sk               1673 net/smc/af_smc.c 	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
sk               1674 net/smc/af_smc.c 	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
sk               1678 net/smc/af_smc.c 		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
sk               1679 net/smc/af_smc.c 		if (sk->sk_shutdown == SHUTDOWN_MASK)
sk               1680 net/smc/af_smc.c 			sk->sk_state = SMC_CLOSED;
sk               1698 net/smc/af_smc.c 	sk->sk_shutdown |= how + 1;
sk               1701 net/smc/af_smc.c 	release_sock(sk);
sk               1708 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk               1712 net/smc/af_smc.c 	smc = smc_sk(sk);
sk               1719 net/smc/af_smc.c 	if (smc->clcsock->sk->sk_err) {
sk               1720 net/smc/af_smc.c 		sk->sk_err = smc->clcsock->sk->sk_err;
sk               1721 net/smc/af_smc.c 		sk->sk_error_report(sk);
sk               1729 net/smc/af_smc.c 	lock_sock(sk);
sk               1739 net/smc/af_smc.c 		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
sk               1747 net/smc/af_smc.c 		if (sk->sk_state != SMC_INIT &&
sk               1748 net/smc/af_smc.c 		    sk->sk_state != SMC_LISTEN &&
sk               1749 net/smc/af_smc.c 		    sk->sk_state != SMC_CLOSED) {
sk               1756 net/smc/af_smc.c 		if (sk->sk_state != SMC_INIT &&
sk               1757 net/smc/af_smc.c 		    sk->sk_state != SMC_LISTEN &&
sk               1758 net/smc/af_smc.c 		    sk->sk_state != SMC_CLOSED) {
sk               1771 net/smc/af_smc.c 	release_sock(sk);
sk               1781 net/smc/af_smc.c 	smc = smc_sk(sock->sk);
sk               1795 net/smc/af_smc.c 	smc = smc_sk(sock->sk);
sk               1797 net/smc/af_smc.c 	lock_sock(&smc->sk);
sk               1800 net/smc/af_smc.c 			release_sock(&smc->sk);
sk               1804 net/smc/af_smc.c 		release_sock(&smc->sk);
sk               1809 net/smc/af_smc.c 		if (smc->sk.sk_state == SMC_LISTEN) {
sk               1810 net/smc/af_smc.c 			release_sock(&smc->sk);
sk               1813 net/smc/af_smc.c 		if (smc->sk.sk_state == SMC_INIT ||
sk               1814 net/smc/af_smc.c 		    smc->sk.sk_state == SMC_CLOSED)
sk               1821 net/smc/af_smc.c 		if (smc->sk.sk_state == SMC_LISTEN) {
sk               1822 net/smc/af_smc.c 			release_sock(&smc->sk);
sk               1825 net/smc/af_smc.c 		if (smc->sk.sk_state == SMC_INIT ||
sk               1826 net/smc/af_smc.c 		    smc->sk.sk_state == SMC_CLOSED)
sk               1834 net/smc/af_smc.c 		if (smc->sk.sk_state == SMC_LISTEN) {
sk               1835 net/smc/af_smc.c 			release_sock(&smc->sk);
sk               1838 net/smc/af_smc.c 		if (smc->sk.sk_state == SMC_INIT ||
sk               1839 net/smc/af_smc.c 		    smc->sk.sk_state == SMC_CLOSED)
sk               1845 net/smc/af_smc.c 		if (smc->sk.sk_state == SMC_LISTEN) {
sk               1846 net/smc/af_smc.c 			release_sock(&smc->sk);
sk               1849 net/smc/af_smc.c 		if (smc->sk.sk_state == SMC_INIT ||
sk               1850 net/smc/af_smc.c 		    smc->sk.sk_state == SMC_CLOSED) {
sk               1860 net/smc/af_smc.c 		release_sock(&smc->sk);
sk               1863 net/smc/af_smc.c 	release_sock(&smc->sk);
sk               1871 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk               1875 net/smc/af_smc.c 	smc = smc_sk(sk);
sk               1876 net/smc/af_smc.c 	lock_sock(sk);
sk               1877 net/smc/af_smc.c 	if (sk->sk_state != SMC_ACTIVE) {
sk               1878 net/smc/af_smc.c 		release_sock(sk);
sk               1881 net/smc/af_smc.c 	release_sock(sk);
sk               1902 net/smc/af_smc.c 	struct sock *sk = sock->sk;
sk               1906 net/smc/af_smc.c 	smc = smc_sk(sk);
sk               1907 net/smc/af_smc.c 	lock_sock(sk);
sk               1908 net/smc/af_smc.c 	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
sk               1913 net/smc/af_smc.c 	if (sk->sk_state == SMC_INIT ||
sk               1914 net/smc/af_smc.c 	    sk->sk_state == SMC_LISTEN ||
sk               1915 net/smc/af_smc.c 	    sk->sk_state == SMC_CLOSED)
sk               1918 net/smc/af_smc.c 	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
sk               1938 net/smc/af_smc.c 	release_sock(sk);
sk               1971 net/smc/af_smc.c 	struct sock *sk;
sk               1984 net/smc/af_smc.c 	sk = smc_sock_alloc(net, sock, protocol);
sk               1985 net/smc/af_smc.c 	if (!sk)
sk               1989 net/smc/af_smc.c 	smc = smc_sk(sk);
sk               1995 net/smc/af_smc.c 		sk_common_release(sk);
sk               1998 net/smc/af_smc.c 	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
sk               1999 net/smc/af_smc.c 	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
sk                194 net/smc/smc.h  	struct sock		sk;
sk                225 net/smc/smc.h  static inline struct smc_sock *smc_sk(const struct sock *sk)
sk                227 net/smc/smc.h  	return (struct smc_sock *)sk;
sk                255 net/smc/smc.h  	return (smc->clcsock->sk->sk_policy[0] ||
sk                256 net/smc/smc.h  		smc->clcsock->sk->sk_policy[1]) ? true : false;
sk                266 net/smc/smc.h  void smc_close_non_accepted(struct sock *sk);
sk                 39 net/smc/smc_cdc.c 	bh_lock_sock(&smc->sk);
sk                 52 net/smc/smc_cdc.c 	bh_unlock_sock(&smc->sk);
sk                228 net/smc/smc_cdc.c 	if (!sock_flag(&smc->sk, SOCK_URGINLINE))
sk                236 net/smc/smc_cdc.c 	sk_send_sigurg(&smc->sk);
sk                272 net/smc/smc_cdc.c 		smc->sk.sk_data_ready(&smc->sk);
sk                275 net/smc/smc_cdc.c 			smc->sk.sk_data_ready(&smc->sk);
sk                290 net/smc/smc_cdc.c 		smc->sk.sk_write_space(&smc->sk);
sk                294 net/smc/smc_cdc.c 		smc->sk.sk_err = ECONNRESET;
sk                298 net/smc/smc_cdc.c 		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
sk                299 net/smc/smc_cdc.c 		if (smc->clcsock && smc->clcsock->sk)
sk                300 net/smc/smc_cdc.c 			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
sk                301 net/smc/smc_cdc.c 		sock_set_flag(&smc->sk, SOCK_DONE);
sk                302 net/smc/smc_cdc.c 		sock_hold(&smc->sk); /* sock_put in close_work */
sk                304 net/smc/smc_cdc.c 			sock_put(&smc->sk);
sk                311 net/smc/smc_cdc.c 	sock_hold(&smc->sk);
sk                312 net/smc/smc_cdc.c 	bh_lock_sock(&smc->sk);
sk                314 net/smc/smc_cdc.c 	bh_unlock_sock(&smc->sk);
sk                315 net/smc/smc_cdc.c 	sock_put(&smc->sk); /* no free sk in softirq-context */
sk                151 net/smc/smc_clc.c 	struct dst_entry *dst = sk_dst_get(clcsock->sk);
sk                241 net/smc/smc_clc.c 	struct dst_entry *dst = sk_dst_get(clcsock->sk);
sk                273 net/smc/smc_clc.c 	long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
sk                274 net/smc/smc_clc.c 	struct sock *clc_sk = smc->clcsock->sk;
sk                298 net/smc/smc_clc.c 		smc->sk.sk_err = EINTR;
sk                307 net/smc/smc_clc.c 			smc->sk.sk_err = clc_sk->sk_err;
sk                311 net/smc/smc_clc.c 		smc->sk.sk_err = ECONNRESET;
sk                317 net/smc/smc_clc.c 			smc->sk.sk_err = -len;
sk                329 net/smc/smc_clc.c 		smc->sk.sk_err = EPROTO;
sk                340 net/smc/smc_clc.c 		smc->sk.sk_err = EPROTO;
sk                458 net/smc/smc_clc.c 		smc->sk.sk_err = smc->clcsock->sk->sk_err;
sk                459 net/smc/smc_clc.c 		reason_code = -smc->sk.sk_err;
sk                462 net/smc/smc_clc.c 		smc->sk.sk_err = -reason_code;
sk                530 net/smc/smc_clc.c 			smc->sk.sk_err = -reason_code;
sk                532 net/smc/smc_clc.c 			smc->sk.sk_err = smc->clcsock->sk->sk_err;
sk                533 net/smc/smc_clc.c 			reason_code = -smc->sk.sk_err;
sk                600 net/smc/smc_clc.c 		len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;
sk                 42 net/smc/smc_close.c 	struct sock *sk;
sk                 45 net/smc/smc_close.c 	while ((sk = smc_accept_dequeue(parent, NULL)))
sk                 46 net/smc/smc_close.c 		smc_close_non_accepted(sk);
sk                 53 net/smc/smc_close.c 	struct sock *sk = &smc->sk;
sk                 62 net/smc/smc_close.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                 66 net/smc/smc_close.c 		rc = sk_wait_event(sk, &timeout,
sk                 68 net/smc/smc_close.c 				   (sk->sk_err == ECONNABORTED) ||
sk                 69 net/smc/smc_close.c 				   (sk->sk_err == ECONNRESET),
sk                 74 net/smc/smc_close.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                 82 net/smc/smc_close.c 		smc->sk.sk_state_change(&smc->sk);
sk                114 net/smc/smc_close.c 	struct sock *sk = &smc->sk;
sk                119 net/smc/smc_close.c 	if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
sk                120 net/smc/smc_close.c 		sk->sk_err = ECONNABORTED;
sk                121 net/smc/smc_close.c 		if (smc->clcsock && smc->clcsock->sk) {
sk                122 net/smc/smc_close.c 			smc->clcsock->sk->sk_err = ECONNABORTED;
sk                123 net/smc/smc_close.c 			smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
sk                126 net/smc/smc_close.c 	switch (sk->sk_state) {
sk                128 net/smc/smc_close.c 		sk->sk_state = SMC_PEERABORTWAIT;
sk                129 net/smc/smc_close.c 		release_sock(sk);
sk                131 net/smc/smc_close.c 		lock_sock(sk);
sk                132 net/smc/smc_close.c 		sock_put(sk); /* passive closing */
sk                137 net/smc/smc_close.c 			sk->sk_state = SMC_PEERABORTWAIT;
sk                139 net/smc/smc_close.c 			sk->sk_state = SMC_CLOSED;
sk                140 net/smc/smc_close.c 		release_sock(sk);
sk                142 net/smc/smc_close.c 		lock_sock(sk);
sk                148 net/smc/smc_close.c 			sk->sk_state = SMC_PEERABORTWAIT;
sk                150 net/smc/smc_close.c 			sk->sk_state = SMC_CLOSED;
sk                152 net/smc/smc_close.c 		sock_put(sk); /* passive closing */
sk                156 net/smc/smc_close.c 		sk->sk_state = SMC_CLOSED;
sk                159 net/smc/smc_close.c 		sock_put(sk); /* passive closing */
sk                167 net/smc/smc_close.c 	sock_set_flag(sk, SOCK_DEAD);
sk                168 net/smc/smc_close.c 	sk->sk_state_change(sk);
sk                182 net/smc/smc_close.c 	struct sock *sk = &smc->sk;
sk                188 net/smc/smc_close.c 		  0 : sock_flag(sk, SOCK_LINGER) ?
sk                189 net/smc/smc_close.c 		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
sk                191 net/smc/smc_close.c 	old_state = sk->sk_state;
sk                193 net/smc/smc_close.c 	switch (sk->sk_state) {
sk                195 net/smc/smc_close.c 		sk->sk_state = SMC_CLOSED;
sk                198 net/smc/smc_close.c 		sk->sk_state = SMC_CLOSED;
sk                199 net/smc/smc_close.c 		sk->sk_state_change(sk); /* wake up accept */
sk                200 net/smc/smc_close.c 		if (smc->clcsock && smc->clcsock->sk) {
sk                203 net/smc/smc_close.c 			smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
sk                205 net/smc/smc_close.c 		smc_close_cleanup_listen(sk);
sk                206 net/smc/smc_close.c 		release_sock(sk);
sk                208 net/smc/smc_close.c 		lock_sock(sk);
sk                212 net/smc/smc_close.c 		release_sock(sk);
sk                214 net/smc/smc_close.c 		lock_sock(sk);
sk                215 net/smc/smc_close.c 		if (sk->sk_state == SMC_ACTIVE) {
sk                220 net/smc/smc_close.c 			sk->sk_state = SMC_PEERCLOSEWAIT1;
sk                235 net/smc/smc_close.c 		sk->sk_state = SMC_CLOSED;
sk                241 net/smc/smc_close.c 		release_sock(sk);
sk                243 net/smc/smc_close.c 		lock_sock(sk);
sk                244 net/smc/smc_close.c 		if (sk->sk_state != SMC_APPCLOSEWAIT1 &&
sk                245 net/smc/smc_close.c 		    sk->sk_state != SMC_APPCLOSEWAIT2)
sk                253 net/smc/smc_close.c 			sk->sk_state = SMC_CLOSED;
sk                254 net/smc/smc_close.c 			sock_put(sk); /* postponed passive closing */
sk                257 net/smc/smc_close.c 			sk->sk_state = SMC_PEERFINCLOSEWAIT;
sk                276 net/smc/smc_close.c 		sk->sk_state = SMC_CLOSED;
sk                284 net/smc/smc_close.c 	if (old_state != sk->sk_state)
sk                285 net/smc/smc_close.c 		sk->sk_state_change(sk);
sk                293 net/smc/smc_close.c 	struct sock *sk = &smc->sk;
sk                295 net/smc/smc_close.c 	switch (sk->sk_state) {
sk                299 net/smc/smc_close.c 		sk->sk_state = SMC_PROCESSABORT;
sk                300 net/smc/smc_close.c 		sock_put(sk); /* passive closing */
sk                303 net/smc/smc_close.c 		sk->sk_state = SMC_PROCESSABORT;
sk                310 net/smc/smc_close.c 			sk->sk_state = SMC_PROCESSABORT;
sk                312 net/smc/smc_close.c 			sk->sk_state = SMC_CLOSED;
sk                313 net/smc/smc_close.c 		sock_put(sk); /* passive closing */
sk                317 net/smc/smc_close.c 		sk->sk_state = SMC_CLOSED;
sk                318 net/smc/smc_close.c 		sock_put(sk); /* passive closing */
sk                321 net/smc/smc_close.c 		sk->sk_state = SMC_CLOSED;
sk                341 net/smc/smc_close.c 	struct sock *sk = &smc->sk;
sk                344 net/smc/smc_close.c 	lock_sock(sk);
sk                345 net/smc/smc_close.c 	old_state = sk->sk_state;
sk                357 net/smc/smc_close.c 		release_sock(&smc->sk);
sk                359 net/smc/smc_close.c 		lock_sock(&smc->sk);
sk                363 net/smc/smc_close.c 	switch (sk->sk_state) {
sk                365 net/smc/smc_close.c 		sk->sk_state = SMC_APPCLOSEWAIT1;
sk                368 net/smc/smc_close.c 		sk->sk_state = SMC_APPCLOSEWAIT1;
sk                375 net/smc/smc_close.c 			sk->sk_state = SMC_PEERCLOSEWAIT2;
sk                381 net/smc/smc_close.c 		if (sock_flag(sk, SOCK_DEAD) &&
sk                384 net/smc/smc_close.c 			sk->sk_state = SMC_CLOSED;
sk                387 net/smc/smc_close.c 			sk->sk_state = SMC_APPFINCLOSEWAIT;
sk                389 net/smc/smc_close.c 		sock_put(sk); /* passive closing */
sk                393 net/smc/smc_close.c 			sk->sk_state = SMC_CLOSED;
sk                394 net/smc/smc_close.c 			sock_put(sk); /* passive closing */
sk                412 net/smc/smc_close.c 	sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */
sk                413 net/smc/smc_close.c 	sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */
sk                415 net/smc/smc_close.c 	if (old_state != sk->sk_state) {
sk                416 net/smc/smc_close.c 		sk->sk_state_change(sk);
sk                417 net/smc/smc_close.c 		if ((sk->sk_state == SMC_CLOSED) &&
sk                418 net/smc/smc_close.c 		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
sk                424 net/smc/smc_close.c 	release_sock(sk);
sk                427 net/smc/smc_close.c 	sock_put(sk); /* sock_hold done by schedulers of close_work */
sk                433 net/smc/smc_close.c 	struct sock *sk = &smc->sk;
sk                439 net/smc/smc_close.c 		  0 : sock_flag(sk, SOCK_LINGER) ?
sk                440 net/smc/smc_close.c 		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;
sk                442 net/smc/smc_close.c 	old_state = sk->sk_state;
sk                444 net/smc/smc_close.c 	switch (sk->sk_state) {
sk                447 net/smc/smc_close.c 		release_sock(sk);
sk                449 net/smc/smc_close.c 		lock_sock(sk);
sk                450 net/smc/smc_close.c 		if (sk->sk_state != SMC_ACTIVE)
sk                456 net/smc/smc_close.c 		sk->sk_state = SMC_PEERCLOSEWAIT1;
sk                462 net/smc/smc_close.c 		release_sock(sk);
sk                464 net/smc/smc_close.c 		lock_sock(sk);
sk                465 net/smc/smc_close.c 		if (sk->sk_state != SMC_APPCLOSEWAIT1)
sk                471 net/smc/smc_close.c 		sk->sk_state = SMC_APPCLOSEWAIT2;
sk                484 net/smc/smc_close.c 	if (old_state != sk->sk_state)
sk                485 net/smc/smc_close.c 		sk->sk_state_change(sk);
sk                101 net/smc/smc_core.c 	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
sk                121 net/smc/smc_core.c 	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
sk                477 net/smc/smc_core.c 		sock_hold(&smc->sk); /* sock_put in close work */
sk                483 net/smc/smc_core.c 			sock_put(&smc->sk);
sk                548 net/smc/smc_core.c 	struct dst_entry *dst = sk_dst_get(clcsock->sk);
sk                847 net/smc/smc_core.c 		sk_buf_size = smc->sk.sk_rcvbuf / 2;
sk                850 net/smc/smc_core.c 		sk_buf_size = smc->sk.sk_sndbuf / 2;
sk                896 net/smc/smc_core.c 		smc->sk.sk_rcvbuf = bufsize * 2;
sk                904 net/smc/smc_core.c 		smc->sk.sk_sndbuf = bufsize * 2;
sk                 38 net/smc/smc_diag.c static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
sk                 40 net/smc/smc_diag.c 	struct smc_sock *smc = smc_sk(sk);
sk                 43 net/smc/smc_diag.c 	r->diag_family = sk->sk_family;
sk                 44 net/smc/smc_diag.c 	sock_diag_save_cookie(sk, r->id.idiag_cookie);
sk                 47 net/smc/smc_diag.c 	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
sk                 48 net/smc/smc_diag.c 	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
sk                 49 net/smc/smc_diag.c 	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
sk                 50 net/smc/smc_diag.c 	if (sk->sk_protocol == SMCPROTO_SMC) {
sk                 51 net/smc/smc_diag.c 		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
sk                 52 net/smc/smc_diag.c 		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
sk                 54 net/smc/smc_diag.c 	} else if (sk->sk_protocol == SMCPROTO_SMC6) {
sk                 55 net/smc/smc_diag.c 		memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
sk                 56 net/smc/smc_diag.c 		       sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
sk                 57 net/smc/smc_diag.c 		memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
sk                 58 net/smc/smc_diag.c 		       sizeof(smc->clcsock->sk->sk_v6_daddr));
sk                 63 net/smc/smc_diag.c static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
sk                 67 net/smc/smc_diag.c 	if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
sk                 70 net/smc/smc_diag.c 	r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
sk                 71 net/smc/smc_diag.c 	r->diag_inode = sock_i_ino(sk);
sk                 75 net/smc/smc_diag.c static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
sk                 80 net/smc/smc_diag.c 	struct smc_sock *smc = smc_sk(sk);
sk                 92 net/smc/smc_diag.c 	smc_diag_msg_common_fill(r, sk);
sk                 93 net/smc/smc_diag.c 	r->diag_state = sk->sk_state;
sk                100 net/smc/smc_diag.c 	user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
sk                101 net/smc/smc_diag.c 	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
sk                196 net/smc/smc_diag.c 	struct net *net = sock_net(skb->sk);
sk                199 net/smc/smc_diag.c 	struct sock *sk;
sk                207 net/smc/smc_diag.c 	sk_for_each(sk, head) {
sk                208 net/smc/smc_diag.c 		if (!net_eq(sock_net(sk), net))
sk                210 net/smc/smc_diag.c 		rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
sk                232 net/smc/smc_diag.c 	struct net *net = sock_net(skb->sk);
sk                562 net/smc/smc_pnet.c 	struct net *net = sock_net(skb->sk);
sk                859 net/smc/smc_pnet.c void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini)
sk                861 net/smc/smc_pnet.c 	struct dst_entry *dst = sk_dst_get(sk);
sk                878 net/smc/smc_pnet.c void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini)
sk                880 net/smc/smc_pnet.c 	struct dst_entry *dst = sk_dst_get(sk);
sk                 47 net/smc/smc_pnet.h void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini);
sk                 48 net/smc/smc_pnet.h void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini);
sk                 28 net/smc/smc_rx.c static void smc_rx_wake_up(struct sock *sk)
sk                 35 net/smc/smc_rx.c 	wq = rcu_dereference(sk->sk_wq);
sk                 39 net/smc/smc_rx.c 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
sk                 40 net/smc/smc_rx.c 	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
sk                 41 net/smc/smc_rx.c 	    (sk->sk_state == SMC_CLOSED))
sk                 42 net/smc/smc_rx.c 		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
sk                 57 net/smc/smc_rx.c 	struct sock *sk = &smc->sk;
sk                 67 net/smc/smc_rx.c 		if (sock_flag(sk, SOCK_URGINLINE)) {
sk                114 net/smc/smc_rx.c 	struct sock *sk = &smc->sk;
sk                116 net/smc/smc_rx.c 	if (sk->sk_state == SMC_CLOSED ||
sk                117 net/smc/smc_rx.c 	    sk->sk_state == SMC_PEERFINCLOSEWAIT ||
sk                118 net/smc/smc_rx.c 	    sk->sk_state == SMC_APPFINCLOSEWAIT)
sk                121 net/smc/smc_rx.c 	lock_sock(sk);
sk                123 net/smc/smc_rx.c 	release_sock(sk);
sk                125 net/smc/smc_rx.c 		smc_rx_wake_up(sk);
sk                129 net/smc/smc_rx.c 	sock_put(sk);
sk                177 net/smc/smc_rx.c 		sock_hold(&smc->sk);
sk                204 net/smc/smc_rx.c 	struct sock *sk = &smc->sk;
sk                209 net/smc/smc_rx.c 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                210 net/smc/smc_rx.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                211 net/smc/smc_rx.c 	rc = sk_wait_event(sk, timeo,
sk                212 net/smc/smc_rx.c 			   sk->sk_err ||
sk                213 net/smc/smc_rx.c 			   sk->sk_shutdown & RCV_SHUTDOWN ||
sk                216 net/smc/smc_rx.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                217 net/smc/smc_rx.c 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk                226 net/smc/smc_rx.c 	struct sock *sk = &smc->sk;
sk                229 net/smc/smc_rx.c 	if (sock_flag(sk, SOCK_URGINLINE) ||
sk                258 net/smc/smc_rx.c 	if (sk->sk_state == SMC_CLOSED || sk->sk_shutdown & RCV_SHUTDOWN)
sk                293 net/smc/smc_rx.c 	struct sock *sk;
sk                302 net/smc/smc_rx.c 	sk = &smc->sk;
sk                303 net/smc/smc_rx.c 	if (sk->sk_state == SMC_LISTEN)
sk                307 net/smc/smc_rx.c 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk                308 net/smc/smc_rx.c 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
sk                320 net/smc/smc_rx.c 		if (sk->sk_shutdown & RCV_SHUTDOWN ||
sk                331 net/smc/smc_rx.c 			if (sk->sk_err ||
sk                332 net/smc/smc_rx.c 			    sk->sk_state == SMC_CLOSED ||
sk                337 net/smc/smc_rx.c 			if (sk->sk_err) {
sk                338 net/smc/smc_rx.c 				read_done = sock_error(sk);
sk                341 net/smc/smc_rx.c 			if (sk->sk_state == SMC_CLOSED) {
sk                342 net/smc/smc_rx.c 				if (!sock_flag(sk, SOCK_DONE)) {
sk                383 net/smc/smc_rx.c 		    sock_flag(&smc->sk, SOCK_URGINLINE) &&
sk                443 net/smc/smc_rx.c 	smc->sk.sk_data_ready = smc_rx_wake_up;
sk                 40 net/smc/smc_tx.c static void smc_tx_write_space(struct sock *sk)
sk                 42 net/smc/smc_tx.c 	struct socket *sock = sk->sk_socket;
sk                 43 net/smc/smc_tx.c 	struct smc_sock *smc = smc_sk(sk);
sk                 50 net/smc/smc_tx.c 		wq = rcu_dereference(sk->sk_wq);
sk                 55 net/smc/smc_tx.c 		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
sk                 66 net/smc/smc_tx.c 	if (smc->sk.sk_socket &&
sk                 67 net/smc/smc_tx.c 	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
sk                 68 net/smc/smc_tx.c 		smc->sk.sk_write_space(&smc->sk);
sk                 78 net/smc/smc_tx.c 	struct sock *sk = &smc->sk;
sk                 83 net/smc/smc_tx.c 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk                 84 net/smc/smc_tx.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                 86 net/smc/smc_tx.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                 87 net/smc/smc_tx.c 		if (sk->sk_err ||
sk                 88 net/smc/smc_tx.c 		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
sk                 99 net/smc/smc_tx.c 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                107 net/smc/smc_tx.c 		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                110 net/smc/smc_tx.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk                111 net/smc/smc_tx.c 		sk_wait_event(sk, &timeo,
sk                112 net/smc/smc_tx.c 			      sk->sk_err ||
sk                113 net/smc/smc_tx.c 			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
sk                119 net/smc/smc_tx.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                125 net/smc/smc_tx.c 	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);
sk                139 net/smc/smc_tx.c 	struct sock *sk = &smc->sk;
sk                146 net/smc/smc_tx.c 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk                148 net/smc/smc_tx.c 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
sk                154 net/smc/smc_tx.c 		if (sk->sk_state == SMC_INIT)
sk                156 net/smc/smc_tx.c 		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
sk                157 net/smc/smc_tx.c 		    (smc->sk.sk_err == ECONNABORTED) ||
sk                239 net/smc/smc_tx.c 	rc = sk_stream_error(sk, msg->msg_flags, rc);
sk                242 net/smc/smc_tx.c 		sk->sk_write_space(sk);
sk                496 net/smc/smc_tx.c 			if (smc->sk.sk_err == ECONNABORTED)
sk                497 net/smc/smc_tx.c 				return sock_error(&smc->sk);
sk                575 net/smc/smc_tx.c 	lock_sock(&smc->sk);
sk                576 net/smc/smc_tx.c 	if (smc->sk.sk_err ||
sk                587 net/smc/smc_tx.c 	release_sock(&smc->sk);
sk                628 net/smc/smc_tx.c 	smc->sk.sk_write_space = smc_tx_write_space;
sk                249 net/socket.c   	ei->socket.sk = NULL;
sk                395 net/socket.c   		dname = sock->sk ? sock->sk->sk_prot_creator->name : "";
sk                531 net/socket.c   		if (sock->sk)
sk                532 net/socket.c   			sock->sk->sk_uid = iattr->ia_uid;
sk                591 net/socket.c   		sock->sk = NULL;
sk                694 net/socket.c   int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
sk                697 net/socket.c   	struct socket *sock = sk->sk_socket;
sk                700 net/socket.c   		return sock_no_sendmsg_locked(sk, msg, size);
sk                704 net/socket.c   	return sock->ops->sendmsg_locked(sk, msg, msg_data_left(msg));
sk                755 net/socket.c   void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
sk                758 net/socket.c   	int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
sk                759 net/socket.c   	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
sk                774 net/socket.c   		if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
sk                806 net/socket.c   	if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
sk                810 net/socket.c   	    (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
sk                814 net/socket.c   		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
sk                819 net/socket.c   		if (sock_flag(sk, SOCK_TSTAMP_NEW))
sk                832 net/socket.c   void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
sk                837 net/socket.c   	if (!sock_flag(sk, SOCK_WIFI_STATUS))
sk                848 net/socket.c   static inline void sock_recv_drops(struct msghdr *msg, struct sock *sk,
sk                851 net/socket.c   	if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && SOCK_SKB_CB(skb)->dropcount)
sk                856 net/socket.c   void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
sk                859 net/socket.c   	sock_recv_timestamp(msg, sk, skb);
sk                860 net/socket.c   	sock_recv_drops(msg, sk, skb);
sk               1090 net/socket.c   	struct sock *sk;
sk               1096 net/socket.c   	sk = sock->sk;
sk               1097 net/socket.c   	net = sock_net(sk);
sk               1247 net/socket.c   	if (sk_can_busy_loop(sock->sk)) {
sk               1250 net/socket.c   			sk_busy_loop(sock->sk, 1);
sk               1286 net/socket.c   	struct sock *sk = sock->sk;
sk               1289 net/socket.c   	if (sk == NULL)
sk               1292 net/socket.c   	lock_sock(sk);
sk               1296 net/socket.c   		sock_reset_flag(sk, SOCK_FASYNC);
sk               1298 net/socket.c   		sock_set_flag(sk, SOCK_FASYNC);
sk               1300 net/socket.c   	release_sock(sk);
sk               1675 net/socket.c   		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
sk               1743 net/socket.c   	newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
sk               2062 net/socket.c   		err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level,
sk               2132 net/socket.c   		err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
sk               2254 net/socket.c   		    cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
sk               2264 net/socket.c   			ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
sk               2311 net/socket.c   		sock_kfree_s(sock->sk, ctl_buf, ctl_len);
sk               2658 net/socket.c   		err = sock_error(sock->sk);
sk               2737 net/socket.c   		sock->sk->sk_err = -err;
sk               3377 net/socket.c   	if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */
sk               3442 net/socket.c   	struct sock *sk = sock->sk;
sk               3443 net/socket.c   	struct net *net = sock_net(sk);
sk               3548 net/socket.c   	struct sock *sk;
sk               3551 net/socket.c   	sk = sock->sk;
sk               3552 net/socket.c   	net = sock_net(sk);
sk               3610 net/socket.c   	struct sock *sk = sock->sk;
sk               3613 net/socket.c   	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
sk               3781 net/socket.c   int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
sk               3784 net/socket.c   	struct socket *sock = sk->sk_socket;
sk               3787 net/socket.c   		return sock->ops->sendpage_locked(sk, page, offset, size,
sk               3790 net/socket.c   	return sock_no_sendpage_locked(sk, page, offset, size, flags);
sk               3818 net/socket.c   u32 kernel_sock_ip_overhead(struct sock *sk)
sk               3828 net/socket.c   	if (!sk)
sk               3831 net/socket.c   	switch (sk->sk_family) {
sk               3833 net/socket.c   		inet = inet_sk(sk);
sk               3836 net/socket.c   						sock_owned_by_user(sk));
sk               3842 net/socket.c   		np = inet6_sk(sk);
sk               3846 net/socket.c   							  sock_owned_by_user(sk));
sk                 56 net/strparser/strparser.c 	if (strp->sk) {
sk                 57 net/strparser/strparser.c 		struct sock *sk = strp->sk;
sk                 60 net/strparser/strparser.c 		sk->sk_err = -err;
sk                 61 net/strparser/strparser.c 		sk->sk_error_report(sk);
sk                 83 net/strparser/strparser.c 	if (strp->sk) {
sk                 84 net/strparser/strparser.c 		struct socket *sock = strp->sk->sk_socket;
sk                344 net/strparser/strparser.c 			   strp->sk->sk_rcvbuf, strp->sk->sk_rcvtimeo);
sk                355 net/strparser/strparser.c 	struct socket *sock = strp->sk->sk_socket;
sk                366 net/strparser/strparser.c 	sock->ops->read_sock(strp->sk, &desc, strp_recv);
sk                386 net/strparser/strparser.c 	if (sock_owned_by_user_nocheck(strp->sk)) {
sk                440 net/strparser/strparser.c 	lock_sock(strp->sk);
sk                445 net/strparser/strparser.c 	release_sock(strp->sk);
sk                448 net/strparser/strparser.c int strp_init(struct strparser *strp, struct sock *sk,
sk                466 net/strparser/strparser.c 	if (!sk) {
sk                473 net/strparser/strparser.c 	strp->sk = sk;
sk                 40 net/sunrpc/sunrpc.h static inline int sock_is_loopback(struct sock *sk)
sk                 45 net/sunrpc/sunrpc.h 	dst = rcu_dereference(sk->sk_dst_cache);
sk                 80 net/sunrpc/svcsock.c 	struct sock *sk = sock->sk;
sk                 82 net/sunrpc/svcsock.c 	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
sk                 85 net/sunrpc/svcsock.c 	switch (sk->sk_family) {
sk                 87 net/sunrpc/svcsock.c 		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
sk                 94 net/sunrpc/svcsock.c 		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
sk                287 net/sunrpc/svcsock.c 	const struct sock *sk = svsk->sk_sk;
sk                288 net/sunrpc/svcsock.c 	const char *proto_name = sk->sk_protocol == IPPROTO_UDP ?
sk                292 net/sunrpc/svcsock.c 	switch (sk->sk_family) {
sk                296 net/sunrpc/svcsock.c 				&inet_sk(sk)->inet_rcv_saddr,
sk                297 net/sunrpc/svcsock.c 				inet_sk(sk)->inet_num);
sk                303 net/sunrpc/svcsock.c 				&sk->sk_v6_rcv_saddr,
sk                304 net/sunrpc/svcsock.c 				inet_sk(sk)->inet_num);
sk                309 net/sunrpc/svcsock.c 				sk->sk_family);
sk                360 net/sunrpc/svcsock.c 	lock_sock(sock->sk);
sk                361 net/sunrpc/svcsock.c 	sock->sk->sk_sndbuf = nreqs * max_mesg * 2;
sk                362 net/sunrpc/svcsock.c 	sock->sk->sk_rcvbuf = nreqs * max_mesg * 2;
sk                363 net/sunrpc/svcsock.c 	sock->sk->sk_write_space(sock->sk);
sk                364 net/sunrpc/svcsock.c 	release_sock(sock->sk);
sk                378 net/sunrpc/svcsock.c static void svc_data_ready(struct sock *sk)
sk                380 net/sunrpc/svcsock.c 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
sk                384 net/sunrpc/svcsock.c 			svsk, sk,
sk                389 net/sunrpc/svcsock.c 		svsk->sk_odata(sk);
sk                398 net/sunrpc/svcsock.c static void svc_write_space(struct sock *sk)
sk                400 net/sunrpc/svcsock.c 	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);
sk                404 net/sunrpc/svcsock.c 			svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
sk                408 net/sunrpc/svcsock.c 		svsk->sk_owspace(sk);
sk                679 net/sunrpc/svcsock.c 	svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class,
sk                717 net/sunrpc/svcsock.c static void svc_tcp_listen_data_ready(struct sock *sk)
sk                719 net/sunrpc/svcsock.c 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
sk                722 net/sunrpc/svcsock.c 		sk, sk->sk_state);
sk                727 net/sunrpc/svcsock.c 		svsk->sk_odata(sk);
sk                740 net/sunrpc/svcsock.c 	if (sk->sk_state == TCP_LISTEN) {
sk                745 net/sunrpc/svcsock.c 			printk("svc: socket %p: no user data\n", sk);
sk                752 net/sunrpc/svcsock.c static void svc_tcp_state_change(struct sock *sk)
sk                754 net/sunrpc/svcsock.c 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
sk                757 net/sunrpc/svcsock.c 		sk, sk->sk_state, sk->sk_user_data);
sk                760 net/sunrpc/svcsock.c 		printk("svc: socket %p: no user data\n", sk);
sk                764 net/sunrpc/svcsock.c 		svsk->sk_ostate(sk);
sk                765 net/sunrpc/svcsock.c 		if (sk->sk_state != TCP_ESTABLISHED) {
sk                825 net/sunrpc/svcsock.c 	newsock->sk->sk_state_change = svsk->sk_ostate;
sk                826 net/sunrpc/svcsock.c 	newsock->sk->sk_data_ready = svsk->sk_odata;
sk                827 net/sunrpc/svcsock.c 	newsock->sk->sk_write_space = svsk->sk_owspace;
sk                832 net/sunrpc/svcsock.c 	newsock->sk->sk_sndtimeo = HZ*30;
sk                847 net/sunrpc/svcsock.c 	if (sock_is_loopback(newsock->sk))
sk               1209 net/sunrpc/svcsock.c 	struct sock	*sk = svsk->sk_sk;
sk               1211 net/sunrpc/svcsock.c 	svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class,
sk               1215 net/sunrpc/svcsock.c 	if (sk->sk_state == TCP_LISTEN) {
sk               1219 net/sunrpc/svcsock.c 		sk->sk_data_ready = svc_tcp_listen_data_ready;
sk               1223 net/sunrpc/svcsock.c 		sk->sk_state_change = svc_tcp_state_change;
sk               1224 net/sunrpc/svcsock.c 		sk->sk_data_ready = svc_data_ready;
sk               1225 net/sunrpc/svcsock.c 		sk->sk_write_space = svc_write_space;
sk               1232 net/sunrpc/svcsock.c 		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
sk               1235 net/sunrpc/svcsock.c 		switch (sk->sk_state) {
sk               1277 net/sunrpc/svcsock.c 	inet = sock->sk;
sk               1281 net/sunrpc/svcsock.c 		err = svc_register(serv, sock_net(sock->sk), inet->sk_family,
sk               1326 net/sunrpc/svcsock.c 	if (sock_net(sock->sk) != net)
sk               1359 net/sunrpc/svcsock.c 	if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
sk               1362 net/sunrpc/svcsock.c 	if (so->sk->sk_protocol != IPPROTO_TCP &&
sk               1363 net/sunrpc/svcsock.c 	    so->sk->sk_protocol != IPPROTO_UDP)
sk               1448 net/sunrpc/svcsock.c 		sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
sk               1483 net/sunrpc/svcsock.c 	struct sock *sk = svsk->sk_sk;
sk               1488 net/sunrpc/svcsock.c 	lock_sock(sk);
sk               1489 net/sunrpc/svcsock.c 	sk->sk_state_change = svsk->sk_ostate;
sk               1490 net/sunrpc/svcsock.c 	sk->sk_data_ready = svsk->sk_odata;
sk               1491 net/sunrpc/svcsock.c 	sk->sk_write_space = svsk->sk_owspace;
sk               1492 net/sunrpc/svcsock.c 	sk->sk_user_data = NULL;
sk               1493 net/sunrpc/svcsock.c 	release_sock(sk);
sk                221 net/sunrpc/xprtsock.c static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
sk                223 net/sunrpc/xprtsock.c 	return (struct rpc_xprt *) sk->sk_user_data;
sk                880 net/sunrpc/xprtsock.c 	struct sock *sk = transport->inet;
sk                894 net/sunrpc/xprtsock.c 		sk->sk_write_pending++;
sk                906 net/sunrpc/xprtsock.c 		wq = rcu_dereference(sk->sk_wq);
sk                910 net/sunrpc/xprtsock.c 		sk->sk_write_space(sk);
sk               1200 net/sunrpc/xprtsock.c static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
sk               1202 net/sunrpc/xprtsock.c 	transport->old_data_ready = sk->sk_data_ready;
sk               1203 net/sunrpc/xprtsock.c 	transport->old_state_change = sk->sk_state_change;
sk               1204 net/sunrpc/xprtsock.c 	transport->old_write_space = sk->sk_write_space;
sk               1205 net/sunrpc/xprtsock.c 	transport->old_error_report = sk->sk_error_report;
sk               1208 net/sunrpc/xprtsock.c static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
sk               1210 net/sunrpc/xprtsock.c 	sk->sk_data_ready = transport->old_data_ready;
sk               1211 net/sunrpc/xprtsock.c 	sk->sk_state_change = transport->old_state_change;
sk               1212 net/sunrpc/xprtsock.c 	sk->sk_write_space = transport->old_write_space;
sk               1213 net/sunrpc/xprtsock.c 	sk->sk_error_report = transport->old_error_report;
sk               1248 net/sunrpc/xprtsock.c static void xs_error_report(struct sock *sk)
sk               1253 net/sunrpc/xprtsock.c 	read_lock_bh(&sk->sk_callback_lock);
sk               1254 net/sunrpc/xprtsock.c 	if (!(xprt = xprt_from_sock(sk)))
sk               1258 net/sunrpc/xprtsock.c 	transport->xprt_err = -sk->sk_err;
sk               1263 net/sunrpc/xprtsock.c 	trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
sk               1269 net/sunrpc/xprtsock.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               1275 net/sunrpc/xprtsock.c 	struct sock *sk = transport->inet;
sk               1279 net/sunrpc/xprtsock.c 	if (sk == NULL)
sk               1283 net/sunrpc/xprtsock.c 		sk_clear_memalloc(sk);
sk               1288 net/sunrpc/xprtsock.c 	write_lock_bh(&sk->sk_callback_lock);
sk               1293 net/sunrpc/xprtsock.c 	sk->sk_user_data = NULL;
sk               1295 net/sunrpc/xprtsock.c 	xs_restore_old_callbacks(transport, sk);
sk               1297 net/sunrpc/xprtsock.c 	write_unlock_bh(&sk->sk_callback_lock);
sk               1369 net/sunrpc/xprtsock.c 		struct sock *sk,
sk               1405 net/sunrpc/xprtsock.c 		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
sk               1415 net/sunrpc/xprtsock.c 	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
sk               1425 net/sunrpc/xprtsock.c 	struct sock *sk;
sk               1429 net/sunrpc/xprtsock.c 	sk = transport->inet;
sk               1430 net/sunrpc/xprtsock.c 	if (sk == NULL)
sk               1433 net/sunrpc/xprtsock.c 		skb = skb_recv_udp(sk, 0, 1, &err);
sk               1436 net/sunrpc/xprtsock.c 		xs_udp_data_read_skb(&transport->xprt, sk, skb);
sk               1460 net/sunrpc/xprtsock.c static void xs_data_ready(struct sock *sk)
sk               1464 net/sunrpc/xprtsock.c 	read_lock_bh(&sk->sk_callback_lock);
sk               1466 net/sunrpc/xprtsock.c 	xprt = xprt_from_sock(sk);
sk               1470 net/sunrpc/xprtsock.c 		transport->old_data_ready(sk);
sk               1479 net/sunrpc/xprtsock.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               1503 net/sunrpc/xprtsock.c static void xs_tcp_state_change(struct sock *sk)
sk               1508 net/sunrpc/xprtsock.c 	read_lock_bh(&sk->sk_callback_lock);
sk               1509 net/sunrpc/xprtsock.c 	if (!(xprt = xprt_from_sock(sk)))
sk               1513 net/sunrpc/xprtsock.c 			sk->sk_state, xprt_connected(xprt),
sk               1514 net/sunrpc/xprtsock.c 			sock_flag(sk, SOCK_DEAD),
sk               1515 net/sunrpc/xprtsock.c 			sock_flag(sk, SOCK_ZAPPED),
sk               1516 net/sunrpc/xprtsock.c 			sk->sk_shutdown);
sk               1519 net/sunrpc/xprtsock.c 	trace_rpc_socket_state_change(xprt, sk->sk_socket);
sk               1520 net/sunrpc/xprtsock.c 	switch (sk->sk_state) {
sk               1572 net/sunrpc/xprtsock.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               1575 net/sunrpc/xprtsock.c static void xs_write_space(struct sock *sk)
sk               1581 net/sunrpc/xprtsock.c 	if (!sk->sk_socket)
sk               1583 net/sunrpc/xprtsock.c 	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               1585 net/sunrpc/xprtsock.c 	if (unlikely(!(xprt = xprt_from_sock(sk))))
sk               1589 net/sunrpc/xprtsock.c 	wq = rcu_dereference(sk->sk_wq);
sk               1594 net/sunrpc/xprtsock.c 	sk->sk_write_pending--;
sk               1609 net/sunrpc/xprtsock.c static void xs_udp_write_space(struct sock *sk)
sk               1611 net/sunrpc/xprtsock.c 	read_lock_bh(&sk->sk_callback_lock);
sk               1614 net/sunrpc/xprtsock.c 	if (sock_writeable(sk))
sk               1615 net/sunrpc/xprtsock.c 		xs_write_space(sk);
sk               1617 net/sunrpc/xprtsock.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               1630 net/sunrpc/xprtsock.c static void xs_tcp_write_space(struct sock *sk)
sk               1632 net/sunrpc/xprtsock.c 	read_lock_bh(&sk->sk_callback_lock);
sk               1635 net/sunrpc/xprtsock.c 	if (sk_stream_is_writeable(sk))
sk               1636 net/sunrpc/xprtsock.c 		xs_write_space(sk);
sk               1638 net/sunrpc/xprtsock.c 	read_unlock_bh(&sk->sk_callback_lock);
sk               1644 net/sunrpc/xprtsock.c 	struct sock *sk = transport->inet;
sk               1647 net/sunrpc/xprtsock.c 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk               1648 net/sunrpc/xprtsock.c 		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
sk               1651 net/sunrpc/xprtsock.c 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk               1652 net/sunrpc/xprtsock.c 		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
sk               1653 net/sunrpc/xprtsock.c 		sk->sk_write_space(sk);
sk               1847 net/sunrpc/xprtsock.c 	struct sock *sk = sock->sk;
sk               1849 net/sunrpc/xprtsock.c 	sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
sk               1855 net/sunrpc/xprtsock.c 	struct sock *sk = sock->sk;
sk               1857 net/sunrpc/xprtsock.c 	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
sk               1863 net/sunrpc/xprtsock.c 	struct sock *sk = sock->sk;
sk               1865 net/sunrpc/xprtsock.c 	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
sk               1871 net/sunrpc/xprtsock.c 	if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
sk               1938 net/sunrpc/xprtsock.c 		struct sock *sk = sock->sk;
sk               1940 net/sunrpc/xprtsock.c 		write_lock_bh(&sk->sk_callback_lock);
sk               1942 net/sunrpc/xprtsock.c 		xs_save_old_callbacks(transport, sk);
sk               1944 net/sunrpc/xprtsock.c 		sk->sk_user_data = xprt;
sk               1945 net/sunrpc/xprtsock.c 		sk->sk_data_ready = xs_data_ready;
sk               1946 net/sunrpc/xprtsock.c 		sk->sk_write_space = xs_udp_write_space;
sk               1947 net/sunrpc/xprtsock.c 		sock_set_flag(sk, SOCK_FASYNC);
sk               1948 net/sunrpc/xprtsock.c 		sk->sk_error_report = xs_error_report;
sk               1954 net/sunrpc/xprtsock.c 		transport->inet = sk;
sk               1956 net/sunrpc/xprtsock.c 		write_unlock_bh(&sk->sk_callback_lock);
sk               2135 net/sunrpc/xprtsock.c 		struct sock *sk = sock->sk;
sk               2137 net/sunrpc/xprtsock.c 		write_lock_bh(&sk->sk_callback_lock);
sk               2139 net/sunrpc/xprtsock.c 		xs_save_old_callbacks(transport, sk);
sk               2141 net/sunrpc/xprtsock.c 		sk->sk_user_data = xprt;
sk               2142 net/sunrpc/xprtsock.c 		sk->sk_data_ready = xs_data_ready;
sk               2143 net/sunrpc/xprtsock.c 		sk->sk_write_space = xs_udp_write_space;
sk               2144 net/sunrpc/xprtsock.c 		sock_set_flag(sk, SOCK_FASYNC);
sk               2150 net/sunrpc/xprtsock.c 		transport->inet = sk;
sk               2154 net/sunrpc/xprtsock.c 		write_unlock_bh(&sk->sk_callback_lock);
sk               2282 net/sunrpc/xprtsock.c 		struct sock *sk = sock->sk;
sk               2297 net/sunrpc/xprtsock.c 		write_lock_bh(&sk->sk_callback_lock);
sk               2299 net/sunrpc/xprtsock.c 		xs_save_old_callbacks(transport, sk);
sk               2301 net/sunrpc/xprtsock.c 		sk->sk_user_data = xprt;
sk               2302 net/sunrpc/xprtsock.c 		sk->sk_data_ready = xs_data_ready;
sk               2303 net/sunrpc/xprtsock.c 		sk->sk_state_change = xs_tcp_state_change;
sk               2304 net/sunrpc/xprtsock.c 		sk->sk_write_space = xs_tcp_write_space;
sk               2305 net/sunrpc/xprtsock.c 		sock_set_flag(sk, SOCK_FASYNC);
sk               2306 net/sunrpc/xprtsock.c 		sk->sk_error_report = xs_error_report;
sk               2309 net/sunrpc/xprtsock.c 		sock_reset_flag(sk, SOCK_LINGER);
sk               2310 net/sunrpc/xprtsock.c 		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
sk               2316 net/sunrpc/xprtsock.c 		transport->inet = sk;
sk               2318 net/sunrpc/xprtsock.c 		write_unlock_bh(&sk->sk_callback_lock);
sk               2382 net/sunrpc/xprtsock.c 			sock->sk->sk_state);
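
The net/sunrpc/xprtsock.c entries around lines 1938-1956, 2135-2154 and 2297-2318 all repeat one idiom: take sk->sk_callback_lock for writing, save the socket's original callbacks (xs_save_old_callbacks), point sk_user_data at the transport, and install transport-specific handlers. A minimal sketch of that pattern, assuming a hypothetical my_transport structure and my_* callbacks:

    #include <net/sock.h>

    struct my_transport {
            struct sock *inet;
            void (*old_data_ready)(struct sock *sk);
            void (*old_write_space)(struct sock *sk);
    };

    static void my_data_ready(struct sock *sk);     /* replacement callbacks */
    static void my_write_space(struct sock *sk);

    static void my_attach_callbacks(struct my_transport *t, struct socket *sock)
    {
            struct sock *sk = sock->sk;

            write_lock_bh(&sk->sk_callback_lock);
            /* remember the originals so teardown can restore them */
            t->old_data_ready  = sk->sk_data_ready;
            t->old_write_space = sk->sk_write_space;
            sk->sk_user_data   = t;
            sk->sk_data_ready  = my_data_ready;
            sk->sk_write_space = my_write_space;
            t->inet = sk;
            write_unlock_bh(&sk->sk_callback_lock);
    }
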
sk                804 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
sk                887 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
sk                928 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
sk                983 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
sk               1024 net/tipc/bearer.c 	struct net *net = sock_net(skb->sk);
sk                 41 net/tipc/diag.c static u64 __tipc_diag_gen_cookie(struct sock *sk)
sk                 45 net/tipc/diag.c 	sock_diag_save_cookie(sk, res);
sk                 80 net/tipc/diag.c 	struct net *net = sock_net(skb->sk);
sk                944 net/tipc/name_table.c 	struct net *net = sock_net(skb->sk);
sk                217 net/tipc/net.c 	struct net *net = sock_net(skb->sk);
sk                243 net/tipc/net.c 	struct net *net = sock_net(skb->sk);
sk                198 net/tipc/netlink_compat.c 	buf->sk = msg->dst_sk;
sk                339 net/tipc/netlink_compat.c 	doit_buf->sk = msg->dst_sk;
sk               1276 net/tipc/netlink_compat.c 	msg.dst_sk = skb->sk;
sk               1926 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
sk               1981 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
sk               2088 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
sk               2230 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
sk               2298 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
sk               2370 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
sk               2430 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
sk               2451 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
sk               2479 net/tipc/node.c 	struct net *net = sock_net(skb->sk);
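
The net/tipc/bearer.c, net.c, name_table.c and node.c entries are all the same shape: a generic netlink doit/dumpit handler recovering the per-namespace TIPC state via sock_net(skb->sk), i.e. from the netlink socket behind the request. A sketch of that idiom (handler name and dump helper are hypothetical):

    #include <net/sock.h>
    #include <linux/netlink.h>

    static int my_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            /* the output skb is owned by the requesting netlink socket,
             * so its sk yields the right network namespace */
            struct net *net = sock_net(skb->sk);

            /* ... walk per-net state under 'net' and append attributes ... */
            (void)net;
            return skb->len;
    }
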
sk                 95 net/tipc/socket.c 	struct sock sk;
sk                122 net/tipc/socket.c static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
sk                123 net/tipc/socket.c static void tipc_data_ready(struct sock *sk);
sk                124 net/tipc/socket.c static void tipc_write_space(struct sock *sk);
sk                125 net/tipc/socket.c static void tipc_sock_destruct(struct sock *sk);
sk                195 net/tipc/socket.c static struct tipc_sock *tipc_sk(const struct sock *sk)
sk                197 net/tipc/socket.c 	return container_of(sk, struct tipc_sock, sk);
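
tipc_sk() (socket.c lines 195-197 above) works because struct tipc_sock embeds its struct sock as the first member (line 95), so sk_alloc() can size the whole container and container_of() maps a plain struct sock pointer back to it. A condensed sketch of that layout, with the structure renamed for illustration:

    #include <linux/kernel.h>
    #include <net/sock.h>

    struct my_tipc_sock {               /* stand-in for struct tipc_sock */
            struct sock sk;             /* kept first: sk_alloc() allocates the container */
            u32 portid;
            /* ... protocol-private state ... */
    };

    static struct my_tipc_sock *my_tipc_sk(const struct sock *sk)
    {
            return container_of(sk, struct my_tipc_sock, sk);
    }
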
sk                235 net/tipc/socket.c static void tsk_advance_rx_queue(struct sock *sk)
sk                237 net/tipc/socket.c 	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
sk                238 net/tipc/socket.c 	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
sk                243 net/tipc/socket.c static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
sk                247 net/tipc/socket.c 	u32 onode = tipc_own_addr(sock_net(sk));
sk                252 net/tipc/socket.c 	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
sk                255 net/tipc/socket.c 	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
sk                263 net/tipc/socket.c static void tsk_rej_rx_queue(struct sock *sk)
sk                267 net/tipc/socket.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
sk                268 net/tipc/socket.c 		tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
sk                271 net/tipc/socket.c static bool tipc_sk_connected(struct sock *sk)
sk                273 net/tipc/socket.c 	return sk->sk_state == TIPC_ESTABLISHED;
sk                281 net/tipc/socket.c static bool tipc_sk_type_connectionless(struct sock *sk)
sk                283 net/tipc/socket.c 	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
sk                293 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk                294 net/tipc/socket.c 	u32 self = tipc_own_addr(sock_net(sk));
sk                298 net/tipc/socket.c 	if (unlikely(!tipc_sk_connected(sk)))
sk                326 net/tipc/socket.c static int tipc_set_sk_state(struct sock *sk, int state)
sk                328 net/tipc/socket.c 	int oldsk_state = sk->sk_state;
sk                353 net/tipc/socket.c 		sk->sk_state = state;
sk                360 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk                361 net/tipc/socket.c 	int err = sock_error(sk);
sk                367 net/tipc/socket.c 		if (sk->sk_state == TIPC_DISCONNECTING)
sk                369 net/tipc/socket.c 		else if (!tipc_sk_connected(sk))
sk                389 net/tipc/socket.c 		sk_ = (sock_)->sk;					       \
sk                419 net/tipc/socket.c 	struct sock *sk;
sk                443 net/tipc/socket.c 	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
sk                444 net/tipc/socket.c 	if (sk == NULL)
sk                447 net/tipc/socket.c 	tsk = tipc_sk(sk);
sk                455 net/tipc/socket.c 	sock_init_data(sock, sk);
sk                456 net/tipc/socket.c 	tipc_set_sk_state(sk, TIPC_OPEN);
sk                469 net/tipc/socket.c 	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
sk                470 net/tipc/socket.c 	sk->sk_shutdown = 0;
sk                471 net/tipc/socket.c 	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
sk                472 net/tipc/socket.c 	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
sk                473 net/tipc/socket.c 	sk->sk_data_ready = tipc_data_ready;
sk                474 net/tipc/socket.c 	sk->sk_write_space = tipc_write_space;
sk                475 net/tipc/socket.c 	sk->sk_destruct = tipc_sock_destruct;
sk                484 net/tipc/socket.c 	if (tipc_sk_type_connectionless(sk)) {
sk                490 net/tipc/socket.c 	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
sk                498 net/tipc/socket.c 	sock_put(&tsk->sk);
sk                504 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk                505 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk                506 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk                516 net/tipc/socket.c 	__skb_queue_purge(&sk->sk_write_queue);
sk                521 net/tipc/socket.c 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk                526 net/tipc/socket.c 		if (!tipc_sk_type_connectionless(sk) &&
sk                527 net/tipc/socket.c 		    sk->sk_state != TIPC_DISCONNECTING) {
sk                528 net/tipc/socket.c 			tipc_set_sk_state(sk, TIPC_DISCONNECTING);
sk                531 net/tipc/socket.c 		tipc_sk_respond(sk, skb, error);
sk                534 net/tipc/socket.c 	if (tipc_sk_type_connectionless(sk))
sk                537 net/tipc/socket.c 	if (sk->sk_state != TIPC_DISCONNECTING) {
sk                545 net/tipc/socket.c 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
sk                567 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk                574 net/tipc/socket.c 	if (sk == NULL)
sk                577 net/tipc/socket.c 	tsk = tipc_sk(sk);
sk                578 net/tipc/socket.c 	lock_sock(sk);
sk                580 net/tipc/socket.c 	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
sk                582 net/tipc/socket.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk                586 net/tipc/socket.c 	sk_stop_timer(sk, &sk->sk_timer);
sk                589 net/tipc/socket.c 	sock_orphan(sk);
sk                591 net/tipc/socket.c 	release_sock(sk);
sk                595 net/tipc/socket.c 	sock->sk = NULL;
sk                618 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk                620 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk                623 net/tipc/socket.c 	lock_sock(sk);
sk                659 net/tipc/socket.c 	release_sock(sk);
sk                680 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk                681 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk                685 net/tipc/socket.c 		if ((!tipc_sk_connected(sk)) &&
sk                686 net/tipc/socket.c 		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
sk                692 net/tipc/socket.c 		addr->addr.id.node = tipc_own_addr(sock_net(sk));
sk                724 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk                725 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk                729 net/tipc/socket.c 	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
sk                731 net/tipc/socket.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                733 net/tipc/socket.c 	if (sk->sk_shutdown == SHUTDOWN_MASK)
sk                736 net/tipc/socket.c 	switch (sk->sk_state) {
sk                743 net/tipc/socket.c 		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk                749 net/tipc/socket.c 		if (!tipc_sk_type_connectionless(sk))
sk                751 net/tipc/socket.c 		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
sk                776 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk                777 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk                779 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk                817 net/tipc/socket.c 		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
sk                891 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk                894 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk                895 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk                936 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk                937 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk                943 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               1021 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               1022 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               1023 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               1094 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               1096 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               1099 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               1222 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk               1228 net/tipc/socket.c 		trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
sk               1233 net/tipc/socket.c 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
sk               1234 net/tipc/socket.c 		tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
sk               1236 net/tipc/socket.c 		sk->sk_state_change(sk);
sk               1262 net/tipc/socket.c 			sk->sk_write_space(sk);
sk               1286 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               1289 net/tipc/socket.c 	lock_sock(sk);
sk               1291 net/tipc/socket.c 	release_sock(sk);
sk               1298 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               1299 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               1300 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               1302 net/tipc/socket.c 	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
sk               1304 net/tipc/socket.c 	bool syn = !tipc_sk_type_connectionless(sk);
sk               1342 net/tipc/socket.c 		if (sk->sk_state == TIPC_LISTEN)
sk               1344 net/tipc/socket.c 		if (sk->sk_state != TIPC_OPEN)
sk               1399 net/tipc/socket.c 	if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
sk               1404 net/tipc/socket.c 	trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
sk               1413 net/tipc/socket.c 		tipc_set_sk_state(sk, TIPC_CONNECTING);
sk               1431 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               1434 net/tipc/socket.c 	lock_sock(sk);
sk               1436 net/tipc/socket.c 	release_sock(sk);
sk               1443 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               1445 net/tipc/socket.c 	long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
sk               1446 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               1448 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               1473 net/tipc/socket.c 					 tipc_sk_connected(sk)));
sk               1482 net/tipc/socket.c 		trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
sk               1521 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk               1522 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               1532 net/tipc/socket.c 	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
sk               1533 net/tipc/socket.c 	tipc_set_sk_state(sk, TIPC_ESTABLISHED);
sk               1537 net/tipc/socket.c 	__skb_queue_purge(&sk->sk_write_queue);
sk               1660 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk               1661 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               1667 net/tipc/socket.c 	if (!tipc_sk_connected(sk))
sk               1680 net/tipc/socket.c 		tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
sk               1688 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               1691 net/tipc/socket.c 	int err = sock_error(sk);
sk               1697 net/tipc/socket.c 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
sk               1698 net/tipc/socket.c 			if (sk->sk_shutdown & RCV_SHUTDOWN) {
sk               1702 net/tipc/socket.c 			add_wait_queue(sk_sleep(sk), &wait);
sk               1703 net/tipc/socket.c 			release_sock(sk);
sk               1706 net/tipc/socket.c 			lock_sock(sk);
sk               1707 net/tipc/socket.c 			remove_wait_queue(sk_sleep(sk), &wait);
sk               1710 net/tipc/socket.c 		if (!skb_queue_empty(&sk->sk_receive_queue))
sk               1719 net/tipc/socket.c 		err = sock_error(sk);
sk               1741 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               1742 net/tipc/socket.c 	bool connected = !tipc_sk_type_connectionless(sk);
sk               1743 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               1755 net/tipc/socket.c 	lock_sock(sk);
sk               1756 net/tipc/socket.c 	if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
sk               1760 net/tipc/socket.c 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               1767 net/tipc/socket.c 		skb = skb_peek(&sk->sk_receive_queue);
sk               1775 net/tipc/socket.c 		tsk_advance_rx_queue(sk);
sk               1818 net/tipc/socket.c 		tipc_node_distr_xmit(sock_net(sk), &xmitq);
sk               1821 net/tipc/socket.c 	tsk_advance_rx_queue(sk);
sk               1831 net/tipc/socket.c 	release_sock(sk);
sk               1849 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               1850 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               1863 net/tipc/socket.c 	lock_sock(sk);
sk               1865 net/tipc/socket.c 	if (unlikely(sk->sk_state == TIPC_OPEN)) {
sk               1869 net/tipc/socket.c 	required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
sk               1870 net/tipc/socket.c 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               1877 net/tipc/socket.c 		skb = skb_peek(&sk->sk_receive_queue);
sk               1886 net/tipc/socket.c 			tsk_advance_rx_queue(sk);
sk               1924 net/tipc/socket.c 		tsk_advance_rx_queue(sk);
sk               1935 net/tipc/socket.c 	} while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
sk               1937 net/tipc/socket.c 	release_sock(sk);
sk               1945 net/tipc/socket.c static void tipc_write_space(struct sock *sk)
sk               1950 net/tipc/socket.c 	wq = rcu_dereference(sk->sk_wq);
sk               1962 net/tipc/socket.c static void tipc_data_ready(struct sock *sk)
sk               1967 net/tipc/socket.c 	wq = rcu_dereference(sk->sk_wq);
sk               1974 net/tipc/socket.c static void tipc_sock_destruct(struct sock *sk)
sk               1976 net/tipc/socket.c 	__skb_queue_purge(&sk->sk_receive_queue);
sk               1979 net/tipc/socket.c static void tipc_sk_proto_rcv(struct sock *sk,
sk               1984 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               2004 net/tipc/socket.c 		tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
sk               2012 net/tipc/socket.c 		sk->sk_write_space(sk);
sk               2025 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk               2026 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               2039 net/tipc/socket.c 	switch (sk->sk_state) {
sk               2051 net/tipc/socket.c 			sk->sk_state_change(sk);
sk               2064 net/tipc/socket.c 		if (skb_queue_empty(&sk->sk_write_queue))
sk               2069 net/tipc/socket.c 		sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
sk               2090 net/tipc/socket.c 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
sk               2092 net/tipc/socket.c 		sk->sk_state_change(sk);
sk               2095 net/tipc/socket.c 		pr_err("Unknown sk_state %u\n", sk->sk_state);
sk               2098 net/tipc/socket.c 	tipc_set_sk_state(sk, TIPC_DISCONNECTING);
sk               2099 net/tipc/socket.c 	sk->sk_err = ECONNREFUSED;
sk               2100 net/tipc/socket.c 	sk->sk_state_change(sk);
sk               2122 net/tipc/socket.c static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
sk               2124 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               2128 net/tipc/socket.c 		return READ_ONCE(sk->sk_rcvbuf);
sk               2131 net/tipc/socket.c 		return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
sk               2134 net/tipc/socket.c 		return READ_ONCE(sk->sk_rcvbuf);
sk               2150 net/tipc/socket.c static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
sk               2153 net/tipc/socket.c 	bool sk_conn = !tipc_sk_type_connectionless(sk);
sk               2154 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               2157 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               2162 net/tipc/socket.c 	trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
sk               2168 net/tipc/socket.c 		tipc_sk_proto_rcv(sk, &inputq, xmitq);
sk               2179 net/tipc/socket.c 		limit = rcvbuf_limit(sk, skb);
sk               2184 net/tipc/socket.c 		else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
sk               2185 net/tipc/socket.c 			trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
sk               2187 net/tipc/socket.c 			atomic_inc(&sk->sk_drops);
sk               2193 net/tipc/socket.c 				trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
sk               2200 net/tipc/socket.c 		__skb_queue_tail(&sk->sk_receive_queue, skb);
sk               2201 net/tipc/socket.c 		skb_set_owner_r(skb, sk);
sk               2202 net/tipc/socket.c 		trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
sk               2204 net/tipc/socket.c 		sk->sk_data_ready(sk);
sk               2215 net/tipc/socket.c static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk               2217 net/tipc/socket.c 	unsigned int before = sk_rmem_alloc_get(sk);
sk               2223 net/tipc/socket.c 	tipc_sk_filter_rcv(sk, skb, &xmitq);
sk               2224 net/tipc/socket.c 	added = sk_rmem_alloc_get(sk) - before;
sk               2225 net/tipc/socket.c 	atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
sk               2228 net/tipc/socket.c 	tipc_node_distr_xmit(sock_net(sk), &xmitq);
sk               2241 net/tipc/socket.c static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
sk               2259 net/tipc/socket.c 		if (!sock_owned_by_user(sk)) {
sk               2260 net/tipc/socket.c 			tipc_sk_filter_rcv(sk, skb, xmitq);
sk               2265 net/tipc/socket.c 		dcnt = &tipc_sk(sk)->dupl_rcvcnt;
sk               2266 net/tipc/socket.c 		if (!sk->sk_backlog.len)
sk               2268 net/tipc/socket.c 		lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
sk               2269 net/tipc/socket.c 		if (likely(!sk_add_backlog(sk, skb, lim))) {
sk               2270 net/tipc/socket.c 			trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
sk               2275 net/tipc/socket.c 		trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
sk               2277 net/tipc/socket.c 		onode = tipc_own_addr(sock_net(sk));
sk               2278 net/tipc/socket.c 		atomic_inc(&sk->sk_drops);
sk               2280 net/tipc/socket.c 			trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
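
tipc_sk_enqueue() (socket.c lines 2241-2280) shows the standard owner check on the receive path: if no user context owns the socket, filter the buffer immediately; otherwise defer it to the socket backlog, bounded by a limit derived from the receive buffer. A stripped-down sketch of that control flow, with my_filter_rcv() standing in for the protocol's real filter:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    static void my_filter_rcv(struct sock *sk, struct sk_buff *skb);

    /* caller holds sk->sk_lock.slock (bh), as in tipc_sk_rcv() */
    static void my_enqueue(struct sock *sk, struct sk_buff *skb)
    {
            unsigned int lim;

            if (!sock_owned_by_user(sk)) {
                    /* no process context owns the socket: deliver now */
                    my_filter_rcv(sk, skb);
                    return;
            }

            /* owner busy: queue to the backlog within a buffer-derived limit */
            lim = READ_ONCE(sk->sk_rcvbuf);
            if (sk_add_backlog(sk, skb, lim))
                    kfree_skb(skb);         /* over limit: drop */
    }
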
sk               2300 net/tipc/socket.c 	struct sock *sk;
sk               2309 net/tipc/socket.c 			sk = &tsk->sk;
sk               2310 net/tipc/socket.c 			if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
sk               2311 net/tipc/socket.c 				tipc_sk_enqueue(inputq, sk, dport, &xmitq);
sk               2312 net/tipc/socket.c 				spin_unlock_bh(&sk->sk_lock.slock);
sk               2315 net/tipc/socket.c 			tipc_node_distr_xmit(sock_net(sk), &xmitq);
sk               2316 net/tipc/socket.c 			sock_put(sk);
sk               2343 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               2347 net/tipc/socket.c 		int err = sock_error(sk);
sk               2355 net/tipc/socket.c 		add_wait_queue(sk_sleep(sk), &wait);
sk               2356 net/tipc/socket.c 		done = sk_wait_event(sk, timeo_p,
sk               2357 net/tipc/socket.c 				     sk->sk_state != TIPC_CONNECTING, &wait);
sk               2358 net/tipc/socket.c 		remove_wait_queue(sk_sleep(sk), &wait);
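
tipc_wait_for_connect() (socket.c lines 2343-2358) uses the sk_wait_event() helper, which drops the socket lock, sleeps until woken or the timeout expires, and re-acquires the lock before the condition is re-tested. A sketch under the assumption of a hypothetical my_connect_done() state test:

    #include <net/sock.h>
    #include <linux/wait.h>
    #include <linux/sched/signal.h>

    static bool my_connect_done(struct sock *sk);   /* protocol-specific state test */

    /* called with the socket locked; *timeo is the remaining send timeout */
    static int my_wait_for_connect(struct sock *sk, long *timeo)
    {
            DEFINE_WAIT_FUNC(wait, woken_wake_function);
            int done;

            do {
                    if (!*timeo)
                            return -ETIMEDOUT;
                    if (signal_pending(current))
                            return sock_intr_errno(*timeo);

                    add_wait_queue(sk_sleep(sk), &wait);
                    /* releases the socket lock while sleeping, re-locks after */
                    done = sk_wait_event(sk, timeo, my_connect_done(sk), &wait);
                    remove_wait_queue(sk_sleep(sk), &wait);
            } while (!done);
            return 0;
    }
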
sk               2385 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               2386 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               2396 net/tipc/socket.c 	lock_sock(sk);
sk               2405 net/tipc/socket.c 		if (!tipc_sk_type_connectionless(sk))
sk               2414 net/tipc/socket.c 	if (tipc_sk_type_connectionless(sk)) {
sk               2422 net/tipc/socket.c 	previous = sk->sk_state;
sk               2424 net/tipc/socket.c 	switch (sk->sk_state) {
sk               2464 net/tipc/socket.c 	release_sock(sk);
sk               2477 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               2480 net/tipc/socket.c 	lock_sock(sk);
sk               2481 net/tipc/socket.c 	res = tipc_set_sk_state(sk, TIPC_LISTEN);
sk               2482 net/tipc/socket.c 	release_sock(sk);
sk               2489 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               2499 net/tipc/socket.c 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
sk               2501 net/tipc/socket.c 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
sk               2502 net/tipc/socket.c 			release_sock(sk);
sk               2504 net/tipc/socket.c 			lock_sock(sk);
sk               2507 net/tipc/socket.c 		if (!skb_queue_empty(&sk->sk_receive_queue))
sk               2516 net/tipc/socket.c 	finish_wait(sk_sleep(sk), &wait);
sk               2531 net/tipc/socket.c 	struct sock *new_sk, *sk = sock->sk;
sk               2538 net/tipc/socket.c 	lock_sock(sk);
sk               2540 net/tipc/socket.c 	if (sk->sk_state != TIPC_LISTEN) {
sk               2544 net/tipc/socket.c 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
sk               2549 net/tipc/socket.c 	buf = skb_peek(&sk->sk_receive_queue);
sk               2551 net/tipc/socket.c 	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
sk               2554 net/tipc/socket.c 	security_sk_clone(sock->sk, new_sock->sk);
sk               2556 net/tipc/socket.c 	new_sk = new_sock->sk;
sk               2585 net/tipc/socket.c 		tsk_advance_rx_queue(sk);
sk               2588 net/tipc/socket.c 		__skb_dequeue(&sk->sk_receive_queue);
sk               2594 net/tipc/socket.c 	release_sock(sk);
sk               2609 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               2615 net/tipc/socket.c 	lock_sock(sk);
sk               2617 net/tipc/socket.c 	trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
sk               2619 net/tipc/socket.c 	sk->sk_shutdown = SEND_SHUTDOWN;
sk               2621 net/tipc/socket.c 	if (sk->sk_state == TIPC_DISCONNECTING) {
sk               2623 net/tipc/socket.c 		__skb_queue_purge(&sk->sk_receive_queue);
sk               2626 net/tipc/socket.c 		sk->sk_state_change(sk);
sk               2632 net/tipc/socket.c 	release_sock(sk);
sk               2636 net/tipc/socket.c static void tipc_sk_check_probing_state(struct sock *sk,
sk               2639 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               2647 net/tipc/socket.c 		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
sk               2648 net/tipc/socket.c 		sk->sk_err = ECONNABORTED;
sk               2649 net/tipc/socket.c 		tipc_node_remove_conn(sock_net(sk), pnode, pport);
sk               2650 net/tipc/socket.c 		sk->sk_state_change(sk);
sk               2659 net/tipc/socket.c 	sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
sk               2662 net/tipc/socket.c static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
sk               2664 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               2668 net/tipc/socket.c 		sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
sk               2672 net/tipc/socket.c 	tipc_msg_skb_clone(&sk->sk_write_queue, list);
sk               2677 net/tipc/socket.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk               2678 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               2684 net/tipc/socket.c 	bh_lock_sock(sk);
sk               2687 net/tipc/socket.c 	if (sock_owned_by_user(sk)) {
sk               2688 net/tipc/socket.c 		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
sk               2689 net/tipc/socket.c 		bh_unlock_sock(sk);
sk               2690 net/tipc/socket.c 		sock_put(sk);
sk               2694 net/tipc/socket.c 	if (sk->sk_state == TIPC_ESTABLISHED)
sk               2695 net/tipc/socket.c 		tipc_sk_check_probing_state(sk, &list);
sk               2696 net/tipc/socket.c 	else if (sk->sk_state == TIPC_CONNECTING)
sk               2697 net/tipc/socket.c 		tipc_sk_retry_connect(sk, &list);
sk               2699 net/tipc/socket.c 	bh_unlock_sock(sk);
sk               2702 net/tipc/socket.c 		rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
sk               2709 net/tipc/socket.c 	sock_put(sk);
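
tipc_sk_timeout() (socket.c lines 2677-2709) follows the common socket-timer shape: recover the sock with from_timer(), take the bottom-half lock, and if a user context currently owns the socket just re-arm the timer and back off; the reference taken when the timer was armed is dropped on every exit. Roughly, with the periodic work elided:

    #include <net/sock.h>
    #include <linux/timer.h>

    static void my_sk_timeout(struct timer_list *t)
    {
            struct sock *sk = from_timer(sk, t, sk_timer);

            bh_lock_sock(sk);
            if (sock_owned_by_user(sk)) {
                    /* owner busy: retry shortly rather than race it */
                    sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
                    bh_unlock_sock(sk);
                    sock_put(sk);
                    return;
            }

            /* ... periodic work: probe the peer, retry a connect, ... */

            bh_unlock_sock(sk);
            sock_put(sk);   /* balances the hold taken when the timer was armed */
    }
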
sk               2715 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk               2716 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               2723 net/tipc/socket.c 	if (tipc_sk_connected(sk))
sk               2743 net/tipc/socket.c 	struct net *net = sock_net(&tsk->sk);
sk               2791 net/tipc/socket.c 			sock_hold(&tsk->sk);
sk               2793 net/tipc/socket.c 			lock_sock(&tsk->sk);
sk               2797 net/tipc/socket.c 			release_sock(&tsk->sk);
sk               2799 net/tipc/socket.c 			sock_put(&tsk->sk);
sk               2816 net/tipc/socket.c 		sock_hold(&tsk->sk);
sk               2824 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk               2825 net/tipc/socket.c 	struct net *net = sock_net(sk);
sk               2835 net/tipc/socket.c 		sock_hold(&tsk->sk);
sk               2839 net/tipc/socket.c 		sock_put(&tsk->sk);
sk               2847 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk               2848 net/tipc/socket.c 	struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
sk               2851 net/tipc/socket.c 		WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
sk               2852 net/tipc/socket.c 		__sock_put(sk);
sk               2885 net/tipc/socket.c 	struct net *net = sock_net(&tsk->sk);
sk               2917 net/tipc/socket.c 	tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
sk               2923 net/tipc/socket.c 	struct net *net = sock_net(&tsk->sk);
sk               2953 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               2954 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               2985 net/tipc/socket.c 	lock_sock(sk);
sk               3001 net/tipc/socket.c 		tipc_sk(sk)->conn_timeout = value;
sk               3021 net/tipc/socket.c 	release_sock(sk);
sk               3042 net/tipc/socket.c 	struct sock *sk = sock->sk;
sk               3043 net/tipc/socket.c 	struct tipc_sock *tsk = tipc_sk(sk);
sk               3057 net/tipc/socket.c 	lock_sock(sk);
sk               3077 net/tipc/socket.c 		value = skb_queue_len(&sk->sk_receive_queue);
sk               3080 net/tipc/socket.c 		value = sk_rmem_alloc_get(sk);
sk               3092 net/tipc/socket.c 	release_sock(sk);
sk               3108 net/tipc/socket.c 	struct net *net = sock_net(sock->sk);
sk               3140 net/tipc/socket.c 	struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
sk               3141 net/tipc/socket.c 	struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
sk               3142 net/tipc/socket.c 	u32 onode = tipc_own_addr(sock_net(sock1->sk));
sk               3312 net/tipc/socket.c 	struct net *net = sock_net(skb->sk);
sk               3313 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk               3319 net/tipc/socket.c 	if (tipc_sk_connected(sk)) {
sk               3381 net/tipc/socket.c 		sock_hold(&tsk->sk);
sk               3383 net/tipc/socket.c 		lock_sock(&tsk->sk);
sk               3386 net/tipc/socket.c 			release_sock(&tsk->sk);
sk               3387 net/tipc/socket.c 			sock_put(&tsk->sk);
sk               3390 net/tipc/socket.c 		release_sock(&tsk->sk);
sk               3392 net/tipc/socket.c 		sock_put(&tsk->sk);
sk               3402 net/tipc/socket.c 	return __tipc_dump_start(cb, sock_net(cb->skb->sk));
sk               3436 net/tipc/socket.c 			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
sk               3438 net/tipc/socket.c 	struct sock *sk = &tsk->sk;
sk               3443 net/tipc/socket.c 	if (!(sk_filter_state & (1 << sk->sk_state)))
sk               3453 net/tipc/socket.c 	if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
sk               3454 net/tipc/socket.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
sk               3455 net/tipc/socket.c 	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
sk               3457 net/tipc/socket.c 			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
sk               3458 net/tipc/socket.c 					 sock_i_uid(sk))) ||
sk               3460 net/tipc/socket.c 			      tipc_diag_gen_cookie(sk),
sk               3469 net/tipc/socket.c 			skb_queue_len(&sk->sk_receive_queue)) ||
sk               3471 net/tipc/socket.c 			skb_queue_len(&sk->sk_write_queue)) ||
sk               3473 net/tipc/socket.c 			atomic_read(&sk->sk_drops)))
sk               3594 net/tipc/socket.c 	struct net *net = sock_net(skb->sk);
sk               3627 net/tipc/socket.c 	lock_sock(&tsk->sk);
sk               3631 net/tipc/socket.c 	release_sock(&tsk->sk);
sk               3632 net/tipc/socket.c 	sock_put(&tsk->sk);
sk               3651 net/tipc/socket.c bool tipc_sk_filtering(struct sock *sk)
sk               3658 net/tipc/socket.c 	if (!sk)
sk               3661 net/tipc/socket.c 	tsk = tipc_sk(sk);
sk               3675 net/tipc/socket.c 	if (_sktype && _sktype != sk->sk_type)
sk               3688 net/tipc/socket.c 	if (!tipc_sk_type_connectionless(sk)) {
sk               3701 net/tipc/socket.c u32 tipc_sock_get_portid(struct sock *sk)
sk               3703 net/tipc/socket.c 	return (sk) ? (tipc_sk(sk))->portid : 0;
sk               3715 net/tipc/socket.c bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
sk               3717 net/tipc/socket.c 	atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
sk               3718 net/tipc/socket.c 	unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
sk               3719 net/tipc/socket.c 	unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
sk               3733 net/tipc/socket.c bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
sk               3735 net/tipc/socket.c 	unsigned int lim = rcvbuf_limit(sk, skb);
sk               3736 net/tipc/socket.c 	unsigned int qsize = sk_rmem_alloc_get(sk);
sk               3752 net/tipc/socket.c int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
sk               3760 net/tipc/socket.c 	if (!sk) {
sk               3765 net/tipc/socket.c 	tsk = tipc_sk(sk);
sk               3766 net/tipc/socket.c 	tsk_connected = !tipc_sk_type_connectionless(sk);
sk               3768 net/tipc/socket.c 	i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
sk               3769 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
sk               3795 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
sk               3796 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
sk               3797 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
sk               3798 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
sk               3799 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
sk               3800 net/tipc/socket.c 	i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
sk               3804 net/tipc/socket.c 		i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
sk               3809 net/tipc/socket.c 		i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
sk               3814 net/tipc/socket.c 		i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
sk               3815 net/tipc/socket.c 		if (sk->sk_backlog.tail != sk->sk_backlog.head) {
sk               3817 net/tipc/socket.c 			i += tipc_skb_dump(sk->sk_backlog.tail, false,
sk                 66 net/tipc/socket.h 			   u64 (*tipc_diag_gen_cookie)(struct sock *sk));
sk                 74 net/tipc/socket.h u32 tipc_sock_get_portid(struct sock *sk);
sk                 75 net/tipc/socket.h bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb);
sk                 76 net/tipc/socket.h bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb);
sk                158 net/tipc/topsrv.c 	struct sock *sk = con->sock->sk;
sk                161 net/tipc/topsrv.c 	write_lock_bh(&sk->sk_callback_lock);
sk                165 net/tipc/topsrv.c 		sk->sk_user_data = NULL;
sk                168 net/tipc/topsrv.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                345 net/tipc/topsrv.c static void tipc_conn_write_space(struct sock *sk)
sk                349 net/tipc/topsrv.c 	read_lock_bh(&sk->sk_callback_lock);
sk                350 net/tipc/topsrv.c 	con = sk->sk_user_data;
sk                356 net/tipc/topsrv.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                388 net/tipc/topsrv.c 	struct sock *sk = con->sock->sk;
sk                402 net/tipc/topsrv.c 		read_lock_bh(&sk->sk_callback_lock);
sk                404 net/tipc/topsrv.c 		read_unlock_bh(&sk->sk_callback_lock);
sk                434 net/tipc/topsrv.c static void tipc_conn_data_ready(struct sock *sk)
sk                438 net/tipc/topsrv.c 	read_lock_bh(&sk->sk_callback_lock);
sk                439 net/tipc/topsrv.c 	con = sk->sk_user_data;
sk                445 net/tipc/topsrv.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                468 net/tipc/topsrv.c 		newsk = newsock->sk;
sk                484 net/tipc/topsrv.c static void tipc_topsrv_listener_data_ready(struct sock *sk)
sk                488 net/tipc/topsrv.c 	read_lock_bh(&sk->sk_callback_lock);
sk                489 net/tipc/topsrv.c 	srv = sk->sk_user_data;
sk                492 net/tipc/topsrv.c 	read_unlock_bh(&sk->sk_callback_lock);
sk                500 net/tipc/topsrv.c 	struct sock *sk;
sk                508 net/tipc/topsrv.c 	sk = lsock->sk;
sk                509 net/tipc/topsrv.c 	write_lock_bh(&sk->sk_callback_lock);
sk                510 net/tipc/topsrv.c 	sk->sk_data_ready = tipc_topsrv_listener_data_ready;
sk                511 net/tipc/topsrv.c 	sk->sk_user_data = srv;
sk                512 net/tipc/topsrv.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                549 net/tipc/topsrv.c 	module_put(sk->sk_prot_creator->owner);
sk                691 net/tipc/topsrv.c 	__module_get(lsock->sk->sk_prot_creator->owner);
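
The topsrv.c entries show the consumer side of the callback-override pattern from xprtsock.c above: the installed sk_data_ready / sk_write_space handlers run in softirq context, take sk->sk_callback_lock for reading, look up private state through sk->sk_user_data, and defer the real work to a workqueue. A sketch with a hypothetical connection structure:

    #include <net/sock.h>
    #include <linux/workqueue.h>

    struct my_conn {
            struct work_struct rwork;       /* receive work, runs in process context */
            /* ... */
    };

    static struct workqueue_struct *my_wq;  /* created at module init */

    static void my_data_ready(struct sock *sk)
    {
            struct my_conn *con;

            read_lock_bh(&sk->sk_callback_lock);
            con = sk->sk_user_data;         /* installed alongside this callback */
            if (con)
                    queue_work(my_wq, &con->rwork);
            read_unlock_bh(&sk->sk_callback_lock);
    }
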
sk                139 net/tipc/trace.c 	i += scnprintf(buf + i, sz - i, " %p", skb->sk);
sk                131 net/tipc/trace.h int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf);
sk                134 net/tipc/trace.h bool tipc_sk_filtering(struct sock *sk);
sk                190 net/tipc/trace.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues,
sk                193 net/tipc/trace.h 	TP_ARGS(sk, skb, dqueues, header),
sk                204 net/tipc/trace.h 		__entry->portid = tipc_sock_get_portid(sk);
sk                205 net/tipc/trace.h 		tipc_sk_dump(sk, dqueues, __get_str(buf));
sk                218 net/tipc/trace.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
sk                220 net/tipc/trace.h 	TP_ARGS(sk, skb, dqueues, header), \
sk                221 net/tipc/trace.h 	TP_CONDITION(tipc_sk_filtering(sk)))
sk                237 net/tipc/trace.h 	TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
sk                239 net/tipc/trace.h 	TP_ARGS(sk, skb, dqueues, header), \
sk                240 net/tipc/trace.h 	TP_CONDITION(tipc_sk_filtering(sk) && (cond)))
sk                241 net/tipc/trace.h DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit1, tipc_sk_overlimit1(sk, skb));
sk                242 net/tipc/trace.h DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit2, tipc_sk_overlimit2(sk, skb));
sk                188 net/tipc/udp_media.c 		udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
sk                201 net/tipc/udp_media.c 							       ub->ubsock->sk,
sk                210 net/tipc/udp_media.c 		err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
sk                362 net/tipc/udp_media.c static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
sk                369 net/tipc/udp_media.c 	ub = rcu_dereference_sk_user_data(sk);
sk                382 net/tipc/udp_media.c 		tipc_rcv(sock_net(sk), skb, b);
sk                401 net/tipc/udp_media.c 	struct sock *sk = ub->ubsock->sk;
sk                406 net/tipc/udp_media.c 		err = ip_mc_join_group(sk, &mreqn);
sk                409 net/tipc/udp_media.c 		err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex,
sk                458 net/tipc/udp_media.c 		struct net *net = sock_net(skb->sk);
sk                489 net/tipc/udp_media.c 		struct net *net = sock_net(skb->sk);
sk                821 net/tipc/udp_media.c 	sock_set_flag(ub->ubsock->sk, SOCK_DEAD);
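
In udp_media.c the tunnel receive hook (tipc_udp_recv, line 362 above) recovers its bearer from the UDP socket with rcu_dereference_sk_user_data() and takes the namespace from sock_net(sk). A reduced sketch of such an encap receive handler, with the bearer type and protocol entry point hypothetical:

    #include <net/sock.h>
    #include <linux/skbuff.h>

    struct my_bearer;
    static void my_proto_rcv(struct net *net, struct sk_buff *skb, struct my_bearer *b);

    /* installed as the UDP tunnel's encap_rcv; runs in softirq context */
    static int my_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
    {
            struct my_bearer *b = rcu_dereference_sk_user_data(sk);

            if (!b) {
                    kfree_skb(skb);
                    return 0;
            }
            my_proto_rcv(sock_net(sk), skb, b);
            return 0;       /* 0 tells the UDP layer the skb was consumed */
    }
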
sk                108 net/tls/tls_device.c static struct net_device *get_netdev_for_sock(struct sock *sk)
sk                110 net/tls/tls_device.c 	struct dst_entry *dst = sk_dst_get(sk);
sk                144 net/tls/tls_device.c static void tls_icsk_clean_acked(struct sock *sk, u32 acked_seq)
sk                146 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                179 net/tls/tls_device.c static void tls_device_sk_destruct(struct sock *sk)
sk                181 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                184 net/tls/tls_device.c 	tls_ctx->sk_destruct(sk);
sk                191 net/tls/tls_device.c 		clean_acked_data_disable(inet_csk(sk));
sk                198 net/tls/tls_device.c void tls_device_free_resources_tx(struct sock *sk)
sk                200 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                202 net/tls/tls_device.c 	tls_free_partial_record(sk, tls_ctx);
sk                205 net/tls/tls_device.c static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx,
sk                213 net/tls/tls_device.c 	skb = tcp_write_queue_tail(sk);
sk                222 net/tls/tls_device.c 		err = netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq,
sk                255 net/tls/tls_device.c static int tls_push_record(struct sock *sk,
sk                262 net/tls/tls_device.c 	struct tcp_sock *tp = tcp_sk(sk);
sk                271 net/tls/tls_device.c 		tls_device_resync_tx(sk, ctx, tp->write_seq);
sk                273 net/tls/tls_device.c 	tls_advance_record_sn(sk, prot, &ctx->tx);
sk                280 net/tls/tls_device.c 		sk_mem_charge(sk, skb_frag_size(frag));
sk                286 net/tls/tls_device.c 	return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
sk                289 net/tls/tls_device.c static int tls_device_record_close(struct sock *sk,
sk                305 net/tls/tls_device.c 					sk->sk_allocation))) {
sk                346 net/tls/tls_device.c static int tls_do_allocation(struct sock *sk,
sk                355 net/tls/tls_device.c 						   sk->sk_allocation))) {
sk                356 net/tls/tls_device.c 			sk->sk_prot->enter_memory_pressure(sk);
sk                357 net/tls/tls_device.c 			sk_stream_moderate_sndbuf(sk);
sk                369 net/tls/tls_device.c 	if (!sk_page_frag_refill(sk, pfrag))
sk                400 net/tls/tls_device.c static int tls_push_data(struct sock *sk,
sk                405 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                422 net/tls/tls_device.c 	if (sk->sk_err)
sk                423 net/tls/tls_device.c 		return -sk->sk_err;
sk                428 net/tls/tls_device.c 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk                430 net/tls/tls_device.c 		rc = tls_push_partial_record(sk, tls_ctx, flags);
sk                435 net/tls/tls_device.c 	pfrag = sk_page_frag(sk);
sk                443 net/tls/tls_device.c 		rc = tls_do_allocation(sk, ctx, pfrag,
sk                446 net/tls/tls_device.c 			rc = sk_stream_wait_memory(sk, &timeo);
sk                494 net/tls/tls_device.c 			rc = tls_device_record_close(sk, tls_ctx, record,
sk                507 net/tls/tls_device.c 			rc = tls_push_record(sk,
sk                523 net/tls/tls_device.c int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
sk                526 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                530 net/tls/tls_device.c 	lock_sock(sk);
sk                533 net/tls/tls_device.c 		rc = tls_proccess_cmsg(sk, msg, &record_type);
sk                538 net/tls/tls_device.c 	rc = tls_push_data(sk, &msg->msg_iter, size,
sk                542 net/tls/tls_device.c 	release_sock(sk);
sk                547 net/tls/tls_device.c int tls_device_sendpage(struct sock *sk, struct page *page,
sk                550 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                560 net/tls/tls_device.c 	lock_sock(sk);
sk                570 net/tls/tls_device.c 	rc = tls_push_data(sk, &msg_iter, size,
sk                575 net/tls/tls_device.c 	release_sock(sk);
sk                640 net/tls/tls_device.c static int tls_device_push_pending_record(struct sock *sk, int flags)
sk                645 net/tls/tls_device.c 	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
sk                648 net/tls/tls_device.c void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
sk                651 net/tls/tls_device.c 		gfp_t sk_allocation = sk->sk_allocation;
sk                653 net/tls/tls_device.c 		WARN_ON_ONCE(sk->sk_write_pending);
sk                655 net/tls/tls_device.c 		sk->sk_allocation = GFP_ATOMIC;
sk                656 net/tls/tls_device.c 		tls_push_partial_record(sk, ctx,
sk                659 net/tls/tls_device.c 		sk->sk_allocation = sk_allocation;
sk                664 net/tls/tls_device.c 				 struct sock *sk, u32 seq, u8 *rcd_sn)
sk                672 net/tls/tls_device.c 		netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn,
sk                677 net/tls/tls_device.c void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
sk                679 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                712 net/tls/tls_device.c 		if (tcp_inq(sk) > rcd_len)
sk                721 net/tls/tls_device.c 	tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
sk                726 net/tls/tls_device.c 					   struct sock *sk, struct sk_buff *skb)
sk                756 net/tls/tls_device.c 	if (tcp_inq(sk) > rxm->full_len) {
sk                765 net/tls/tls_device.c 		tls_device_resync_rx(tls_ctx, sk, tcp_sk(sk)->copied_seq,
sk                770 net/tls/tls_device.c static int tls_device_reencrypt(struct sock *sk, struct sk_buff *skb)
sk                779 net/tls/tls_device.c 			   TLS_CIPHER_AES_GCM_128_IV_SIZE, sk->sk_allocation);
sk                800 net/tls/tls_device.c 	err = decrypt_skb(sk, skb, sg);
sk                855 net/tls/tls_device.c int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
sk                857 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                880 net/tls/tls_device.c 		tls_device_core_ctrl_rx_resync(tls_ctx, ctx, sk, skb);
sk                885 net/tls/tls_device.c 	return tls_device_reencrypt(sk, skb);
sk                888 net/tls/tls_device.c static void tls_device_attach(struct tls_context *ctx, struct sock *sk,
sk                891 net/tls/tls_device.c 	if (sk->sk_destruct != tls_device_sk_destruct) {
sk                899 net/tls/tls_device.c 		ctx->sk_destruct = sk->sk_destruct;
sk                900 net/tls/tls_device.c 		sk->sk_destruct = tls_device_sk_destruct;
sk                904 net/tls/tls_device.c int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
sk                907 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                983 net/tls/tls_device.c 	rc = tls_sw_fallback_init(sk, offload_ctx, crypto_info);
sk                991 net/tls/tls_device.c 	start_marker_record->end_seq = tcp_sk(sk)->write_seq;
sk               1001 net/tls/tls_device.c 	clean_acked_data_enable(inet_csk(sk), &tls_icsk_clean_acked);
sk               1008 net/tls/tls_device.c 	skb = tcp_write_queue_tail(sk);
sk               1012 net/tls/tls_device.c 	netdev = get_netdev_for_sock(sk);
sk               1039 net/tls/tls_device.c 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
sk               1041 net/tls/tls_device.c 					     tcp_sk(sk)->write_seq);
sk               1045 net/tls/tls_device.c 	tls_device_attach(ctx, sk, netdev);
sk               1052 net/tls/tls_device.c 	smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
sk               1062 net/tls/tls_device.c 	clean_acked_data_disable(inet_csk(sk));
sk               1076 net/tls/tls_device.c int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
sk               1085 net/tls/tls_device.c 	netdev = get_netdev_for_sock(sk);
sk               1118 net/tls/tls_device.c 	rc = tls_set_sw_offload(sk, ctx, 0);
sk               1122 net/tls/tls_device.c 	rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
sk               1124 net/tls/tls_device.c 					     tcp_sk(sk)->copied_seq);
sk               1128 net/tls/tls_device.c 	tls_device_attach(ctx, sk, netdev);
sk               1137 net/tls/tls_device.c 	tls_sw_free_resources_rx(sk);
sk               1148 net/tls/tls_device.c void tls_device_offload_cleanup_rx(struct sock *sk)
sk               1150 net/tls/tls_device.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               1167 net/tls/tls_device.c 	tls_sw_release_resources_rx(sk);
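
tls_device.c lines 888-900 above install the offload destructor by chaining: the socket's original sk_destruct is remembered in the TLS context and invoked from the replacement, so the base protocol's cleanup still runs (lines 179-191). A minimal sketch of that chaining, with my_ctx and its lookup helper invented for illustration:

    #include <net/sock.h>

    struct my_ctx {
            void (*saved_destruct)(struct sock *sk);
            /* ... offload state ... */
    };

    static struct my_ctx *my_get_ctx(struct sock *sk);      /* lookup, not shown */

    static void my_sk_destruct(struct sock *sk)
    {
            struct my_ctx *ctx = my_get_ctx(sk);

            /* release offload resources here ... then chain to the original */
            ctx->saved_destruct(sk);
    }

    static void my_attach(struct my_ctx *ctx, struct sock *sk)
    {
            if (sk->sk_destruct != my_sk_destruct) {
                    ctx->saved_destruct = sk->sk_destruct;
                    sk->sk_destruct = my_sk_destruct;
            }
    }
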
sk                184 net/tls/tls_device_fallback.c 	if (skb->sk->sk_family == AF_INET6) {
sk                197 net/tls/tls_device_fallback.c 	struct sock *sk = skb->sk;
sk                206 net/tls/tls_device_fallback.c 	nskb->sk = sk;
sk                208 net/tls/tls_device_fallback.c 	skb->sk = NULL;
sk                218 net/tls/tls_device_fallback.c 		WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
sk                220 net/tls/tls_device_fallback.c 		refcount_add(delta, &sk->sk_wmem_alloc);
sk                372 net/tls/tls_device_fallback.c static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
sk                375 net/tls/tls_device_fallback.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                422 net/tls/tls_device_fallback.c struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
sk                426 net/tls/tls_device_fallback.c 	if (dev == tls_get_ctx(sk)->netdev)
sk                429 net/tls/tls_device_fallback.c 	return tls_sw_fallback(sk, skb);
sk                435 net/tls/tls_device_fallback.c 	return tls_sw_fallback(skb->sk, skb);
sk                439 net/tls/tls_device_fallback.c int tls_sw_fallback_init(struct sock *sk,
sk                 68 net/tls/tls_main.c static void update_sk_prot(struct sock *sk, struct tls_context *ctx)
sk                 70 net/tls/tls_main.c 	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
sk                 72 net/tls/tls_main.c 	sk->sk_prot = &tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf];
sk                 75 net/tls/tls_main.c int wait_on_pending_writer(struct sock *sk, long *timeo)
sk                 80 net/tls/tls_main.c 	add_wait_queue(sk_sleep(sk), &wait);
sk                 92 net/tls/tls_main.c 		if (sk_wait_event(sk, timeo, !sk->sk_write_pending, &wait))
sk                 95 net/tls/tls_main.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                 99 net/tls/tls_main.c int tls_push_sg(struct sock *sk,
sk                120 net/tls/tls_main.c 		tcp_rate_check_app_limited(sk);
sk                123 net/tls/tls_main.c 		ret = do_tcp_sendpages(sk, p, offset, size, sendpage_flags);
sk                140 net/tls/tls_main.c 		sk_mem_uncharge(sk, sg->length);
sk                154 net/tls/tls_main.c static int tls_handle_open_record(struct sock *sk, int flags)
sk                156 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                159 net/tls/tls_main.c 		return ctx->push_pending_record(sk, flags);
sk                164 net/tls/tls_main.c int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
sk                184 net/tls/tls_main.c 			rc = tls_handle_open_record(sk, msg->msg_flags);
sk                199 net/tls/tls_main.c int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
sk                209 net/tls/tls_main.c 	return tls_push_sg(sk, ctx, sg, offset, flags);
sk                212 net/tls/tls_main.c void tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
sk                218 net/tls/tls_main.c 		sk_mem_uncharge(sk, sg->length);
sk                223 net/tls/tls_main.c static void tls_write_space(struct sock *sk)
sk                225 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                232 net/tls/tls_main.c 		ctx->sk_write_space(sk);
sk                238 net/tls/tls_main.c 		tls_device_write_space(sk, ctx);
sk                241 net/tls/tls_main.c 		tls_sw_write_space(sk, ctx);
sk                243 net/tls/tls_main.c 	ctx->sk_write_space(sk);
sk                254 net/tls/tls_main.c void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
sk                263 net/tls/tls_main.c 	if (sk)
sk                269 net/tls/tls_main.c static void tls_sk_proto_cleanup(struct sock *sk,
sk                272 net/tls/tls_main.c 	if (unlikely(sk->sk_write_pending) &&
sk                273 net/tls/tls_main.c 	    !wait_on_pending_writer(sk, &timeo))
sk                274 net/tls/tls_main.c 		tls_handle_open_record(sk, 0);
sk                280 net/tls/tls_main.c 		tls_sw_release_resources_tx(sk);
sk                282 net/tls/tls_main.c 		tls_device_free_resources_tx(sk);
sk                286 net/tls/tls_main.c 		tls_sw_release_resources_rx(sk);
sk                288 net/tls/tls_main.c 		tls_device_offload_cleanup_rx(sk);
sk                291 net/tls/tls_main.c static void tls_sk_proto_close(struct sock *sk, long timeout)
sk                293 net/tls/tls_main.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                294 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                295 net/tls/tls_main.c 	long timeo = sock_sndtimeo(sk, 0);
sk                301 net/tls/tls_main.c 	lock_sock(sk);
sk                305 net/tls/tls_main.c 		tls_sk_proto_cleanup(sk, ctx, timeo);
sk                307 net/tls/tls_main.c 	write_lock_bh(&sk->sk_callback_lock);
sk                310 net/tls/tls_main.c 	sk->sk_prot = ctx->sk_proto;
sk                311 net/tls/tls_main.c 	if (sk->sk_write_space == tls_write_space)
sk                312 net/tls/tls_main.c 		sk->sk_write_space = ctx->sk_write_space;
sk                313 net/tls/tls_main.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                314 net/tls/tls_main.c 	release_sock(sk);
sk                321 net/tls/tls_main.c 	ctx->sk_proto->close(sk, timeout);
sk                324 net/tls/tls_main.c 		tls_ctx_free(sk, ctx);
sk                327 net/tls/tls_main.c static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
sk                331 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                374 net/tls/tls_main.c 		lock_sock(sk);
sk                380 net/tls/tls_main.c 		release_sock(sk);
sk                398 net/tls/tls_main.c 		lock_sock(sk);
sk                404 net/tls/tls_main.c 		release_sock(sk);
sk                419 net/tls/tls_main.c static int do_tls_getsockopt(struct sock *sk, int optname,
sk                426 net/tls/tls_main.c 		rc = do_tls_getsockopt_tx(sk, optval, optlen);
sk                435 net/tls/tls_main.c static int tls_getsockopt(struct sock *sk, int level, int optname,
sk                438 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                441 net/tls/tls_main.c 		return ctx->sk_proto->getsockopt(sk, level,
sk                444 net/tls/tls_main.c 	return do_tls_getsockopt(sk, optname, optval, optlen);
sk                447 net/tls/tls_main.c static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
sk                452 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                527 net/tls/tls_main.c 		rc = tls_set_device_offload(sk, ctx);
sk                530 net/tls/tls_main.c 			rc = tls_set_sw_offload(sk, ctx, 1);
sk                536 net/tls/tls_main.c 		rc = tls_set_device_offload_rx(sk, ctx);
sk                539 net/tls/tls_main.c 			rc = tls_set_sw_offload(sk, ctx, 0);
sk                544 net/tls/tls_main.c 		tls_sw_strparser_arm(sk, ctx);
sk                551 net/tls/tls_main.c 	update_sk_prot(sk, ctx);
sk                553 net/tls/tls_main.c 		ctx->sk_write_space = sk->sk_write_space;
sk                554 net/tls/tls_main.c 		sk->sk_write_space = tls_write_space;
sk                556 net/tls/tls_main.c 		sk->sk_socket->ops = &tls_sw_proto_ops;
sk                566 net/tls/tls_main.c static int do_tls_setsockopt(struct sock *sk, int optname,
sk                574 net/tls/tls_main.c 		lock_sock(sk);
sk                575 net/tls/tls_main.c 		rc = do_tls_setsockopt_conf(sk, optval, optlen,
sk                577 net/tls/tls_main.c 		release_sock(sk);
sk                586 net/tls/tls_main.c static int tls_setsockopt(struct sock *sk, int level, int optname,
sk                589 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                592 net/tls/tls_main.c 		return ctx->sk_proto->setsockopt(sk, level, optname, optval,
sk                595 net/tls/tls_main.c 	return do_tls_setsockopt(sk, optname, optval, optlen);
sk                598 net/tls/tls_main.c static struct tls_context *create_ctx(struct sock *sk)
sk                600 net/tls/tls_main.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                609 net/tls/tls_main.c 	ctx->sk_proto = sk->sk_prot;
sk                613 net/tls/tls_main.c static void tls_build_proto(struct sock *sk)
sk                615 net/tls/tls_main.c 	int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
sk                619 net/tls/tls_main.c 	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
sk                621 net/tls/tls_main.c 		if (likely(sk->sk_prot != saved_tcpv6_prot)) {
sk                622 net/tls/tls_main.c 			build_protos(tls_prots[TLSV6], sk->sk_prot);
sk                623 net/tls/tls_main.c 			smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
sk                629 net/tls/tls_main.c 	    unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
sk                631 net/tls/tls_main.c 		if (likely(sk->sk_prot != saved_tcpv4_prot)) {
sk                632 net/tls/tls_main.c 			build_protos(tls_prots[TLSV4], sk->sk_prot);
sk                633 net/tls/tls_main.c 			smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
sk                639 net/tls/tls_main.c static void tls_hw_sk_destruct(struct sock *sk)
sk                641 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                642 net/tls/tls_main.c 	struct inet_connection_sock *icsk = inet_csk(sk);
sk                644 net/tls/tls_main.c 	ctx->sk_destruct(sk);
sk                647 net/tls/tls_main.c 	tls_ctx_free(sk, ctx);
sk                650 net/tls/tls_main.c static int tls_hw_prot(struct sock *sk)
sk                659 net/tls/tls_main.c 			ctx = create_ctx(sk);
sk                664 net/tls/tls_main.c 			tls_build_proto(sk);
sk                665 net/tls/tls_main.c 			ctx->sk_destruct = sk->sk_destruct;
sk                666 net/tls/tls_main.c 			sk->sk_destruct = tls_hw_sk_destruct;
sk                669 net/tls/tls_main.c 			update_sk_prot(sk, ctx);
sk                680 net/tls/tls_main.c static void tls_hw_unhash(struct sock *sk)
sk                682 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                690 net/tls/tls_main.c 			dev->unhash(dev, sk);
sk                696 net/tls/tls_main.c 	ctx->sk_proto->unhash(sk);
sk                699 net/tls/tls_main.c static int tls_hw_hash(struct sock *sk)
sk                701 net/tls/tls_main.c 	struct tls_context *ctx = tls_get_ctx(sk);
sk                705 net/tls/tls_main.c 	err = ctx->sk_proto->hash(sk);
sk                711 net/tls/tls_main.c 			err |= dev->hash(dev, sk);
sk                719 net/tls/tls_main.c 		tls_hw_unhash(sk);
sk                766 net/tls/tls_main.c static int tls_init(struct sock *sk)
sk                771 net/tls/tls_main.c 	if (tls_hw_prot(sk))
sk                780 net/tls/tls_main.c 	if (sk->sk_state != TCP_ESTABLISHED)
sk                783 net/tls/tls_main.c 	tls_build_proto(sk);
sk                786 net/tls/tls_main.c 	write_lock_bh(&sk->sk_callback_lock);
sk                787 net/tls/tls_main.c 	ctx = create_ctx(sk);
sk                795 net/tls/tls_main.c 	update_sk_prot(sk, ctx);
sk                797 net/tls/tls_main.c 	write_unlock_bh(&sk->sk_callback_lock);
sk                801 net/tls/tls_main.c static void tls_update(struct sock *sk, struct proto *p,
sk                802 net/tls/tls_main.c 		       void (*write_space)(struct sock *sk))
sk                806 net/tls/tls_main.c 	ctx = tls_get_ctx(sk);
sk                811 net/tls/tls_main.c 		sk->sk_prot = p;
sk                812 net/tls/tls_main.c 		sk->sk_write_space = write_space;
sk                816 net/tls/tls_main.c static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
sk                828 net/tls/tls_main.c 	ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data);
sk                863 net/tls/tls_main.c static size_t tls_get_info_size(const struct sock *sk)
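
The net/tls/tls_main.c hits above are the kTLS setup path: tls_init() attaches the ULP when userspace sets TCP_ULP on an established TCP socket, and do_tls_setsockopt_conf() installs the cipher state for TLS_TX/TLS_RX and swaps in the TLS proto ops. A minimal userspace sketch of that sequence follows; enable_ktls_tx() is a made-up illustrative helper, the fallback #defines mirror the kernel UAPI values, and the zeroed key material is a placeholder for values taken from a real TLS handshake.

#include <linux/tls.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_ULP
#define TCP_ULP 31	/* value from include/uapi/linux/tcp.h */
#endif
#ifndef SOL_TLS
#define SOL_TLS 282	/* value from include/linux/socket.h */
#endif

/* Enable kTLS transmit offload on an already-ESTABLISHED TCP socket.
 * key/iv/salt/rec_seq are placeholders; real values come from a TLS
 * handshake performed in user space.
 */
static int enable_ktls_tx(int fd)
{
	struct tls12_crypto_info_aes_gcm_128 ci;

	/* tls_init() rejects sockets that are not TCP_ESTABLISHED */
	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
		return -1;

	memset(&ci, 0, sizeof(ci));
	ci.info.version = TLS_1_2_VERSION;
	ci.info.cipher_type = TLS_CIPHER_AES_GCM_128;
	/* fill ci.key, ci.iv, ci.salt, ci.rec_seq from the handshake */

	/* handled by do_tls_setsockopt_conf(); installs the TLS send path */
	return setsockopt(fd, SOL_TLS, TLS_TX, &ci, sizeof(ci));
}
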
sk                165 net/tls/tls_sw.c 	tls_ctx = tls_get_ctx(skb->sk);
sk                172 net/tls/tls_sw.c 		tls_err_abort(skb->sk, err);
sk                180 net/tls/tls_sw.c 			tls_err_abort(skb->sk, pad);
sk                191 net/tls/tls_sw.c 	skb->sk = NULL;
sk                214 net/tls/tls_sw.c static int tls_do_decryption(struct sock *sk,
sk                223 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                241 net/tls/tls_sw.c 		skb->sk = sk;
sk                266 net/tls/tls_sw.c static void tls_trim_both_msgs(struct sock *sk, int target_size)
sk                268 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                273 net/tls/tls_sw.c 	sk_msg_trim(sk, &rec->msg_plaintext, target_size);
sk                276 net/tls/tls_sw.c 	sk_msg_trim(sk, &rec->msg_encrypted, target_size);
sk                279 net/tls/tls_sw.c static int tls_alloc_encrypted_msg(struct sock *sk, int len)
sk                281 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                286 net/tls/tls_sw.c 	return sk_msg_alloc(sk, msg_en, len, 0);
sk                289 net/tls/tls_sw.c static int tls_clone_plaintext_msg(struct sock *sk, int required)
sk                291 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                310 net/tls/tls_sw.c 	return sk_msg_clone(sk, msg_pl, msg_en, skip, len);
sk                313 net/tls/tls_sw.c static struct tls_rec *tls_get_rec(struct sock *sk)
sk                315 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                324 net/tls/tls_sw.c 	rec = kzalloc(mem_size, sk->sk_allocation);
sk                345 net/tls/tls_sw.c static void tls_free_rec(struct sock *sk, struct tls_rec *rec)
sk                347 net/tls/tls_sw.c 	sk_msg_free(sk, &rec->msg_encrypted);
sk                348 net/tls/tls_sw.c 	sk_msg_free(sk, &rec->msg_plaintext);
sk                352 net/tls/tls_sw.c static void tls_free_open_rec(struct sock *sk)
sk                354 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                359 net/tls/tls_sw.c 		tls_free_rec(sk, rec);
sk                364 net/tls/tls_sw.c int tls_tx_records(struct sock *sk, int flags)
sk                366 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                381 net/tls/tls_sw.c 		rc = tls_push_partial_record(sk, tls_ctx, tx_flags);
sk                389 net/tls/tls_sw.c 		sk_msg_free(sk, &rec->msg_plaintext);
sk                402 net/tls/tls_sw.c 			rc = tls_push_sg(sk, tls_ctx,
sk                409 net/tls/tls_sw.c 			sk_msg_free(sk, &rec->msg_plaintext);
sk                418 net/tls/tls_sw.c 		tls_err_abort(sk, EBADMSG);
sk                426 net/tls/tls_sw.c 	struct sock *sk = req->data;
sk                427 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                444 net/tls/tls_sw.c 	if (err || sk->sk_err) {
sk                448 net/tls/tls_sw.c 		if (sk->sk_err) {
sk                449 net/tls/tls_sw.c 			ctx->async_wait.err = sk->sk_err;
sk                452 net/tls/tls_sw.c 			tls_err_abort(sk, err);
sk                484 net/tls/tls_sw.c static int tls_do_encryption(struct sock *sk,
sk                519 net/tls/tls_sw.c 				  tls_encrypt_done, sk);
sk                541 net/tls/tls_sw.c 	tls_advance_record_sn(sk, prot, &tls_ctx->tx);
sk                545 net/tls/tls_sw.c static int tls_split_open_record(struct sock *sk, struct tls_rec *from,
sk                558 net/tls/tls_sw.c 	new = tls_get_rec(sk);
sk                561 net/tls/tls_sw.c 	ret = sk_msg_alloc(sk, &new->msg_encrypted, msg_opl->sg.size +
sk                564 net/tls/tls_sw.c 		tls_free_rec(sk, new);
sk                630 net/tls/tls_sw.c static void tls_merge_open_record(struct sock *sk, struct tls_rec *to,
sk                657 net/tls/tls_sw.c 	sk_msg_free(sk, &to->msg_encrypted);
sk                663 net/tls/tls_sw.c static int tls_push_record(struct sock *sk, int flags,
sk                666 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                694 net/tls/tls_sw.c 		rc = tls_split_open_record(sk, rec, &tmp, msg_pl, msg_en,
sk                705 net/tls/tls_sw.c 			tls_merge_open_record(sk, rec, tmp, orig_end);
sk                710 net/tls/tls_sw.c 		sk_msg_trim(sk, msg_en, msg_pl->sg.size +
sk                759 net/tls/tls_sw.c 	rc = tls_do_encryption(sk, tls_ctx, ctx, req,
sk                763 net/tls/tls_sw.c 			tls_err_abort(sk, EBADMSG);
sk                766 net/tls/tls_sw.c 				tls_merge_open_record(sk, rec, tmp, orig_end);
sk                774 net/tls/tls_sw.c 		sk_msg_trim(sk, msg_en, msg_pl->sg.size + prot->overhead_size);
sk                779 net/tls/tls_sw.c 	return tls_tx_records(sk, flags);
sk                782 net/tls/tls_sw.c static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
sk                786 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                797 net/tls/tls_sw.c 	psock = sk_psock_get(sk);
sk                799 net/tls/tls_sw.c 		err = tls_push_record(sk, flags, record_type);
sk                800 net/tls/tls_sw.c 		if (err && sk->sk_err == EBADMSG) {
sk                801 net/tls/tls_sw.c 			*copied -= sk_msg_free(sk, msg);
sk                802 net/tls/tls_sw.c 			tls_free_open_rec(sk);
sk                803 net/tls/tls_sw.c 			err = -sk->sk_err;
sk                806 net/tls/tls_sw.c 			sk_psock_put(sk, psock);
sk                813 net/tls/tls_sw.c 		psock->eval = sk_psock_msg_verdict(sk, psock, msg);
sk                828 net/tls/tls_sw.c 		err = tls_push_record(sk, flags, record_type);
sk                829 net/tls/tls_sw.c 		if (err && sk->sk_err == EBADMSG) {
sk                830 net/tls/tls_sw.c 			*copied -= sk_msg_free(sk, msg);
sk                831 net/tls/tls_sw.c 			tls_free_open_rec(sk);
sk                832 net/tls/tls_sw.c 			err = -sk->sk_err;
sk                843 net/tls/tls_sw.c 		sk_msg_return_zero(sk, msg, send);
sk                845 net/tls/tls_sw.c 		release_sock(sk);
sk                847 net/tls/tls_sw.c 		lock_sock(sk);
sk                849 net/tls/tls_sw.c 			*copied -= sk_msg_free_nocharge(sk, &msg_redir);
sk                853 net/tls/tls_sw.c 			tls_free_open_rec(sk);
sk                857 net/tls/tls_sw.c 		sk_msg_free_partial(sk, msg, send);
sk                863 net/tls/tls_sw.c 			tls_free_open_rec(sk);
sk                888 net/tls/tls_sw.c 	sk_psock_put(sk, psock);
sk                892 net/tls/tls_sw.c static int tls_sw_push_pending_record(struct sock *sk, int flags)
sk                894 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                908 net/tls/tls_sw.c 	return bpf_exec_tx_verdict(msg_pl, sk, true, TLS_RECORD_TYPE_DATA,
sk                912 net/tls/tls_sw.c int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
sk                914 net/tls/tls_sw.c 	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
sk                915 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk                939 net/tls/tls_sw.c 	lock_sock(sk);
sk                942 net/tls/tls_sw.c 		ret = tls_proccess_cmsg(sk, msg, &record_type);
sk                952 net/tls/tls_sw.c 		if (sk->sk_err) {
sk                953 net/tls/tls_sw.c 			ret = -sk->sk_err;
sk                960 net/tls/tls_sw.c 			rec = ctx->open_rec = tls_get_rec(sk);
sk                981 net/tls/tls_sw.c 		if (!sk_stream_memory_free(sk))
sk                985 net/tls/tls_sw.c 		ret = tls_alloc_encrypted_msg(sk, required_size);
sk               1001 net/tls/tls_sw.c 			ret = sk_msg_zerocopy_from_iter(sk, &msg->msg_iter,
sk               1010 net/tls/tls_sw.c 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
sk               1030 net/tls/tls_sw.c 			sk_msg_trim(sk, msg_pl, orig_size);
sk               1035 net/tls/tls_sw.c 		ret = tls_clone_plaintext_msg(sk, required_size);
sk               1046 net/tls/tls_sw.c 			sk_msg_trim(sk, msg_en,
sk               1051 net/tls/tls_sw.c 			ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
sk               1063 net/tls/tls_sw.c 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
sk               1082 net/tls/tls_sw.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               1084 net/tls/tls_sw.c 		ret = sk_stream_wait_memory(sk, &timeo);
sk               1088 net/tls/tls_sw.c 				tls_trim_both_msgs(sk, orig_size);
sk               1124 net/tls/tls_sw.c 		tls_tx_records(sk, msg->msg_flags);
sk               1128 net/tls/tls_sw.c 	ret = sk_stream_error(sk, msg->msg_flags, ret);
sk               1130 net/tls/tls_sw.c 	release_sock(sk);
sk               1135 net/tls/tls_sw.c static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
sk               1138 net/tls/tls_sw.c 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
sk               1139 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               1153 net/tls/tls_sw.c 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk               1159 net/tls/tls_sw.c 		if (sk->sk_err) {
sk               1160 net/tls/tls_sw.c 			ret = -sk->sk_err;
sk               1167 net/tls/tls_sw.c 			rec = ctx->open_rec = tls_get_rec(sk);
sk               1185 net/tls/tls_sw.c 		if (!sk_stream_memory_free(sk))
sk               1188 net/tls/tls_sw.c 		ret = tls_alloc_encrypted_msg(sk, required_size);
sk               1202 net/tls/tls_sw.c 		sk_mem_charge(sk, copy);
sk               1210 net/tls/tls_sw.c 			ret = bpf_exec_tx_verdict(msg_pl, sk, full_record,
sk               1226 net/tls/tls_sw.c 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk               1228 net/tls/tls_sw.c 		ret = sk_stream_wait_memory(sk, &timeo);
sk               1231 net/tls/tls_sw.c 				tls_trim_both_msgs(sk, msg_pl->sg.size);
sk               1243 net/tls/tls_sw.c 			tls_tx_records(sk, flags);
sk               1247 net/tls/tls_sw.c 	ret = sk_stream_error(sk, flags, ret);
sk               1251 net/tls/tls_sw.c int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
sk               1259 net/tls/tls_sw.c 	return tls_sw_do_sendpage(sk, page, offset, size, flags);
sk               1262 net/tls/tls_sw.c int tls_sw_sendpage(struct sock *sk, struct page *page,
sk               1265 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               1273 net/tls/tls_sw.c 	lock_sock(sk);
sk               1274 net/tls/tls_sw.c 	ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
sk               1275 net/tls/tls_sw.c 	release_sock(sk);
sk               1280 net/tls/tls_sw.c static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
sk               1283 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               1289 net/tls/tls_sw.c 		if (sk->sk_err) {
sk               1290 net/tls/tls_sw.c 			*err = sock_error(sk);
sk               1294 net/tls/tls_sw.c 		if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               1297 net/tls/tls_sw.c 		if (sock_flag(sk, SOCK_DONE))
sk               1305 net/tls/tls_sw.c 		add_wait_queue(sk_sleep(sk), &wait);
sk               1306 net/tls/tls_sw.c 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               1307 net/tls/tls_sw.c 		sk_wait_event(sk, &timeo,
sk               1311 net/tls/tls_sw.c 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               1312 net/tls/tls_sw.c 		remove_wait_queue(sk_sleep(sk), &wait);
sk               1324 net/tls/tls_sw.c static int tls_setup_from_iter(struct sock *sk, struct iov_iter *from,
sk               1390 net/tls/tls_sw.c static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
sk               1395 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               1439 net/tls/tls_sw.c 	mem = kmalloc(mem_size, sk->sk_allocation);
sk               1495 net/tls/tls_sw.c 			err = tls_setup_from_iter(sk, out_iov, data_len,
sk               1514 net/tls/tls_sw.c 	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
sk               1527 net/tls/tls_sw.c static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
sk               1531 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               1539 net/tls/tls_sw.c 			err = tls_device_decrypted(sk, skb);
sk               1546 net/tls/tls_sw.c 			err = decrypt_internal(sk, skb, dest, NULL, chunk, zc,
sk               1550 net/tls/tls_sw.c 					tls_advance_record_sn(sk, prot,
sk               1566 net/tls/tls_sw.c 		tls_advance_record_sn(sk, prot, &tls_ctx->rx);
sk               1568 net/tls/tls_sw.c 		ctx->saved_data_ready(sk);
sk               1576 net/tls/tls_sw.c int decrypt_skb(struct sock *sk, struct sk_buff *skb,
sk               1582 net/tls/tls_sw.c 	return decrypt_internal(sk, skb, NULL, sgout, &chunk, &zc, false);
sk               1585 net/tls/tls_sw.c static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
sk               1588 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               1718 net/tls/tls_sw.c int tls_sw_recvmsg(struct sock *sk,
sk               1725 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               1746 net/tls/tls_sw.c 		return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);
sk               1748 net/tls/tls_sw.c 	psock = sk_psock_get(sk);
sk               1749 net/tls/tls_sw.c 	lock_sock(sk);
sk               1755 net/tls/tls_sw.c 		tls_err_abort(sk, err);
sk               1764 net/tls/tls_sw.c 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
sk               1766 net/tls/tls_sw.c 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               1776 net/tls/tls_sw.c 		skb = tls_wait_data(sk, psock, flags, timeo, &err);
sk               1779 net/tls/tls_sw.c 				int ret = __tcp_bpf_recvmsg(sk, psock,
sk               1812 net/tls/tls_sw.c 		err = decrypt_skb_update(sk, skb, &msg->msg_iter,
sk               1815 net/tls/tls_sw.c 			tls_err_abort(sk, EBADMSG);
sk               1888 net/tls/tls_sw.c 		if (tls_sw_advance_skb(sk, skb, chunk)) {
sk               1912 net/tls/tls_sw.c 				tls_err_abort(sk, err);
sk               1934 net/tls/tls_sw.c 			tls_err_abort(sk, err);
sk               1943 net/tls/tls_sw.c 	release_sock(sk);
sk               1945 net/tls/tls_sw.c 		sk_psock_put(sk, psock);
sk               1953 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sock->sk);
sk               1956 net/tls/tls_sw.c 	struct sock *sk = sock->sk;
sk               1964 net/tls/tls_sw.c 	lock_sock(sk);
sk               1966 net/tls/tls_sw.c 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               1968 net/tls/tls_sw.c 	skb = tls_wait_data(sk, NULL, flags, timeo, &err);
sk               1973 net/tls/tls_sw.c 		err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
sk               1982 net/tls/tls_sw.c 			tls_err_abort(sk, EBADMSG);
sk               1990 net/tls/tls_sw.c 	copied = skb_splice_bits(skb, sk, rxm->offset, pipe, chunk, flags);
sk               1995 net/tls/tls_sw.c 		tls_sw_advance_skb(sk, skb, copied);
sk               1998 net/tls/tls_sw.c 	release_sock(sk);
sk               2002 net/tls/tls_sw.c bool tls_sw_stream_read(const struct sock *sk)
sk               2004 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               2010 net/tls/tls_sw.c 	psock = sk_psock(sk);
sk               2021 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
sk               2071 net/tls/tls_sw.c 	tls_device_rx_resync_new_rec(strp->sk, data_len + TLS_HEADER_SIZE,
sk               2076 net/tls/tls_sw.c 	tls_err_abort(strp->sk, ret);
sk               2083 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(strp->sk);
sk               2091 net/tls/tls_sw.c 	ctx->saved_data_ready(strp->sk);
sk               2094 net/tls/tls_sw.c static void tls_data_ready(struct sock *sk)
sk               2096 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               2102 net/tls/tls_sw.c 	psock = sk_psock_get(sk);
sk               2105 net/tls/tls_sw.c 			ctx->saved_data_ready(sk);
sk               2106 net/tls/tls_sw.c 		sk_psock_put(sk, psock);
sk               2119 net/tls/tls_sw.c void tls_sw_release_resources_tx(struct sock *sk)
sk               2121 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               2130 net/tls/tls_sw.c 	tls_tx_records(sk, -1);
sk               2136 net/tls/tls_sw.c 		tls_free_partial_record(sk, tls_ctx);
sk               2140 net/tls/tls_sw.c 		sk_msg_free(sk, &rec->msg_plaintext);
sk               2146 net/tls/tls_sw.c 		sk_msg_free(sk, &rec->msg_encrypted);
sk               2147 net/tls/tls_sw.c 		sk_msg_free(sk, &rec->msg_plaintext);
sk               2152 net/tls/tls_sw.c 	tls_free_open_rec(sk);
sk               2162 net/tls/tls_sw.c void tls_sw_release_resources_rx(struct sock *sk)
sk               2164 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               2181 net/tls/tls_sw.c 			write_lock_bh(&sk->sk_callback_lock);
sk               2182 net/tls/tls_sw.c 			sk->sk_data_ready = ctx->saved_data_ready;
sk               2183 net/tls/tls_sw.c 			write_unlock_bh(&sk->sk_callback_lock);
sk               2202 net/tls/tls_sw.c void tls_sw_free_resources_rx(struct sock *sk)
sk               2204 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               2206 net/tls/tls_sw.c 	tls_sw_release_resources_rx(sk);
sk               2216 net/tls/tls_sw.c 	struct sock *sk = tx_work->sk;
sk               2217 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               2230 net/tls/tls_sw.c 	lock_sock(sk);
sk               2231 net/tls/tls_sw.c 	tls_tx_records(sk, -1);
sk               2232 net/tls/tls_sw.c 	release_sock(sk);
sk               2236 net/tls/tls_sw.c void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
sk               2246 net/tls/tls_sw.c void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
sk               2250 net/tls/tls_sw.c 	write_lock_bh(&sk->sk_callback_lock);
sk               2251 net/tls/tls_sw.c 	rx_ctx->saved_data_ready = sk->sk_data_ready;
sk               2252 net/tls/tls_sw.c 	sk->sk_data_ready = tls_data_ready;
sk               2253 net/tls/tls_sw.c 	write_unlock_bh(&sk->sk_callback_lock);
sk               2258 net/tls/tls_sw.c int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
sk               2260 net/tls/tls_sw.c 	struct tls_context *tls_ctx = tls_get_ctx(sk);
sk               2316 net/tls/tls_sw.c 		sw_ctx_tx->tx_work.sk = sk;
sk               2456 net/tls/tls_sw.c 		strp_init(&sw_ctx_rx->strp, sk, &cb);
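
The net/tls/tls_sw.c block is the software record layer behind sendmsg()/recvmsg() on such a socket; tls_proccess_cmsg() picks up a TLS_SET_RECORD_TYPE control message before tls_sw_sendmsg() builds and encrypts the record. The sketch below shows the userspace side of that, following the documented kTLS control-message pattern; ktls_send_ctrl() is an illustrative name, not a library function.

#include <linux/tls.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SOL_TLS
#define SOL_TLS 282
#endif

/* Send one TLS record with an explicit record type (e.g. alert or
 * handshake) instead of application data.  The record type travels in
 * a TLS_SET_RECORD_TYPE cmsg, parsed by tls_proccess_cmsg() before
 * tls_sw_sendmsg() encrypts the record.
 */
static ssize_t ktls_send_ctrl(int fd, unsigned char record_type,
			      const void *data, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(record_type))];
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_TLS;
	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(record_type));
	*CMSG_DATA(cmsg) = record_type;

	return sendmsg(fd, &msg, 0);
}
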
sk                136 net/unix/af_unix.c #define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
sk                180 net/unix/af_unix.c #define unix_peer(sk) (unix_sk(sk)->peer)
sk                182 net/unix/af_unix.c static inline int unix_our_peer(struct sock *sk, struct sock *osk)
sk                184 net/unix/af_unix.c 	return unix_peer(osk) == sk;
sk                187 net/unix/af_unix.c static inline int unix_may_send(struct sock *sk, struct sock *osk)
sk                189 net/unix/af_unix.c 	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
sk                192 net/unix/af_unix.c static inline int unix_recvq_full(struct sock const *sk)
sk                194 net/unix/af_unix.c 	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
sk                248 net/unix/af_unix.c static void __unix_remove_socket(struct sock *sk)
sk                250 net/unix/af_unix.c 	sk_del_node_init(sk);
sk                253 net/unix/af_unix.c static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
sk                255 net/unix/af_unix.c 	WARN_ON(!sk_unhashed(sk));
sk                256 net/unix/af_unix.c 	sk_add_node(sk, list);
sk                259 net/unix/af_unix.c static inline void unix_remove_socket(struct sock *sk)
sk                262 net/unix/af_unix.c 	__unix_remove_socket(sk);
sk                266 net/unix/af_unix.c static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
sk                269 net/unix/af_unix.c 	__unix_insert_socket(list, sk);
sk                367 net/unix/af_unix.c 	u_sleep = sk_sleep(&u->sk);
sk                374 net/unix/af_unix.c static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
sk                379 net/unix/af_unix.c 	u = unix_sk(sk);
sk                395 net/unix/af_unix.c static void unix_dgram_peer_wake_disconnect(struct sock *sk,
sk                400 net/unix/af_unix.c 	u = unix_sk(sk);
sk                412 net/unix/af_unix.c static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
sk                415 net/unix/af_unix.c 	unix_dgram_peer_wake_disconnect(sk, other);
sk                416 net/unix/af_unix.c 	wake_up_interruptible_poll(sk_sleep(sk),
sk                426 net/unix/af_unix.c static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
sk                430 net/unix/af_unix.c 	connected = unix_dgram_peer_wake_connect(sk, other);
sk                441 net/unix/af_unix.c 		unix_dgram_peer_wake_disconnect(sk, other);
sk                446 net/unix/af_unix.c static int unix_writable(const struct sock *sk)
sk                448 net/unix/af_unix.c 	return sk->sk_state != TCP_LISTEN &&
sk                449 net/unix/af_unix.c 	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
sk                452 net/unix/af_unix.c static void unix_write_space(struct sock *sk)
sk                457 net/unix/af_unix.c 	if (unix_writable(sk)) {
sk                458 net/unix/af_unix.c 		wq = rcu_dereference(sk->sk_wq);
sk                462 net/unix/af_unix.c 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
sk                471 net/unix/af_unix.c static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
sk                473 net/unix/af_unix.c 	if (!skb_queue_empty(&sk->sk_receive_queue)) {
sk                474 net/unix/af_unix.c 		skb_queue_purge(&sk->sk_receive_queue);
sk                475 net/unix/af_unix.c 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
sk                481 net/unix/af_unix.c 		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
sk                488 net/unix/af_unix.c static void unix_sock_destructor(struct sock *sk)
sk                490 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk);
sk                492 net/unix/af_unix.c 	skb_queue_purge(&sk->sk_receive_queue);
sk                494 net/unix/af_unix.c 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
sk                495 net/unix/af_unix.c 	WARN_ON(!sk_unhashed(sk));
sk                496 net/unix/af_unix.c 	WARN_ON(sk->sk_socket);
sk                497 net/unix/af_unix.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                498 net/unix/af_unix.c 		pr_info("Attempt to release alive unix socket: %p\n", sk);
sk                507 net/unix/af_unix.c 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
sk                510 net/unix/af_unix.c 	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
sk                515 net/unix/af_unix.c static void unix_release_sock(struct sock *sk, int embrion)
sk                517 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk);
sk                523 net/unix/af_unix.c 	unix_remove_socket(sk);
sk                526 net/unix/af_unix.c 	unix_state_lock(sk);
sk                527 net/unix/af_unix.c 	sock_orphan(sk);
sk                528 net/unix/af_unix.c 	sk->sk_shutdown = SHUTDOWN_MASK;
sk                532 net/unix/af_unix.c 	state = sk->sk_state;
sk                533 net/unix/af_unix.c 	sk->sk_state = TCP_CLOSE;
sk                534 net/unix/af_unix.c 	unix_state_unlock(sk);
sk                538 net/unix/af_unix.c 	skpair = unix_peer(sk);
sk                541 net/unix/af_unix.c 		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
sk                545 net/unix/af_unix.c 			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
sk                552 net/unix/af_unix.c 		unix_dgram_peer_wake_disconnect(sk, skpair);
sk                554 net/unix/af_unix.c 		unix_peer(sk) = NULL;
sk                559 net/unix/af_unix.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk                561 net/unix/af_unix.c 			unix_release_sock(skb->sk, 1);
sk                570 net/unix/af_unix.c 	sock_put(sk);
sk                589 net/unix/af_unix.c static void init_peercred(struct sock *sk)
sk                591 net/unix/af_unix.c 	put_pid(sk->sk_peer_pid);
sk                592 net/unix/af_unix.c 	if (sk->sk_peer_cred)
sk                593 net/unix/af_unix.c 		put_cred(sk->sk_peer_cred);
sk                594 net/unix/af_unix.c 	sk->sk_peer_pid  = get_pid(task_tgid(current));
sk                595 net/unix/af_unix.c 	sk->sk_peer_cred = get_current_cred();
sk                598 net/unix/af_unix.c static void copy_peercred(struct sock *sk, struct sock *peersk)
sk                600 net/unix/af_unix.c 	put_pid(sk->sk_peer_pid);
sk                601 net/unix/af_unix.c 	if (sk->sk_peer_cred)
sk                602 net/unix/af_unix.c 		put_cred(sk->sk_peer_cred);
sk                603 net/unix/af_unix.c 	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
sk                604 net/unix/af_unix.c 	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
sk                610 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk                611 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk);
sk                620 net/unix/af_unix.c 	unix_state_lock(sk);
sk                621 net/unix/af_unix.c 	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
sk                623 net/unix/af_unix.c 	if (backlog > sk->sk_max_ack_backlog)
sk                625 net/unix/af_unix.c 	sk->sk_max_ack_backlog	= backlog;
sk                626 net/unix/af_unix.c 	sk->sk_state		= TCP_LISTEN;
sk                628 net/unix/af_unix.c 	init_peercred(sk);
sk                632 net/unix/af_unix.c 	unix_state_unlock(sk);
sk                668 net/unix/af_unix.c static int unix_set_peek_off(struct sock *sk, int val)
sk                670 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk);
sk                675 net/unix/af_unix.c 	sk->sk_peek_off = val;
sk                766 net/unix/af_unix.c 	struct sock *sk = NULL;
sk                773 net/unix/af_unix.c 	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
sk                774 net/unix/af_unix.c 	if (!sk)
sk                777 net/unix/af_unix.c 	sock_init_data(sock, sk);
sk                779 net/unix/af_unix.c 	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
sk                780 net/unix/af_unix.c 	sk->sk_write_space	= unix_write_space;
sk                781 net/unix/af_unix.c 	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
sk                782 net/unix/af_unix.c 	sk->sk_destruct		= unix_sock_destructor;
sk                783 net/unix/af_unix.c 	u	  = unix_sk(sk);
sk                793 net/unix/af_unix.c 	unix_insert_socket(unix_sockets_unbound(sk), sk);
sk                795 net/unix/af_unix.c 	if (sk == NULL)
sk                799 net/unix/af_unix.c 		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
sk                802 net/unix/af_unix.c 	return sk;
sk                839 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk                841 net/unix/af_unix.c 	if (!sk)
sk                844 net/unix/af_unix.c 	unix_release_sock(sk, 0);
sk                845 net/unix/af_unix.c 	sock->sk = NULL;
sk                852 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk                853 net/unix/af_unix.c 	struct net *net = sock_net(sk);
sk                854 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk);
sk                899 net/unix/af_unix.c 	addr->hash ^= sk->sk_type;
sk                901 net/unix/af_unix.c 	__unix_remove_socket(sk);
sk                903 net/unix/af_unix.c 	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
sk                997 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk                998 net/unix/af_unix.c 	struct net *net = sock_net(sk);
sk                999 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk);
sk               1049 net/unix/af_unix.c 	addr->hash = hash ^ sk->sk_type;
sk               1062 net/unix/af_unix.c 					      sk->sk_type, hash)) {
sk               1071 net/unix/af_unix.c 	__unix_remove_socket(sk);
sk               1073 net/unix/af_unix.c 	__unix_insert_socket(list, sk);
sk               1114 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               1115 net/unix/af_unix.c 	struct net *net = sock_net(sk);
sk               1132 net/unix/af_unix.c 		    !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
sk               1140 net/unix/af_unix.c 		unix_state_double_lock(sk, other);
sk               1144 net/unix/af_unix.c 			unix_state_double_unlock(sk, other);
sk               1150 net/unix/af_unix.c 		if (!unix_may_send(sk, other))
sk               1153 net/unix/af_unix.c 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
sk               1162 net/unix/af_unix.c 		unix_state_double_lock(sk, other);
sk               1168 net/unix/af_unix.c 	if (unix_peer(sk)) {
sk               1169 net/unix/af_unix.c 		struct sock *old_peer = unix_peer(sk);
sk               1170 net/unix/af_unix.c 		unix_peer(sk) = other;
sk               1171 net/unix/af_unix.c 		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
sk               1173 net/unix/af_unix.c 		unix_state_double_unlock(sk, other);
sk               1176 net/unix/af_unix.c 			unix_dgram_disconnected(sk, old_peer);
sk               1179 net/unix/af_unix.c 		unix_peer(sk) = other;
sk               1180 net/unix/af_unix.c 		unix_state_double_unlock(sk, other);
sk               1185 net/unix/af_unix.c 	unix_state_double_unlock(sk, other);
sk               1216 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               1217 net/unix/af_unix.c 	struct net *net = sock_net(sk);
sk               1218 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
sk               1236 net/unix/af_unix.c 	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
sk               1246 net/unix/af_unix.c 	newsk = unix_create1(sock_net(sk), NULL, 0);
sk               1257 net/unix/af_unix.c 	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
sk               1302 net/unix/af_unix.c 	st = sk->sk_state;
sk               1317 net/unix/af_unix.c 	unix_state_lock_nested(sk);
sk               1319 net/unix/af_unix.c 	if (sk->sk_state != st) {
sk               1320 net/unix/af_unix.c 		unix_state_unlock(sk);
sk               1326 net/unix/af_unix.c 	err = security_unix_stream_connect(sk, other, newsk);
sk               1328 net/unix/af_unix.c 		unix_state_unlock(sk);
sk               1334 net/unix/af_unix.c 	sock_hold(sk);
sk               1335 net/unix/af_unix.c 	unix_peer(newsk)	= sk;
sk               1337 net/unix/af_unix.c 	newsk->sk_type		= sk->sk_type;
sk               1368 net/unix/af_unix.c 	copy_peercred(sk, other);
sk               1371 net/unix/af_unix.c 	sk->sk_state	= TCP_ESTABLISHED;
sk               1375 net/unix/af_unix.c 	unix_peer(sk)	= newsk;
sk               1377 net/unix/af_unix.c 	unix_state_unlock(sk);
sk               1403 net/unix/af_unix.c 	struct sock *ska = socka->sk, *skb = sockb->sk;
sk               1434 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               1444 net/unix/af_unix.c 	if (sk->sk_state != TCP_LISTEN)
sk               1451 net/unix/af_unix.c 	skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
sk               1459 net/unix/af_unix.c 	tsk = skb->sk;
sk               1460 net/unix/af_unix.c 	skb_free_datagram(sk, skb);
sk               1461 net/unix/af_unix.c 	wake_up_interruptible(&unix_sk(sk)->peer_wait);
sk               1478 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               1484 net/unix/af_unix.c 		sk = unix_peer_get(sk);
sk               1487 net/unix/af_unix.c 		if (!sk)
sk               1491 net/unix/af_unix.c 		sock_hold(sk);
sk               1494 net/unix/af_unix.c 	addr = smp_load_acquire(&unix_sk(sk)->addr);
sk               1503 net/unix/af_unix.c 	sock_put(sk);
sk               1584 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               1585 net/unix/af_unix.c 	struct net *net = sock_net(sk);
sk               1586 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk);
sk               1615 net/unix/af_unix.c 		other = unix_peer_get(sk);
sk               1625 net/unix/af_unix.c 	if (len > sk->sk_sndbuf - 32)
sk               1637 net/unix/af_unix.c 	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
sk               1654 net/unix/af_unix.c 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
sk               1662 net/unix/af_unix.c 		other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
sk               1678 net/unix/af_unix.c 	if (!unix_may_send(sk, other))
sk               1690 net/unix/af_unix.c 			unix_state_lock(sk);
sk               1693 net/unix/af_unix.c 		if (unix_peer(sk) == other) {
sk               1694 net/unix/af_unix.c 			unix_peer(sk) = NULL;
sk               1695 net/unix/af_unix.c 			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
sk               1697 net/unix/af_unix.c 			unix_state_unlock(sk);
sk               1699 net/unix/af_unix.c 			unix_dgram_disconnected(sk, other);
sk               1703 net/unix/af_unix.c 			unix_state_unlock(sk);
sk               1716 net/unix/af_unix.c 	if (sk->sk_type != SOCK_SEQPACKET) {
sk               1717 net/unix/af_unix.c 		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
sk               1726 net/unix/af_unix.c 	if (other != sk &&
sk               1727 net/unix/af_unix.c 	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
sk               1740 net/unix/af_unix.c 			unix_state_double_lock(sk, other);
sk               1743 net/unix/af_unix.c 		if (unix_peer(sk) != other ||
sk               1744 net/unix/af_unix.c 		    unix_dgram_peer_wake_me(sk, other)) {
sk               1757 net/unix/af_unix.c 		unix_state_unlock(sk);
sk               1771 net/unix/af_unix.c 		unix_state_unlock(sk);
sk               1790 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               1809 net/unix/af_unix.c 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
sk               1813 net/unix/af_unix.c 		other = unix_peer(sk);
sk               1818 net/unix/af_unix.c 	if (sk->sk_shutdown & SEND_SHUTDOWN)
sk               1825 net/unix/af_unix.c 		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
sk               1834 net/unix/af_unix.c 		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
sk               1893 net/unix/af_unix.c 	struct sock *other, *sk = socket->sk;
sk               1899 net/unix/af_unix.c 	other = unix_peer(sk);
sk               1900 net/unix/af_unix.c 	if (!other || sk->sk_state != TCP_ESTABLISHED)
sk               1907 net/unix/af_unix.c 		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
sk               1922 net/unix/af_unix.c 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
sk               1971 net/unix/af_unix.c 	refcount_add(size, &sk->sk_wmem_alloc);
sk               2006 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               2008 net/unix/af_unix.c 	err = sock_error(sk);
sk               2012 net/unix/af_unix.c 	if (sk->sk_state != TCP_ESTABLISHED)
sk               2024 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               2026 net/unix/af_unix.c 	if (sk->sk_state != TCP_ESTABLISHED)
sk               2032 net/unix/af_unix.c static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
sk               2034 net/unix/af_unix.c 	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
sk               2046 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               2047 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk);
sk               2057 net/unix/af_unix.c 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               2062 net/unix/af_unix.c 		skip = sk_peek_offset(sk, flags);
sk               2063 net/unix/af_unix.c 		skb = __skb_try_recv_datagram(sk, flags, NULL, &skip, &err,
sk               2073 net/unix/af_unix.c 		 !__skb_wait_for_more_packets(sk, &err, &timeo, last));
sk               2076 net/unix/af_unix.c 		unix_state_lock(sk);
sk               2078 net/unix/af_unix.c 		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
sk               2079 net/unix/af_unix.c 		    (sk->sk_shutdown & RCV_SHUTDOWN))
sk               2081 net/unix/af_unix.c 		unix_state_unlock(sk);
sk               2091 net/unix/af_unix.c 		unix_copy_addr(msg, skb->sk);
sk               2102 net/unix/af_unix.c 	if (sock_flag(sk, SOCK_RCVTSTAMP))
sk               2103 net/unix/af_unix.c 		__sock_recv_timestamp(msg, sk, skb);
sk               2114 net/unix/af_unix.c 		sk_peek_offset_bwd(sk, skb->len);
sk               2129 net/unix/af_unix.c 		sk_peek_offset_fwd(sk, size);
sk               2139 net/unix/af_unix.c 	skb_free_datagram(sk, skb);
sk               2148 net/unix/af_unix.c static long unix_stream_data_wait(struct sock *sk, long timeo,
sk               2155 net/unix/af_unix.c 	unix_state_lock(sk);
sk               2158 net/unix/af_unix.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               2160 net/unix/af_unix.c 		tail = skb_peek_tail(&sk->sk_receive_queue);
sk               2163 net/unix/af_unix.c 		    sk->sk_err ||
sk               2164 net/unix/af_unix.c 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
sk               2169 net/unix/af_unix.c 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               2170 net/unix/af_unix.c 		unix_state_unlock(sk);
sk               2175 net/unix/af_unix.c 		unix_state_lock(sk);
sk               2177 net/unix/af_unix.c 		if (sock_flag(sk, SOCK_DEAD))
sk               2180 net/unix/af_unix.c 		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
sk               2183 net/unix/af_unix.c 	finish_wait(sk_sleep(sk), &wait);
sk               2184 net/unix/af_unix.c 	unix_state_unlock(sk);
sk               2209 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               2210 net/unix/af_unix.c 	struct unix_sock *u = unix_sk(sk);
sk               2222 net/unix/af_unix.c 	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
sk               2232 net/unix/af_unix.c 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
sk               2233 net/unix/af_unix.c 	timeo = sock_rcvtimeo(sk, noblock);
sk               2242 net/unix/af_unix.c 	skip = max(sk_peek_offset(sk, flags), 0);
sk               2250 net/unix/af_unix.c 		unix_state_lock(sk);
sk               2251 net/unix/af_unix.c 		if (sock_flag(sk, SOCK_DEAD)) {
sk               2255 net/unix/af_unix.c 		last = skb = skb_peek(&sk->sk_receive_queue);
sk               2266 net/unix/af_unix.c 			err = sock_error(sk);
sk               2269 net/unix/af_unix.c 			if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               2272 net/unix/af_unix.c 			unix_state_unlock(sk);
sk               2280 net/unix/af_unix.c 			timeo = unix_stream_data_wait(sk, timeo, last,
sk               2292 net/unix/af_unix.c 			unix_state_unlock(sk);
sk               2300 net/unix/af_unix.c 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
sk               2305 net/unix/af_unix.c 		unix_state_unlock(sk);
sk               2322 net/unix/af_unix.c 			unix_copy_addr(state->msg, skb->sk);
sk               2356 net/unix/af_unix.c 			sk_peek_offset_bwd(sk, chunk);
sk               2364 net/unix/af_unix.c 			skb_unlink(skb, &sk->sk_receive_queue);
sk               2375 net/unix/af_unix.c 			sk_peek_offset_fwd(sk, chunk);
sk               2383 net/unix/af_unix.c 			unix_state_lock(sk);
sk               2384 net/unix/af_unix.c 			skb = skb_peek_next(skb, &sk->sk_receive_queue);
sk               2387 net/unix/af_unix.c 			unix_state_unlock(sk);
sk               2430 net/unix/af_unix.c 	return skb_splice_bits(skb, state->socket->sk,
sk               2459 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               2471 net/unix/af_unix.c 	unix_state_lock(sk);
sk               2472 net/unix/af_unix.c 	sk->sk_shutdown |= mode;
sk               2473 net/unix/af_unix.c 	other = unix_peer(sk);
sk               2476 net/unix/af_unix.c 	unix_state_unlock(sk);
sk               2477 net/unix/af_unix.c 	sk->sk_state_change(sk);
sk               2480 net/unix/af_unix.c 		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
sk               2503 net/unix/af_unix.c long unix_inq_len(struct sock *sk)
sk               2508 net/unix/af_unix.c 	if (sk->sk_state == TCP_LISTEN)
sk               2511 net/unix/af_unix.c 	spin_lock(&sk->sk_receive_queue.lock);
sk               2512 net/unix/af_unix.c 	if (sk->sk_type == SOCK_STREAM ||
sk               2513 net/unix/af_unix.c 	    sk->sk_type == SOCK_SEQPACKET) {
sk               2514 net/unix/af_unix.c 		skb_queue_walk(&sk->sk_receive_queue, skb)
sk               2517 net/unix/af_unix.c 		skb = skb_peek(&sk->sk_receive_queue);
sk               2521 net/unix/af_unix.c 	spin_unlock(&sk->sk_receive_queue.lock);
sk               2527 net/unix/af_unix.c long unix_outq_len(struct sock *sk)
sk               2529 net/unix/af_unix.c 	return sk_wmem_alloc_get(sk);
sk               2533 net/unix/af_unix.c static int unix_open_file(struct sock *sk)
sk               2539 net/unix/af_unix.c 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk               2542 net/unix/af_unix.c 	if (!smp_load_acquire(&unix_sk(sk)->addr))
sk               2545 net/unix/af_unix.c 	path = unix_sk(sk)->path;
sk               2571 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               2577 net/unix/af_unix.c 		amount = unix_outq_len(sk);
sk               2581 net/unix/af_unix.c 		amount = unix_inq_len(sk);
sk               2588 net/unix/af_unix.c 		err = unix_open_file(sk);
sk               2606 net/unix/af_unix.c 	struct sock *sk = sock->sk;
sk               2613 net/unix/af_unix.c 	if (sk->sk_err)
sk               2615 net/unix/af_unix.c 	if (sk->sk_shutdown == SHUTDOWN_MASK)
sk               2617 net/unix/af_unix.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               2621 net/unix/af_unix.c 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk               2625 net/unix/af_unix.c 	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
sk               2626 net/unix/af_unix.c 	    sk->sk_state == TCP_CLOSE)
sk               2633 net/unix/af_unix.c 	if (unix_writable(sk))
sk               2642 net/unix/af_unix.c 	struct sock *sk = sock->sk, *other;
sk               2650 net/unix/af_unix.c 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
sk               2652 net/unix/af_unix.c 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
sk               2654 net/unix/af_unix.c 	if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               2656 net/unix/af_unix.c 	if (sk->sk_shutdown == SHUTDOWN_MASK)
sk               2660 net/unix/af_unix.c 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
sk               2664 net/unix/af_unix.c 	if (sk->sk_type == SOCK_SEQPACKET) {
sk               2665 net/unix/af_unix.c 		if (sk->sk_state == TCP_CLOSE)
sk               2668 net/unix/af_unix.c 		if (sk->sk_state == TCP_SYN_SENT)
sk               2676 net/unix/af_unix.c 	writable = unix_writable(sk);
sk               2678 net/unix/af_unix.c 		unix_state_lock(sk);
sk               2680 net/unix/af_unix.c 		other = unix_peer(sk);
sk               2681 net/unix/af_unix.c 		if (other && unix_peer(other) != sk &&
sk               2683 net/unix/af_unix.c 		    unix_dgram_peer_wake_me(sk, other))
sk               2686 net/unix/af_unix.c 		unix_state_unlock(sk);
sk               2692 net/unix/af_unix.c 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
sk               2709 net/unix/af_unix.c 	struct sock *sk;
sk               2712 net/unix/af_unix.c 	for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
sk               2713 net/unix/af_unix.c 		if (sock_net(sk) != seq_file_net(seq))
sk               2719 net/unix/af_unix.c 	return sk;
sk               2723 net/unix/af_unix.c 				     struct sock *sk,
sk               2728 net/unix/af_unix.c 	while (sk > (struct sock *)SEQ_START_TOKEN) {
sk               2729 net/unix/af_unix.c 		sk = sk_next(sk);
sk               2730 net/unix/af_unix.c 		if (!sk)
sk               2732 net/unix/af_unix.c 		if (sock_net(sk) == seq_file_net(seq))
sk               2733 net/unix/af_unix.c 			return sk;
sk               2737 net/unix/af_unix.c 		sk = unix_from_bucket(seq, pos);
sk               2738 net/unix/af_unix.c 		if (sk)
sk               2739 net/unix/af_unix.c 			return sk;
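
The net/unix/af_unix.c block covers the whole AF_UNIX lifecycle: unix_bind()/unix_autobind(), listen, connect/accept, the datagram and stream send/receive paths, poll and shutdown. As a small orientation aid, here is a userspace sketch that exercises the abstract-namespace case distinguished by UNIX_ABSTRACT() at the top of that block; abstract_unix_listener() is an illustrative helper name.

#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

/* Bind a SOCK_STREAM AF_UNIX listener to an abstract address
 * ("\0example").  A name starting with '\0' never touches the
 * filesystem, so no unlink() is needed on cleanup.
 */
static int abstract_unix_listener(void)
{
	struct sockaddr_un sa;
	socklen_t len;
	int fd;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.sun_family = AF_UNIX;
	sa.sun_path[0] = '\0';		/* abstract namespace marker */
	strcpy(sa.sun_path + 1, "example");
	len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen("example");

	if (bind(fd, (struct sockaddr *)&sa, len) ||	/* unix_bind() path */
	    listen(fd, 16)) {
		close(fd);
		return -1;
	}
	return fd;	/* accept() on this fd lands in unix_accept() */
}
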
sk                 14 net/unix/diag.c static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
sk                 17 net/unix/diag.c 	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
sk                 26 net/unix/diag.c static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
sk                 28 net/unix/diag.c 	struct dentry *dentry = unix_sk(sk)->path.dentry;
sk                 42 net/unix/diag.c static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
sk                 47 net/unix/diag.c 	peer = unix_peer_get(sk);
sk                 60 net/unix/diag.c static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
sk                 67 net/unix/diag.c 	if (sk->sk_state == TCP_LISTEN) {
sk                 68 net/unix/diag.c 		spin_lock(&sk->sk_receive_queue.lock);
sk                 71 net/unix/diag.c 				   sk->sk_receive_queue.qlen * sizeof(u32));
sk                 77 net/unix/diag.c 		skb_queue_walk(&sk->sk_receive_queue, skb) {
sk                 80 net/unix/diag.c 			req = skb->sk;
sk                 91 net/unix/diag.c 		spin_unlock(&sk->sk_receive_queue.lock);
sk                 97 net/unix/diag.c 	spin_unlock(&sk->sk_receive_queue.lock);
sk                101 net/unix/diag.c static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
sk                105 net/unix/diag.c 	if (sk->sk_state == TCP_LISTEN) {
sk                106 net/unix/diag.c 		rql.udiag_rqueue = sk->sk_receive_queue.qlen;
sk                107 net/unix/diag.c 		rql.udiag_wqueue = sk->sk_max_ack_backlog;
sk                109 net/unix/diag.c 		rql.udiag_rqueue = (u32) unix_inq_len(sk);
sk                110 net/unix/diag.c 		rql.udiag_wqueue = (u32) unix_outq_len(sk);
sk                116 net/unix/diag.c static int sk_diag_dump_uid(struct sock *sk, struct sk_buff *nlskb)
sk                118 net/unix/diag.c 	uid_t uid = from_kuid_munged(sk_user_ns(nlskb->sk), sock_i_uid(sk));
sk                122 net/unix/diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
sk                135 net/unix/diag.c 	rep->udiag_type = sk->sk_type;
sk                136 net/unix/diag.c 	rep->udiag_state = sk->sk_state;
sk                139 net/unix/diag.c 	sock_diag_save_cookie(sk, rep->udiag_cookie);
sk                142 net/unix/diag.c 	    sk_diag_dump_name(sk, skb))
sk                146 net/unix/diag.c 	    sk_diag_dump_vfs(sk, skb))
sk                150 net/unix/diag.c 	    sk_diag_dump_peer(sk, skb))
sk                154 net/unix/diag.c 	    sk_diag_dump_icons(sk, skb))
sk                158 net/unix/diag.c 	    sk_diag_show_rqlen(sk, skb))
sk                162 net/unix/diag.c 	    sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
sk                165 net/unix/diag.c 	if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
sk                169 net/unix/diag.c 	    sk_diag_dump_uid(sk, skb))
sk                180 net/unix/diag.c static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
sk                185 net/unix/diag.c 	unix_state_lock(sk);
sk                186 net/unix/diag.c 	sk_ino = sock_i_ino(sk);
sk                187 net/unix/diag.c 	unix_state_unlock(sk);
sk                192 net/unix/diag.c 	return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino);
sk                199 net/unix/diag.c 	struct net *net = sock_net(skb->sk);
sk                210 net/unix/diag.c 		struct sock *sk;
sk                213 net/unix/diag.c 		sk_for_each(sk, &unix_socket_table[slot]) {
sk                214 net/unix/diag.c 			if (!net_eq(sock_net(sk), net))
sk                218 net/unix/diag.c 			if (!(req->udiag_states & (1 << sk->sk_state)))
sk                220 net/unix/diag.c 			if (sk_diag_dump(sk, skb, req,
sk                240 net/unix/diag.c 	struct sock *sk;
sk                244 net/unix/diag.c 		sk_for_each(sk, &unix_socket_table[i])
sk                245 net/unix/diag.c 			if (ino == sock_i_ino(sk)) {
sk                246 net/unix/diag.c 				sock_hold(sk);
sk                249 net/unix/diag.c 				return sk;
sk                262 net/unix/diag.c 	struct sock *sk;
sk                265 net/unix/diag.c 	struct net *net = sock_net(in_skb->sk);
sk                270 net/unix/diag.c 	sk = unix_lookup_by_ino(req->udiag_ino);
sk                272 net/unix/diag.c 	if (sk == NULL)
sk                274 net/unix/diag.c 	if (!net_eq(sock_net(sk), net))
sk                277 net/unix/diag.c 	err = sock_diag_check_cookie(sk, req->udiag_cookie);
sk                288 net/unix/diag.c 	err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid,
sk                303 net/unix/diag.c 	if (sk)
sk                304 net/unix/diag.c 		sock_put(sk);
sk                312 net/unix/diag.c 	struct net *net = sock_net(skb->sk);
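
net/unix/diag.c serves the UNIX_DIAG sock_diag interface: sk_diag_fill() above packs the per-socket attributes (name, peer, receive-queue length, ...) that a netlink dump request selects through udiag_show. Below is a hedged sketch of issuing such a dump from user space, assuming <linux/unix_diag.h> and <linux/sock_diag.h> provide the UAPI structures named here; unix_diag_dump_request() is an illustrative helper and reply parsing is omitted.

#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/unix_diag.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Ask the kernel to dump all AF_UNIX sockets.  Replies are a stream of
 * unix_diag_msg headers plus the attributes requested in udiag_show,
 * terminated by NLMSG_DONE.
 */
static int unix_diag_dump_request(void)
{
	int nl = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	struct {
		struct nlmsghdr nlh;
		struct unix_diag_req req;
	} msg;

	if (nl < 0)
		return -1;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_UNIX;
	msg.req.udiag_states = -1;	/* all socket states */
	msg.req.udiag_show = UDIAG_SHOW_NAME | UDIAG_SHOW_PEER |
			     UDIAG_SHOW_RQLEN;

	if (send(nl, &msg, sizeof(msg), 0) < 0) {
		close(nl);
		return -1;
	}
	return nl;	/* caller recv()s the dump from this fd */
}
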
sk                108 net/unix/garbage.c 				struct sock *sk = unix_get_socket(*fp++);
sk                110 net/unix/garbage.c 				if (sk) {
sk                111 net/unix/garbage.c 					struct unix_sock *u = unix_sk(sk);
sk                149 net/unix/garbage.c 			u = unix_sk(skb->sk);
sk                161 net/unix/garbage.c 			scan_inflight(&u->sk, func, hitlist);
sk                236 net/unix/garbage.c 		total_refs = file_count(u->sk.sk_socket->file);
sk                252 net/unix/garbage.c 		scan_children(&u->sk, dec_inflight, NULL);
sk                271 net/unix/garbage.c 			scan_children(&u->sk, inc_inflight_move_tail, NULL);
sk                282 net/unix/garbage.c 		scan_children(&u->sk, inc_inflight, &hitlist);
sk                 31 net/unix/scm.c 		struct sock *s = sock->sk;
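
net/unix/garbage.c and net/unix/scm.c handle SCM_RIGHTS descriptors that are still queued inside AF_UNIX skbs; those in-flight references are what the scan_inflight()/scan_children() walkers above chase when breaking reference cycles. For context, the userspace operation that creates such an in-flight reference is simply fd passing over an AF_UNIX socket, sketched below with an illustrative send_fd() helper.

#include <string.h>
#include <sys/socket.h>

/* Pass one file descriptor over an AF_UNIX socket with SCM_RIGHTS.
 * Until the peer receives it, the descriptor is an "in-flight"
 * reference of the kind the unix garbage collector scans.
 */
static int send_fd(int sock, int fd_to_pass)
{
	char dummy = 'x';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;	/* forces cmsg alignment */
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

	return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}
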
sk                113 net/vmw_vsock/af_vsock.c static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
sk                114 net/vmw_vsock/af_vsock.c static void vsock_sk_destruct(struct sock *sk);
sk                115 net/vmw_vsock/af_vsock.c static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
sk                182 net/vmw_vsock/af_vsock.c 	struct sock *sk = sk_vsock(vsk);
sk                188 net/vmw_vsock/af_vsock.c 	return __vsock_bind(sk, &local_addr);
sk                206 net/vmw_vsock/af_vsock.c 	sock_hold(&vsk->sk);
sk                213 net/vmw_vsock/af_vsock.c 	sock_hold(&vsk->sk);
sk                220 net/vmw_vsock/af_vsock.c 	sock_put(&vsk->sk);
sk                226 net/vmw_vsock/af_vsock.c 	sock_put(&vsk->sk);
sk                294 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk                297 net/vmw_vsock/af_vsock.c 	sk = __vsock_find_bound_socket(addr);
sk                298 net/vmw_vsock/af_vsock.c 	if (sk)
sk                299 net/vmw_vsock/af_vsock.c 		sock_hold(sk);
sk                303 net/vmw_vsock/af_vsock.c 	return sk;
sk                310 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk                313 net/vmw_vsock/af_vsock.c 	sk = __vsock_find_connected_socket(src, dst);
sk                314 net/vmw_vsock/af_vsock.c 	if (sk)
sk                315 net/vmw_vsock/af_vsock.c 		sock_hold(sk);
sk                319 net/vmw_vsock/af_vsock.c 	return sk;
sk                330 net/vmw_vsock/af_vsock.c void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
sk                407 net/vmw_vsock/af_vsock.c static bool vsock_is_accept_queue_empty(struct sock *sk)
sk                409 net/vmw_vsock/af_vsock.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                413 net/vmw_vsock/af_vsock.c static bool vsock_is_pending(struct sock *sk)
sk                415 net/vmw_vsock/af_vsock.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                419 net/vmw_vsock/af_vsock.c static int vsock_send_shutdown(struct sock *sk, int mode)
sk                421 net/vmw_vsock/af_vsock.c 	return transport->shutdown(vsock_sk(sk), mode);
sk                426 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk                432 net/vmw_vsock/af_vsock.c 	sk = sk_vsock(vsk);
sk                437 net/vmw_vsock/af_vsock.c 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                439 net/vmw_vsock/af_vsock.c 	if (vsock_is_pending(sk)) {
sk                440 net/vmw_vsock/af_vsock.c 		vsock_remove_pending(listener, sk);
sk                459 net/vmw_vsock/af_vsock.c 	sk->sk_state = TCP_CLOSE;
sk                462 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk                465 net/vmw_vsock/af_vsock.c 		sock_put(sk);
sk                467 net/vmw_vsock/af_vsock.c 	sock_put(sk);
sk                534 net/vmw_vsock/af_vsock.c static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
sk                536 net/vmw_vsock/af_vsock.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                553 net/vmw_vsock/af_vsock.c 	switch (sk->sk_socket->type) {
sk                581 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk                585 net/vmw_vsock/af_vsock.c 	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
sk                586 net/vmw_vsock/af_vsock.c 	if (!sk)
sk                589 net/vmw_vsock/af_vsock.c 	sock_init_data(sock, sk);
sk                596 net/vmw_vsock/af_vsock.c 		sk->sk_type = type;
sk                598 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk                602 net/vmw_vsock/af_vsock.c 	sk->sk_destruct = vsock_sk_destruct;
sk                603 net/vmw_vsock/af_vsock.c 	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
sk                604 net/vmw_vsock/af_vsock.c 	sock_reset_flag(sk, SOCK_DONE);
sk                630 net/vmw_vsock/af_vsock.c 		sk_free(sk);
sk                637 net/vmw_vsock/af_vsock.c 	return sk;
sk                641 net/vmw_vsock/af_vsock.c static void __vsock_release(struct sock *sk, int level)
sk                643 net/vmw_vsock/af_vsock.c 	if (sk) {
sk                648 net/vmw_vsock/af_vsock.c 		vsk = vsock_sk(sk);
sk                661 net/vmw_vsock/af_vsock.c 		lock_sock_nested(sk, level);
sk                662 net/vmw_vsock/af_vsock.c 		sock_orphan(sk);
sk                663 net/vmw_vsock/af_vsock.c 		sk->sk_shutdown = SHUTDOWN_MASK;
sk                665 net/vmw_vsock/af_vsock.c 		while ((skb = skb_dequeue(&sk->sk_receive_queue)))
sk                669 net/vmw_vsock/af_vsock.c 		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
sk                674 net/vmw_vsock/af_vsock.c 		release_sock(sk);
sk                675 net/vmw_vsock/af_vsock.c 		sock_put(sk);
sk                679 net/vmw_vsock/af_vsock.c static void vsock_sk_destruct(struct sock *sk)
sk                681 net/vmw_vsock/af_vsock.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                694 net/vmw_vsock/af_vsock.c static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk                698 net/vmw_vsock/af_vsock.c 	err = sock_queue_rcv_skb(sk, skb);
sk                719 net/vmw_vsock/af_vsock.c 	__vsock_release(sock->sk, 0);
sk                720 net/vmw_vsock/af_vsock.c 	sock->sk = NULL;
sk                730 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk                733 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk                738 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk                739 net/vmw_vsock/af_vsock.c 	err = __vsock_bind(sk, vm_addr);
sk                740 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk                749 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk                753 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk                754 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk                757 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk                784 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk                791 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk                810 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk                813 net/vmw_vsock/af_vsock.c 		if (sk->sk_type == SOCK_STREAM)
sk                823 net/vmw_vsock/af_vsock.c 		lock_sock(sk);
sk                824 net/vmw_vsock/af_vsock.c 		sk->sk_shutdown |= mode;
sk                825 net/vmw_vsock/af_vsock.c 		sk->sk_state_change(sk);
sk                826 net/vmw_vsock/af_vsock.c 		release_sock(sk);
sk                828 net/vmw_vsock/af_vsock.c 		if (sk->sk_type == SOCK_STREAM) {
sk                829 net/vmw_vsock/af_vsock.c 			sock_reset_flag(sk, SOCK_DONE);
sk                830 net/vmw_vsock/af_vsock.c 			vsock_send_shutdown(sk, mode);
sk                840 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk                844 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk                845 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk                847 net/vmw_vsock/af_vsock.c 	poll_wait(file, sk_sleep(sk), wait);
sk                850 net/vmw_vsock/af_vsock.c 	if (sk->sk_err)
sk                857 net/vmw_vsock/af_vsock.c 	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
sk                858 net/vmw_vsock/af_vsock.c 	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
sk                863 net/vmw_vsock/af_vsock.c 	if (sk->sk_shutdown & RCV_SHUTDOWN ||
sk                873 net/vmw_vsock/af_vsock.c 		if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
sk                874 net/vmw_vsock/af_vsock.c 		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
sk                878 net/vmw_vsock/af_vsock.c 		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
sk                882 net/vmw_vsock/af_vsock.c 		lock_sock(sk);
sk                887 net/vmw_vsock/af_vsock.c 		if (sk->sk_state == TCP_LISTEN
sk                888 net/vmw_vsock/af_vsock.c 		    && !vsock_is_accept_queue_empty(sk))
sk                893 net/vmw_vsock/af_vsock.c 		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
sk                910 net/vmw_vsock/af_vsock.c 		if (sk->sk_shutdown & RCV_SHUTDOWN ||
sk                916 net/vmw_vsock/af_vsock.c 		if (sk->sk_state == TCP_ESTABLISHED) {
sk                917 net/vmw_vsock/af_vsock.c 			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
sk                938 net/vmw_vsock/af_vsock.c 		if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
sk                939 net/vmw_vsock/af_vsock.c 			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
sk                944 net/vmw_vsock/af_vsock.c 		release_sock(sk);
sk                954 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk                963 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk                964 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk                966 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk               1017 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk               1025 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk               1029 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk               1030 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk               1034 net/vmw_vsock/af_vsock.c 		lock_sock(sk);
sk               1038 net/vmw_vsock/af_vsock.c 		release_sock(sk);
sk               1043 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk               1059 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk               1066 net/vmw_vsock/af_vsock.c 	return transport->dgram_dequeue(vsock_sk(sock->sk), msg, len, flags);
sk               1100 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk               1105 net/vmw_vsock/af_vsock.c 	sk = sk_vsock(vsk);
sk               1107 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk               1108 net/vmw_vsock/af_vsock.c 	if (sk->sk_state == TCP_SYN_SENT &&
sk               1109 net/vmw_vsock/af_vsock.c 	    (sk->sk_shutdown != SHUTDOWN_MASK)) {
sk               1110 net/vmw_vsock/af_vsock.c 		sk->sk_state = TCP_CLOSE;
sk               1111 net/vmw_vsock/af_vsock.c 		sk->sk_err = ETIMEDOUT;
sk               1112 net/vmw_vsock/af_vsock.c 		sk->sk_error_report(sk);
sk               1115 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk               1119 net/vmw_vsock/af_vsock.c 	sock_put(sk);
sk               1126 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk               1133 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk               1134 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk               1136 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk               1156 net/vmw_vsock/af_vsock.c 		if ((sk->sk_state == TCP_LISTEN) ||
sk               1179 net/vmw_vsock/af_vsock.c 		sk->sk_state = TCP_SYN_SENT;
sk               1197 net/vmw_vsock/af_vsock.c 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               1199 net/vmw_vsock/af_vsock.c 	while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
sk               1207 net/vmw_vsock/af_vsock.c 			sock_hold(sk);
sk               1214 net/vmw_vsock/af_vsock.c 		release_sock(sk);
sk               1216 net/vmw_vsock/af_vsock.c 		lock_sock(sk);
sk               1220 net/vmw_vsock/af_vsock.c 			sk->sk_state = TCP_CLOSE;
sk               1226 net/vmw_vsock/af_vsock.c 			sk->sk_state = TCP_CLOSE;
sk               1232 net/vmw_vsock/af_vsock.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               1235 net/vmw_vsock/af_vsock.c 	if (sk->sk_err) {
sk               1236 net/vmw_vsock/af_vsock.c 		err = -sk->sk_err;
sk               1237 net/vmw_vsock/af_vsock.c 		sk->sk_state = TCP_CLOSE;
sk               1244 net/vmw_vsock/af_vsock.c 	finish_wait(sk_sleep(sk), &wait);
sk               1246 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk               1261 net/vmw_vsock/af_vsock.c 	listener = sock->sk;
sk               1335 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk               1338 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk               1340 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk               1352 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk               1359 net/vmw_vsock/af_vsock.c 	sk->sk_max_ack_backlog = backlog;
sk               1360 net/vmw_vsock/af_vsock.c 	sk->sk_state = TCP_LISTEN;
sk               1365 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk               1376 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk               1396 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk               1397 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk               1399 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk               1442 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk               1453 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk               1476 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk               1477 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk               1520 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk               1528 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk               1529 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk               1536 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk               1540 net/vmw_vsock/af_vsock.c 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
sk               1545 net/vmw_vsock/af_vsock.c 	if (sk->sk_shutdown & SEND_SHUTDOWN ||
sk               1551 net/vmw_vsock/af_vsock.c 	if (sk->sk_state != TCP_ESTABLISHED ||
sk               1563 net/vmw_vsock/af_vsock.c 	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
sk               1572 net/vmw_vsock/af_vsock.c 		add_wait_queue(sk_sleep(sk), &wait);
sk               1574 net/vmw_vsock/af_vsock.c 		       sk->sk_err == 0 &&
sk               1575 net/vmw_vsock/af_vsock.c 		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
sk               1581 net/vmw_vsock/af_vsock.c 				remove_wait_queue(sk_sleep(sk), &wait);
sk               1587 net/vmw_vsock/af_vsock.c 				remove_wait_queue(sk_sleep(sk), &wait);
sk               1591 net/vmw_vsock/af_vsock.c 			release_sock(sk);
sk               1593 net/vmw_vsock/af_vsock.c 			lock_sock(sk);
sk               1596 net/vmw_vsock/af_vsock.c 				remove_wait_queue(sk_sleep(sk), &wait);
sk               1600 net/vmw_vsock/af_vsock.c 				remove_wait_queue(sk_sleep(sk), &wait);
sk               1604 net/vmw_vsock/af_vsock.c 		remove_wait_queue(sk_sleep(sk), &wait);
sk               1610 net/vmw_vsock/af_vsock.c 		if (sk->sk_err) {
sk               1611 net/vmw_vsock/af_vsock.c 			err = -sk->sk_err;
sk               1613 net/vmw_vsock/af_vsock.c 		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
sk               1650 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk               1659 net/vmw_vsock/af_vsock.c 	struct sock *sk;
sk               1669 net/vmw_vsock/af_vsock.c 	sk = sock->sk;
sk               1670 net/vmw_vsock/af_vsock.c 	vsk = vsock_sk(sk);
sk               1673 net/vmw_vsock/af_vsock.c 	lock_sock(sk);
sk               1675 net/vmw_vsock/af_vsock.c 	if (sk->sk_state != TCP_ESTABLISHED) {
sk               1681 net/vmw_vsock/af_vsock.c 		if (sock_flag(sk, SOCK_DONE))
sk               1698 net/vmw_vsock/af_vsock.c 	if (sk->sk_shutdown & RCV_SHUTDOWN) {
sk               1717 net/vmw_vsock/af_vsock.c 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
sk               1722 net/vmw_vsock/af_vsock.c 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
sk               1733 net/vmw_vsock/af_vsock.c 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk               1737 net/vmw_vsock/af_vsock.c 			if (sk->sk_err != 0 ||
sk               1738 net/vmw_vsock/af_vsock.c 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
sk               1740 net/vmw_vsock/af_vsock.c 				finish_wait(sk_sleep(sk), &wait);
sk               1746 net/vmw_vsock/af_vsock.c 				finish_wait(sk_sleep(sk), &wait);
sk               1753 net/vmw_vsock/af_vsock.c 				finish_wait(sk_sleep(sk), &wait);
sk               1756 net/vmw_vsock/af_vsock.c 			release_sock(sk);
sk               1758 net/vmw_vsock/af_vsock.c 			lock_sock(sk);
sk               1762 net/vmw_vsock/af_vsock.c 				finish_wait(sk_sleep(sk), &wait);
sk               1766 net/vmw_vsock/af_vsock.c 				finish_wait(sk_sleep(sk), &wait);
sk               1772 net/vmw_vsock/af_vsock.c 			finish_wait(sk_sleep(sk), &wait);
sk               1812 net/vmw_vsock/af_vsock.c 	if (sk->sk_err)
sk               1813 net/vmw_vsock/af_vsock.c 		err = -sk->sk_err;
sk               1814 net/vmw_vsock/af_vsock.c 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
sk               1821 net/vmw_vsock/af_vsock.c 	release_sock(sk);
sk                 14 net/vmw_vsock/diag.c static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
sk                 17 net/vmw_vsock/diag.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                 34 net/vmw_vsock/diag.c 	rep->vdiag_type = sk->sk_type;
sk                 35 net/vmw_vsock/diag.c 	rep->vdiag_state = sk->sk_state;
sk                 36 net/vmw_vsock/diag.c 	rep->vdiag_shutdown = sk->sk_shutdown;
sk                 41 net/vmw_vsock/diag.c 	rep->vdiag_ino = sock_i_ino(sk);
sk                 43 net/vmw_vsock/diag.c 	sock_diag_save_cookie(sk, rep->vdiag_cookie);
sk                 59 net/vmw_vsock/diag.c 	net = sock_net(skb->sk);
sk                 77 net/vmw_vsock/diag.c 				struct sock *sk = sk_vsock(vsk);
sk                 79 net/vmw_vsock/diag.c 				if (!net_eq(sock_net(sk), net))
sk                 83 net/vmw_vsock/diag.c 				if (!(req->vdiag_states & (1 << sk->sk_state)))
sk                 85 net/vmw_vsock/diag.c 				if (sk_diag_fill(sk, skb,
sk                107 net/vmw_vsock/diag.c 			struct sock *sk = sk_vsock(vsk);
sk                113 net/vmw_vsock/diag.c 			if (!net_eq(sock_net(sk), net))
sk                117 net/vmw_vsock/diag.c 			if (!(req->vdiag_states & (1 << sk->sk_state)))
sk                119 net/vmw_vsock/diag.c 			if (sk_diag_fill(sk, skb,
sk                144 net/vmw_vsock/diag.c 	struct net *net = sock_net(skb->sk);
sk                236 net/vmw_vsock/hyperv_transport.c 	struct sock *sk = (struct sock *)ctx;
sk                237 net/vmw_vsock/hyperv_transport.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                242 net/vmw_vsock/hyperv_transport.c 		sk->sk_data_ready(sk);
sk                245 net/vmw_vsock/hyperv_transport.c 		sk->sk_write_space(sk);
sk                251 net/vmw_vsock/hyperv_transport.c 	struct sock *sk = sk_vsock(vsk);
sk                253 net/vmw_vsock/hyperv_transport.c 	sock_set_flag(sk, SOCK_DONE);
sk                256 net/vmw_vsock/hyperv_transport.c 		sk->sk_state = TCP_CLOSING;
sk                257 net/vmw_vsock/hyperv_transport.c 	sk->sk_state_change(sk);
sk                264 net/vmw_vsock/hyperv_transport.c 		sock_put(sk);
sk                270 net/vmw_vsock/hyperv_transport.c 	struct sock *sk = get_per_channel_state(chan);
sk                272 net/vmw_vsock/hyperv_transport.c 	lock_sock(sk);
sk                273 net/vmw_vsock/hyperv_transport.c 	hvs_do_close_lock_held(vsock_sk(sk), true);
sk                274 net/vmw_vsock/hyperv_transport.c 	release_sock(sk);
sk                279 net/vmw_vsock/hyperv_transport.c 	sock_put(sk);
sk                288 net/vmw_vsock/hyperv_transport.c 	struct sock *sk, *new = NULL;
sk                303 net/vmw_vsock/hyperv_transport.c 	sk = vsock_find_bound_socket(&addr);
sk                304 net/vmw_vsock/hyperv_transport.c 	if (!sk)
sk                307 net/vmw_vsock/hyperv_transport.c 	lock_sock(sk);
sk                308 net/vmw_vsock/hyperv_transport.c 	if ((conn_from_host && sk->sk_state != TCP_LISTEN) ||
sk                309 net/vmw_vsock/hyperv_transport.c 	    (!conn_from_host && sk->sk_state != TCP_SYN_SENT))
sk                313 net/vmw_vsock/hyperv_transport.c 		if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog)
sk                316 net/vmw_vsock/hyperv_transport.c 		new = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
sk                317 net/vmw_vsock/hyperv_transport.c 				     sk->sk_type, 0);
sk                333 net/vmw_vsock/hyperv_transport.c 		hvs = vsock_sk(sk)->trans;
sk                356 net/vmw_vsock/hyperv_transport.c 		sndbuf = max_t(int, sk->sk_sndbuf, RINGBUFFER_HVS_SND_SIZE);
sk                359 net/vmw_vsock/hyperv_transport.c 		rcvbuf = max_t(int, sk->sk_rcvbuf, RINGBUFFER_HVS_RCV_SIZE);
sk                365 net/vmw_vsock/hyperv_transport.c 			 conn_from_host ? new : sk);
sk                376 net/vmw_vsock/hyperv_transport.c 	set_per_channel_state(chan, conn_from_host ? new : sk);
sk                379 net/vmw_vsock/hyperv_transport.c 	sock_hold(conn_from_host ? new : sk);
sk                391 net/vmw_vsock/hyperv_transport.c 		sk->sk_ack_backlog++;
sk                399 net/vmw_vsock/hyperv_transport.c 		vsock_enqueue_accept(sk, new);
sk                401 net/vmw_vsock/hyperv_transport.c 		sk->sk_state = TCP_ESTABLISHED;
sk                402 net/vmw_vsock/hyperv_transport.c 		sk->sk_socket->state = SS_CONNECTED;
sk                404 net/vmw_vsock/hyperv_transport.c 		vsock_insert_connected(vsock_sk(sk));
sk                407 net/vmw_vsock/hyperv_transport.c 	sk->sk_state_change(sk);
sk                411 net/vmw_vsock/hyperv_transport.c 	sock_put(sk);
sk                413 net/vmw_vsock/hyperv_transport.c 	release_sock(sk);
sk                424 net/vmw_vsock/hyperv_transport.c 	struct sock *sk = sk_vsock(vsk);
sk                432 net/vmw_vsock/hyperv_transport.c 	sk->sk_sndbuf = RINGBUFFER_HVS_SND_SIZE;
sk                433 net/vmw_vsock/hyperv_transport.c 	sk->sk_rcvbuf = RINGBUFFER_HVS_RCV_SIZE;
sk                467 net/vmw_vsock/hyperv_transport.c 	struct sock *sk = sk_vsock(vsk);
sk                472 net/vmw_vsock/hyperv_transport.c 	lock_sock(sk);
sk                474 net/vmw_vsock/hyperv_transport.c 	release_sock(sk);
sk                482 net/vmw_vsock/hyperv_transport.c 	struct sock *sk = sk_vsock(vsk);
sk                484 net/vmw_vsock/hyperv_transport.c 	sock_hold(sk);
sk                485 net/vmw_vsock/hyperv_transport.c 	lock_sock(sk);
sk                486 net/vmw_vsock/hyperv_transport.c 	if (!sock_flag(sk, SOCK_DONE))
sk                490 net/vmw_vsock/hyperv_transport.c 	release_sock(sk);
sk                491 net/vmw_vsock/hyperv_transport.c 	sock_put(sk);
sk                497 net/vmw_vsock/hyperv_transport.c 	struct sock *sk = sk_vsock(vsk);
sk                499 net/vmw_vsock/hyperv_transport.c 	if (!(sk->sk_state == TCP_ESTABLISHED ||
sk                500 net/vmw_vsock/hyperv_transport.c 	      sk->sk_state == TCP_CLOSING))
sk                503 net/vmw_vsock/hyperv_transport.c 	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
sk                506 net/vmw_vsock/hyperv_transport.c 	if (sock_flag(sk, SOCK_DONE))
sk                510 net/vmw_vsock/hyperv_transport.c 	sock_hold(sk);
sk                519 net/vmw_vsock/hyperv_transport.c 	struct sock *sk = sk_vsock(vsk);
sk                522 net/vmw_vsock/hyperv_transport.c 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                524 net/vmw_vsock/hyperv_transport.c 	release_sock(sk);
sk                454 net/vmw_vsock/virtio_transport.c static void virtio_vsock_reset_sock(struct sock *sk)
sk                456 net/vmw_vsock/virtio_transport.c 	lock_sock(sk);
sk                457 net/vmw_vsock/virtio_transport.c 	sk->sk_state = TCP_CLOSE;
sk                458 net/vmw_vsock/virtio_transport.c 	sk->sk_err = ECONNRESET;
sk                459 net/vmw_vsock/virtio_transport.c 	sk->sk_error_report(sk);
sk                460 net/vmw_vsock/virtio_transport.c 	release_sock(sk);
sk                730 net/vmw_vsock/virtio_transport_common.c static void virtio_transport_wait_close(struct sock *sk, long timeout)
sk                735 net/vmw_vsock/virtio_transport_common.c 		add_wait_queue(sk_sleep(sk), &wait);
sk                738 net/vmw_vsock/virtio_transport_common.c 			if (sk_wait_event(sk, &timeout,
sk                739 net/vmw_vsock/virtio_transport_common.c 					  sock_flag(sk, SOCK_DONE), &wait))
sk                743 net/vmw_vsock/virtio_transport_common.c 		remove_wait_queue(sk_sleep(sk), &wait);
sk                750 net/vmw_vsock/virtio_transport_common.c 	struct sock *sk = sk_vsock(vsk);
sk                752 net/vmw_vsock/virtio_transport_common.c 	sock_set_flag(sk, SOCK_DONE);
sk                755 net/vmw_vsock/virtio_transport_common.c 		sk->sk_state = TCP_CLOSING;
sk                756 net/vmw_vsock/virtio_transport_common.c 	sk->sk_state_change(sk);
sk                765 net/vmw_vsock/virtio_transport_common.c 		sock_put(sk);
sk                773 net/vmw_vsock/virtio_transport_common.c 	struct sock *sk = sk_vsock(vsk);
sk                775 net/vmw_vsock/virtio_transport_common.c 	sock_hold(sk);
sk                776 net/vmw_vsock/virtio_transport_common.c 	lock_sock(sk);
sk                778 net/vmw_vsock/virtio_transport_common.c 	if (!sock_flag(sk, SOCK_DONE)) {
sk                786 net/vmw_vsock/virtio_transport_common.c 	release_sock(sk);
sk                787 net/vmw_vsock/virtio_transport_common.c 	sock_put(sk);
sk                793 net/vmw_vsock/virtio_transport_common.c 	struct sock *sk = &vsk->sk;
sk                795 net/vmw_vsock/virtio_transport_common.c 	if (!(sk->sk_state == TCP_ESTABLISHED ||
sk                796 net/vmw_vsock/virtio_transport_common.c 	      sk->sk_state == TCP_CLOSING))
sk                805 net/vmw_vsock/virtio_transport_common.c 	if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
sk                808 net/vmw_vsock/virtio_transport_common.c 	if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
sk                809 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_wait_close(sk, sk->sk_lingertime);
sk                811 net/vmw_vsock/virtio_transport_common.c 	if (sock_flag(sk, SOCK_DONE)) {
sk                815 net/vmw_vsock/virtio_transport_common.c 	sock_hold(sk);
sk                827 net/vmw_vsock/virtio_transport_common.c 	struct sock *sk = &vsk->sk;
sk                830 net/vmw_vsock/virtio_transport_common.c 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk                831 net/vmw_vsock/virtio_transport_common.c 	if (sk->sk_type == SOCK_STREAM)
sk                838 net/vmw_vsock/virtio_transport_common.c 	release_sock(sk);
sk                846 net/vmw_vsock/virtio_transport_common.c virtio_transport_recv_connecting(struct sock *sk,
sk                849 net/vmw_vsock/virtio_transport_common.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                855 net/vmw_vsock/virtio_transport_common.c 		sk->sk_state = TCP_ESTABLISHED;
sk                856 net/vmw_vsock/virtio_transport_common.c 		sk->sk_socket->state = SS_CONNECTED;
sk                858 net/vmw_vsock/virtio_transport_common.c 		sk->sk_state_change(sk);
sk                875 net/vmw_vsock/virtio_transport_common.c 	sk->sk_state = TCP_CLOSE;
sk                876 net/vmw_vsock/virtio_transport_common.c 	sk->sk_err = skerr;
sk                877 net/vmw_vsock/virtio_transport_common.c 	sk->sk_error_report(sk);
sk                930 net/vmw_vsock/virtio_transport_common.c virtio_transport_recv_connected(struct sock *sk,
sk                933 net/vmw_vsock/virtio_transport_common.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                939 net/vmw_vsock/virtio_transport_common.c 		sk->sk_data_ready(sk);
sk                942 net/vmw_vsock/virtio_transport_common.c 		sk->sk_write_space(sk);
sk                951 net/vmw_vsock/virtio_transport_common.c 		    !sock_flag(sk, SOCK_DONE)) {
sk                957 net/vmw_vsock/virtio_transport_common.c 			sk->sk_state_change(sk);
sk                972 net/vmw_vsock/virtio_transport_common.c virtio_transport_recv_disconnecting(struct sock *sk,
sk                975 net/vmw_vsock/virtio_transport_common.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                999 net/vmw_vsock/virtio_transport_common.c virtio_transport_recv_listen(struct sock *sk, struct virtio_vsock_pkt *pkt)
sk               1001 net/vmw_vsock/virtio_transport_common.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk               1010 net/vmw_vsock/virtio_transport_common.c 	if (sk_acceptq_is_full(sk)) {
sk               1015 net/vmw_vsock/virtio_transport_common.c 	child = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
sk               1016 net/vmw_vsock/virtio_transport_common.c 			       sk->sk_type, 0);
sk               1022 net/vmw_vsock/virtio_transport_common.c 	sk->sk_ack_backlog++;
sk               1035 net/vmw_vsock/virtio_transport_common.c 	vsock_enqueue_accept(sk, child);
sk               1040 net/vmw_vsock/virtio_transport_common.c 	sk->sk_data_ready(sk);
sk               1044 net/vmw_vsock/virtio_transport_common.c static bool virtio_transport_space_update(struct sock *sk,
sk               1047 net/vmw_vsock/virtio_transport_common.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk               1067 net/vmw_vsock/virtio_transport_common.c 	struct sock *sk;
sk               1092 net/vmw_vsock/virtio_transport_common.c 	sk = vsock_find_connected_socket(&src, &dst);
sk               1093 net/vmw_vsock/virtio_transport_common.c 	if (!sk) {
sk               1094 net/vmw_vsock/virtio_transport_common.c 		sk = vsock_find_bound_socket(&dst);
sk               1095 net/vmw_vsock/virtio_transport_common.c 		if (!sk) {
sk               1101 net/vmw_vsock/virtio_transport_common.c 	vsk = vsock_sk(sk);
sk               1103 net/vmw_vsock/virtio_transport_common.c 	space_available = virtio_transport_space_update(sk, pkt);
sk               1105 net/vmw_vsock/virtio_transport_common.c 	lock_sock(sk);
sk               1111 net/vmw_vsock/virtio_transport_common.c 		sk->sk_write_space(sk);
sk               1113 net/vmw_vsock/virtio_transport_common.c 	switch (sk->sk_state) {
sk               1115 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_recv_listen(sk, pkt);
sk               1119 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_recv_connecting(sk, pkt);
sk               1123 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_recv_connected(sk, pkt);
sk               1126 net/vmw_vsock/virtio_transport_common.c 		virtio_transport_recv_disconnecting(sk, pkt);
sk               1133 net/vmw_vsock/virtio_transport_common.c 	release_sock(sk);
sk               1138 net/vmw_vsock/virtio_transport_common.c 	sock_put(sk);
sk                 39 net/vmw_vsock/vmci_transport.c static int vmci_transport_recv_listen(struct sock *sk,
sk                 42 net/vmw_vsock/vmci_transport.c 					struct sock *sk,
sk                 46 net/vmw_vsock/vmci_transport.c 					struct sock *sk,
sk                 49 net/vmw_vsock/vmci_transport.c 					struct sock *sk,
sk                 52 net/vmw_vsock/vmci_transport.c 					struct sock *sk,
sk                 54 net/vmw_vsock/vmci_transport.c static int vmci_transport_recv_connected(struct sock *sk,
sk                 58 net/vmw_vsock/vmci_transport.c static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
sk                 63 net/vmw_vsock/vmci_transport.c 	struct sock *sk;
sk                284 net/vmw_vsock/vmci_transport.c vmci_transport_send_control_pkt(struct sock *sk,
sk                294 net/vmw_vsock/vmci_transport.c 	vsk = vsock_sk(sk);
sk                320 net/vmw_vsock/vmci_transport.c static int vmci_transport_send_reset(struct sock *sk,
sk                330 net/vmw_vsock/vmci_transport.c 	vsk = vsock_sk(sk);
sk                348 net/vmw_vsock/vmci_transport.c static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
sk                351 net/vmw_vsock/vmci_transport.c 					sk,
sk                358 net/vmw_vsock/vmci_transport.c static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
sk                362 net/vmw_vsock/vmci_transport.c 					sk,
sk                368 net/vmw_vsock/vmci_transport.c static int vmci_transport_send_qp_offer(struct sock *sk,
sk                372 net/vmw_vsock/vmci_transport.c 					sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
sk                377 net/vmw_vsock/vmci_transport.c static int vmci_transport_send_attach(struct sock *sk,
sk                381 net/vmw_vsock/vmci_transport.c 					sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
sk                422 net/vmw_vsock/vmci_transport.c int vmci_transport_send_wrote(struct sock *sk)
sk                425 net/vmw_vsock/vmci_transport.c 					sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
sk                430 net/vmw_vsock/vmci_transport.c int vmci_transport_send_read(struct sock *sk)
sk                433 net/vmw_vsock/vmci_transport.c 					sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
sk                438 net/vmw_vsock/vmci_transport.c int vmci_transport_send_waiting_write(struct sock *sk,
sk                442 net/vmw_vsock/vmci_transport.c 				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
sk                447 net/vmw_vsock/vmci_transport.c int vmci_transport_send_waiting_read(struct sock *sk,
sk                451 net/vmw_vsock/vmci_transport.c 				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
sk                459 net/vmw_vsock/vmci_transport.c 					&vsk->sk,
sk                466 net/vmw_vsock/vmci_transport.c static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
sk                468 net/vmw_vsock/vmci_transport.c 	return vmci_transport_send_control_pkt(sk,
sk                475 net/vmw_vsock/vmci_transport.c static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
sk                479 net/vmw_vsock/vmci_transport.c 					sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
sk                620 net/vmw_vsock/vmci_transport.c 	struct sock *sk;
sk                625 net/vmw_vsock/vmci_transport.c 	sk = (struct sock *)data;
sk                636 net/vmw_vsock/vmci_transport.c 	vsk = vsock_sk(sk);
sk                648 net/vmw_vsock/vmci_transport.c 	sock_hold(sk);
sk                651 net/vmw_vsock/vmci_transport.c 	sk_receive_skb(sk, skb, 0);
sk                680 net/vmw_vsock/vmci_transport.c 	struct sock *sk;
sk                688 net/vmw_vsock/vmci_transport.c 	sk = NULL;
sk                713 net/vmw_vsock/vmci_transport.c 	sk = vsock_find_connected_socket(&src, &dst);
sk                714 net/vmw_vsock/vmci_transport.c 	if (!sk) {
sk                715 net/vmw_vsock/vmci_transport.c 		sk = vsock_find_bound_socket(&dst);
sk                716 net/vmw_vsock/vmci_transport.c 		if (!sk) {
sk                753 net/vmw_vsock/vmci_transport.c 	vsk = vsock_sk(sk);
sk                764 net/vmw_vsock/vmci_transport.c 	bh_lock_sock(sk);
sk                766 net/vmw_vsock/vmci_transport.c 	if (!sock_owned_by_user(sk)) {
sk                770 net/vmw_vsock/vmci_transport.c 		if (sk->sk_state == TCP_ESTABLISHED)
sk                772 net/vmw_vsock/vmci_transport.c 					sk, pkt, true, &dst, &src,
sk                776 net/vmw_vsock/vmci_transport.c 	bh_unlock_sock(sk);
sk                790 net/vmw_vsock/vmci_transport.c 		recv_pkt_info->sk = sk;
sk                800 net/vmw_vsock/vmci_transport.c 		sk = NULL;
sk                804 net/vmw_vsock/vmci_transport.c 	if (sk)
sk                805 net/vmw_vsock/vmci_transport.c 		sock_put(sk);
sk                810 net/vmw_vsock/vmci_transport.c static void vmci_transport_handle_detach(struct sock *sk)
sk                814 net/vmw_vsock/vmci_transport.c 	vsk = vsock_sk(sk);
sk                816 net/vmw_vsock/vmci_transport.c 		sock_set_flag(sk, SOCK_DONE);
sk                832 net/vmw_vsock/vmci_transport.c 			if (sk->sk_state == TCP_SYN_SENT) {
sk                841 net/vmw_vsock/vmci_transport.c 				sk->sk_state = TCP_CLOSE;
sk                842 net/vmw_vsock/vmci_transport.c 				sk->sk_err = ECONNRESET;
sk                843 net/vmw_vsock/vmci_transport.c 				sk->sk_error_report(sk);
sk                846 net/vmw_vsock/vmci_transport.c 			sk->sk_state = TCP_CLOSE;
sk                848 net/vmw_vsock/vmci_transport.c 		sk->sk_state_change(sk);
sk                875 net/vmw_vsock/vmci_transport.c 	if (!trans->sk)
sk                881 net/vmw_vsock/vmci_transport.c 	bh_lock_sock(trans->sk);
sk                883 net/vmw_vsock/vmci_transport.c 	vmci_transport_handle_detach(trans->sk);
sk                885 net/vmw_vsock/vmci_transport.c 	bh_unlock_sock(trans->sk);
sk                901 net/vmw_vsock/vmci_transport.c 	struct sock *sk;
sk                905 net/vmw_vsock/vmci_transport.c 	sk = recv_pkt_info->sk;
sk                908 net/vmw_vsock/vmci_transport.c 	lock_sock(sk);
sk                911 net/vmw_vsock/vmci_transport.c 	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;
sk                913 net/vmw_vsock/vmci_transport.c 	switch (sk->sk_state) {
sk                915 net/vmw_vsock/vmci_transport.c 		vmci_transport_recv_listen(sk, pkt);
sk                922 net/vmw_vsock/vmci_transport.c 		vmci_transport_recv_connecting_client(sk, pkt);
sk                925 net/vmw_vsock/vmci_transport.c 		vmci_transport_recv_connected(sk, pkt);
sk                934 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(sk, pkt);
sk                938 net/vmw_vsock/vmci_transport.c 	release_sock(sk);
sk                943 net/vmw_vsock/vmci_transport.c 	sock_put(sk);
sk                946 net/vmw_vsock/vmci_transport.c static int vmci_transport_recv_listen(struct sock *sk,
sk                965 net/vmw_vsock/vmci_transport.c 	pending = vmci_transport_get_pending(sk, pkt);
sk                974 net/vmw_vsock/vmci_transport.c 			err = vmci_transport_recv_connecting_server(sk,
sk                984 net/vmw_vsock/vmci_transport.c 			vsock_remove_pending(sk, pending);
sk               1011 net/vmw_vsock/vmci_transport.c 	if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
sk               1016 net/vmw_vsock/vmci_transport.c 	pending = __vsock_create(sock_net(sk), NULL, sk, GFP_KERNEL,
sk               1017 net/vmw_vsock/vmci_transport.c 				 sk->sk_type, 0);
sk               1019 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(sk, pkt);
sk               1094 net/vmw_vsock/vmci_transport.c 		vmci_transport_send_reset(sk, pkt);
sk               1100 net/vmw_vsock/vmci_transport.c 	vsock_add_pending(sk, pending);
sk               1101 net/vmw_vsock/vmci_transport.c 	sk->sk_ack_backlog++;
sk               1119 net/vmw_vsock/vmci_transport.c 	vpending->listener = sk;
sk               1120 net/vmw_vsock/vmci_transport.c 	sock_hold(sk);
sk               1276 net/vmw_vsock/vmci_transport.c vmci_transport_recv_connecting_client(struct sock *sk,
sk               1283 net/vmw_vsock/vmci_transport.c 	vsk = vsock_sk(sk);
sk               1300 net/vmw_vsock/vmci_transport.c 		sk->sk_state = TCP_ESTABLISHED;
sk               1301 net/vmw_vsock/vmci_transport.c 		sk->sk_socket->state = SS_CONNECTED;
sk               1303 net/vmw_vsock/vmci_transport.c 		sk->sk_state_change(sk);
sk               1322 net/vmw_vsock/vmci_transport.c 		err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
sk               1330 net/vmw_vsock/vmci_transport.c 		err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
sk               1366 net/vmw_vsock/vmci_transport.c 	vmci_transport_send_reset(sk, pkt);
sk               1368 net/vmw_vsock/vmci_transport.c 	sk->sk_state = TCP_CLOSE;
sk               1369 net/vmw_vsock/vmci_transport.c 	sk->sk_err = skerr;
sk               1370 net/vmw_vsock/vmci_transport.c 	sk->sk_error_report(sk);
sk               1375 net/vmw_vsock/vmci_transport.c 					struct sock *sk,
sk               1389 net/vmw_vsock/vmci_transport.c 	vsk = vsock_sk(sk);
sk               1430 net/vmw_vsock/vmci_transport.c 	if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
sk               1467 net/vmw_vsock/vmci_transport.c 	err = vmci_transport_send_qp_offer(sk, handle);
sk               1481 net/vmw_vsock/vmci_transport.c 	vmci_trans(vsk)->notify_ops->process_negotiate(sk);
sk               1496 net/vmw_vsock/vmci_transport.c vmci_transport_recv_connecting_client_invalid(struct sock *sk,
sk               1500 net/vmw_vsock/vmci_transport.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk               1507 net/vmw_vsock/vmci_transport.c 			sk, vmci_trans(vsk)->queue_pair_size);
sk               1518 net/vmw_vsock/vmci_transport.c static int vmci_transport_recv_connected(struct sock *sk,
sk               1535 net/vmw_vsock/vmci_transport.c 			vsk = vsock_sk(sk);
sk               1538 net/vmw_vsock/vmci_transport.c 			sk->sk_state_change(sk);
sk               1543 net/vmw_vsock/vmci_transport.c 		vsk = vsock_sk(sk);
sk               1553 net/vmw_vsock/vmci_transport.c 		sock_set_flag(sk, SOCK_DONE);
sk               1556 net/vmw_vsock/vmci_transport.c 			sk->sk_state = TCP_CLOSING;
sk               1558 net/vmw_vsock/vmci_transport.c 		sk->sk_state_change(sk);
sk               1562 net/vmw_vsock/vmci_transport.c 		vsk = vsock_sk(sk);
sk               1564 net/vmw_vsock/vmci_transport.c 				sk, pkt, false, NULL, NULL,
sk               1589 net/vmw_vsock/vmci_transport.c 	vmci_trans(vsk)->sk = &vsk->sk;
sk               1654 net/vmw_vsock/vmci_transport.c 	vmci_trans(vsk)->sk = NULL;
sk               1699 net/vmw_vsock/vmci_transport.c 						 &vsk->sk,
sk               1762 net/vmw_vsock/vmci_transport.c 	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
sk               1797 net/vmw_vsock/vmci_transport.c 	skb_free_datagram(&vsk->sk, skb);
sk               1817 net/vmw_vsock/vmci_transport.c 	struct sock *sk = &vsk->sk;
sk               1822 net/vmw_vsock/vmci_transport.c 			sk, vmci_trans(vsk)->queue_pair_size);
sk               1824 net/vmw_vsock/vmci_transport.c 			sk->sk_state = TCP_CLOSE;
sk               1831 net/vmw_vsock/vmci_transport.c 				sk, vmci_trans(vsk)->queue_pair_size,
sk               1834 net/vmw_vsock/vmci_transport.c 			sk->sk_state = TCP_CLOSE;
sk               1930 net/vmw_vsock/vmci_transport.c 			&vsk->sk, target, data_ready_now);
sk               1939 net/vmw_vsock/vmci_transport.c 			&vsk->sk, target, space_available_now);
sk               1948 net/vmw_vsock/vmci_transport.c 			&vsk->sk, target,
sk               1958 net/vmw_vsock/vmci_transport.c 			&vsk->sk, target,
sk               1968 net/vmw_vsock/vmci_transport.c 			&vsk->sk, target,
sk               1980 net/vmw_vsock/vmci_transport.c 			&vsk->sk, target, copied, data_read,
sk               1989 net/vmw_vsock/vmci_transport.c 			&vsk->sk,
sk               1998 net/vmw_vsock/vmci_transport.c 			&vsk->sk,
sk               2007 net/vmw_vsock/vmci_transport.c 			&vsk->sk,
sk               2017 net/vmw_vsock/vmci_transport.c 			&vsk->sk, written,
sk               2036 net/vmw_vsock/vmci_transport.c static bool vmci_transport_proto_to_notify_struct(struct sock *sk,
sk               2040 net/vmw_vsock/vmci_transport.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk               2062 net/vmw_vsock/vmci_transport.c 	vmci_trans(vsk)->notify_ops->socket_init(sk);
sk                118 net/vmw_vsock/vmci_transport.h 	struct sock *sk;
sk                129 net/vmw_vsock/vmci_transport.h int vmci_transport_send_wrote(struct sock *sk);
sk                130 net/vmw_vsock/vmci_transport.h int vmci_transport_send_read(struct sock *sk);
sk                131 net/vmw_vsock/vmci_transport.h int vmci_transport_send_waiting_write(struct sock *sk,
sk                133 net/vmw_vsock/vmci_transport.h int vmci_transport_send_waiting_read(struct sock *sk,
sk                105 net/vmw_vsock/vmci_transport_notify.c vmci_transport_handle_waiting_read(struct sock *sk,
sk                114 net/vmw_vsock/vmci_transport_notify.c 	vsk = vsock_sk(sk);
sk                126 net/vmw_vsock/vmci_transport_notify.c 			sent = vmci_transport_send_wrote(sk) > 0;
sk                135 net/vmw_vsock/vmci_transport_notify.c vmci_transport_handle_waiting_write(struct sock *sk,
sk                144 net/vmw_vsock/vmci_transport_notify.c 	vsk = vsock_sk(sk);
sk                156 net/vmw_vsock/vmci_transport_notify.c 			sent = vmci_transport_send_read(sk) > 0;
sk                165 net/vmw_vsock/vmci_transport_notify.c vmci_transport_handle_read(struct sock *sk,
sk                173 net/vmw_vsock/vmci_transport_notify.c 	vsk = vsock_sk(sk);
sk                177 net/vmw_vsock/vmci_transport_notify.c 	sk->sk_write_space(sk);
sk                180 net/vmw_vsock/vmci_transport_notify.c static bool send_waiting_read(struct sock *sk, u64 room_needed)
sk                190 net/vmw_vsock/vmci_transport_notify.c 	vsk = vsock_sk(sk);
sk                212 net/vmw_vsock/vmci_transport_notify.c 	ret = vmci_transport_send_waiting_read(sk, &waiting_info) > 0;
sk                222 net/vmw_vsock/vmci_transport_notify.c static bool send_waiting_write(struct sock *sk, u64 room_needed)
sk                232 net/vmw_vsock/vmci_transport_notify.c 	vsk = vsock_sk(sk);
sk                249 net/vmw_vsock/vmci_transport_notify.c 	ret = vmci_transport_send_waiting_write(sk, &waiting_info) > 0;
sk                259 net/vmw_vsock/vmci_transport_notify.c static int vmci_transport_send_read_notification(struct sock *sk)
sk                266 net/vmw_vsock/vmci_transport_notify.c 	vsk = vsock_sk(sk);
sk                282 net/vmw_vsock/vmci_transport_notify.c 			err = vmci_transport_send_read(sk);
sk                290 net/vmw_vsock/vmci_transport_notify.c 			pr_err("%p unable to send read notify to peer\n", sk);
sk                301 net/vmw_vsock/vmci_transport_notify.c vmci_transport_handle_wrote(struct sock *sk,
sk                307 net/vmw_vsock/vmci_transport_notify.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                310 net/vmw_vsock/vmci_transport_notify.c 	sk->sk_data_ready(sk);
sk                313 net/vmw_vsock/vmci_transport_notify.c static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
sk                315 net/vmw_vsock/vmci_transport_notify.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                338 net/vmw_vsock/vmci_transport_notify.c vmci_transport_notify_pkt_poll_in(struct sock *sk,
sk                341 net/vmw_vsock/vmci_transport_notify.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                350 net/vmw_vsock/vmci_transport_notify.c 		if (sk->sk_state == TCP_ESTABLISHED) {
sk                351 net/vmw_vsock/vmci_transport_notify.c 			if (!send_waiting_read(sk, 1))
sk                362 net/vmw_vsock/vmci_transport_notify.c vmci_transport_notify_pkt_poll_out(struct sock *sk,
sk                366 net/vmw_vsock/vmci_transport_notify.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                381 net/vmw_vsock/vmci_transport_notify.c 		if (!send_waiting_write(sk, 1))
sk                392 net/vmw_vsock/vmci_transport_notify.c 			struct sock *sk,
sk                396 net/vmw_vsock/vmci_transport_notify.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                428 net/vmw_vsock/vmci_transport_notify.c 				struct sock *sk,
sk                435 net/vmw_vsock/vmci_transport_notify.c 	if (!send_waiting_read(sk, target)) {
sk                441 net/vmw_vsock/vmci_transport_notify.c 		err = vmci_transport_send_read_notification(sk);
sk                454 net/vmw_vsock/vmci_transport_notify.c 				struct sock *sk,
sk                458 net/vmw_vsock/vmci_transport_notify.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                474 net/vmw_vsock/vmci_transport_notify.c 				struct sock *sk,
sk                483 net/vmw_vsock/vmci_transport_notify.c 	vsk = vsock_sk(sk);
sk                497 net/vmw_vsock/vmci_transport_notify.c 		err = vmci_transport_send_read_notification(sk);
sk                507 net/vmw_vsock/vmci_transport_notify.c 			struct sock *sk,
sk                520 net/vmw_vsock/vmci_transport_notify.c 				struct sock *sk,
sk                524 net/vmw_vsock/vmci_transport_notify.c 	if (!send_waiting_write(sk, 1))
sk                532 net/vmw_vsock/vmci_transport_notify.c 				struct sock *sk,
sk                535 net/vmw_vsock/vmci_transport_notify.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                548 net/vmw_vsock/vmci_transport_notify.c 				struct sock *sk,
sk                557 net/vmw_vsock/vmci_transport_notify.c 	vsk = vsock_sk(sk);
sk                578 net/vmw_vsock/vmci_transport_notify.c 			err = vmci_transport_send_wrote(sk);
sk                586 net/vmw_vsock/vmci_transport_notify.c 			pr_err("%p unable to send wrote notify to peer\n", sk);
sk                599 net/vmw_vsock/vmci_transport_notify.c 			struct sock *sk,
sk                609 net/vmw_vsock/vmci_transport_notify.c 		vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
sk                613 net/vmw_vsock/vmci_transport_notify.c 		vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
sk                617 net/vmw_vsock/vmci_transport_notify.c 		vmci_transport_handle_waiting_write(sk, pkt, bottom_half,
sk                623 net/vmw_vsock/vmci_transport_notify.c 		vmci_transport_handle_waiting_read(sk, pkt, bottom_half,
sk                633 net/vmw_vsock/vmci_transport_notify.c static void vmci_transport_notify_pkt_process_request(struct sock *sk)
sk                635 net/vmw_vsock/vmci_transport_notify.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                644 net/vmw_vsock/vmci_transport_notify.c static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
sk                646 net/vmw_vsock/vmci_transport_notify.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                 40 net/vmw_vsock/vmci_transport_notify.h 	void (*socket_init) (struct sock *sk);
sk                 42 net/vmw_vsock/vmci_transport_notify.h 	int (*poll_in) (struct sock *sk, size_t target,
sk                 44 net/vmw_vsock/vmci_transport_notify.h 	int (*poll_out) (struct sock *sk, size_t target,
sk                 46 net/vmw_vsock/vmci_transport_notify.h 	void (*handle_notify_pkt) (struct sock *sk,
sk                 51 net/vmw_vsock/vmci_transport_notify.h 	int (*recv_init) (struct sock *sk, size_t target,
sk                 53 net/vmw_vsock/vmci_transport_notify.h 	int (*recv_pre_block) (struct sock *sk, size_t target,
sk                 55 net/vmw_vsock/vmci_transport_notify.h 	int (*recv_pre_dequeue) (struct sock *sk, size_t target,
sk                 57 net/vmw_vsock/vmci_transport_notify.h 	int (*recv_post_dequeue) (struct sock *sk, size_t target,
sk                 60 net/vmw_vsock/vmci_transport_notify.h 	int (*send_init) (struct sock *sk,
sk                 62 net/vmw_vsock/vmci_transport_notify.h 	int (*send_pre_block) (struct sock *sk,
sk                 64 net/vmw_vsock/vmci_transport_notify.h 	int (*send_pre_enqueue) (struct sock *sk,
sk                 66 net/vmw_vsock/vmci_transport_notify.h 	int (*send_post_enqueue) (struct sock *sk, ssize_t written,
sk                 68 net/vmw_vsock/vmci_transport_notify.h 	void (*process_request) (struct sock *sk);
sk                 69 net/vmw_vsock/vmci_transport_notify.h 	void (*process_negotiate) (struct sock *sk);
sk                 73 net/vmw_vsock/vmci_transport_notify_qstate.c vmci_transport_handle_read(struct sock *sk,
sk                 78 net/vmw_vsock/vmci_transport_notify_qstate.c 	sk->sk_write_space(sk);
sk                 82 net/vmw_vsock/vmci_transport_notify_qstate.c vmci_transport_handle_wrote(struct sock *sk,
sk                 87 net/vmw_vsock/vmci_transport_notify_qstate.c 	sk->sk_data_ready(sk);
sk                 90 net/vmw_vsock/vmci_transport_notify_qstate.c static void vsock_block_update_write_window(struct sock *sk)
sk                 92 net/vmw_vsock/vmci_transport_notify_qstate.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                100 net/vmw_vsock/vmci_transport_notify_qstate.c static int vmci_transport_send_read_notification(struct sock *sk)
sk                107 net/vmw_vsock/vmci_transport_notify_qstate.c 	vsk = vsock_sk(sk);
sk                123 net/vmw_vsock/vmci_transport_notify_qstate.c 			err = vmci_transport_send_read(sk);
sk                132 net/vmw_vsock/vmci_transport_notify_qstate.c 			       sk);
sk                140 net/vmw_vsock/vmci_transport_notify_qstate.c static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
sk                142 net/vmw_vsock/vmci_transport_notify_qstate.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                159 net/vmw_vsock/vmci_transport_notify_qstate.c vmci_transport_notify_pkt_poll_in(struct sock *sk,
sk                162 net/vmw_vsock/vmci_transport_notify_qstate.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                171 net/vmw_vsock/vmci_transport_notify_qstate.c 		if (sk->sk_state == TCP_ESTABLISHED)
sk                172 net/vmw_vsock/vmci_transport_notify_qstate.c 			vsock_block_update_write_window(sk);
sk                180 net/vmw_vsock/vmci_transport_notify_qstate.c vmci_transport_notify_pkt_poll_out(struct sock *sk,
sk                184 net/vmw_vsock/vmci_transport_notify_qstate.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                202 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct sock *sk,
sk                206 net/vmw_vsock/vmci_transport_notify_qstate.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                234 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct sock *sk,
sk                240 net/vmw_vsock/vmci_transport_notify_qstate.c 	vsock_block_update_write_window(sk);
sk                243 net/vmw_vsock/vmci_transport_notify_qstate.c 		err = vmci_transport_send_read_notification(sk);
sk                254 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct sock *sk,
sk                265 net/vmw_vsock/vmci_transport_notify_qstate.c 	vsk = vsock_sk(sk);
sk                278 net/vmw_vsock/vmci_transport_notify_qstate.c 		err = vmci_transport_send_read_notification(sk);
sk                285 net/vmw_vsock/vmci_transport_notify_qstate.c 		sk->sk_data_ready(sk);
sk                293 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct sock *sk,
sk                304 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct sock *sk,
sk                314 net/vmw_vsock/vmci_transport_notify_qstate.c 	vsk = vsock_sk(sk);
sk                324 net/vmw_vsock/vmci_transport_notify_qstate.c 			err = vmci_transport_send_wrote(sk);
sk                334 net/vmw_vsock/vmci_transport_notify_qstate.c 		       sk);
sk                343 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct sock *sk,
sk                353 net/vmw_vsock/vmci_transport_notify_qstate.c 		vmci_transport_handle_wrote(sk, pkt, bottom_half, dst, src);
sk                357 net/vmw_vsock/vmci_transport_notify_qstate.c 		vmci_transport_handle_read(sk, pkt, bottom_half, dst, src);
sk                366 net/vmw_vsock/vmci_transport_notify_qstate.c static void vmci_transport_notify_pkt_process_request(struct sock *sk)
sk                368 net/vmw_vsock/vmci_transport_notify_qstate.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                377 net/vmw_vsock/vmci_transport_notify_qstate.c static void vmci_transport_notify_pkt_process_negotiate(struct sock *sk)
sk                379 net/vmw_vsock/vmci_transport_notify_qstate.c 	struct vsock_sock *vsk = vsock_sk(sk);
sk                390 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct sock *sk,
sk                399 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct sock *sk,
sk                407 net/vmw_vsock/vmci_transport_notify_qstate.c 				struct sock *sk,
sk                823 net/wireless/nl80211.c 		*wdev = __cfg80211_wdev_from_attrs(sock_net(cb->skb->sk),
sk               2492 net/wireless/nl80211.c 		netdev = __dev_get_by_index(sock_net(skb->sk), ifidx);
sk               2534 net/wireless/nl80211.c 		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
sk               3323 net/wireless/nl80211.c 		if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
sk               9819 net/wireless/nl80211.c 		rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
sk               11478 net/wireless/nl80211.c 	if (inet_csk_get_port(cfg->sock->sk, port)) {
sk               11483 net/wireless/nl80211.c 	cfg->src_port = inet_sk(cfg->sock->sk)->inet_num;
sk               13031 net/wireless/nl80211.c 	*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf);
sk               13035 net/wireless/nl80211.c 	*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf);
sk                195 net/x25/af_x25.c static void x25_remove_socket(struct sock *sk)
sk                198 net/x25/af_x25.c 	sk_del_node_init(sk);
sk                260 net/x25/af_x25.c static void x25_insert_socket(struct sock *sk)
sk                263 net/x25/af_x25.c 	sk_add_node(sk, &x25_list);
sk                348 net/x25/af_x25.c 	struct sock *sk;
sk                350 net/x25/af_x25.c 	while ((sk = x25_find_socket(lci, nb)) != NULL) {
sk                351 net/x25/af_x25.c 		sock_put(sk);
sk                372 net/x25/af_x25.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                374 net/x25/af_x25.c 	x25_destroy_socket_from_timer(sk);
sk                384 net/x25/af_x25.c static void __x25_destroy_socket(struct sock *sk)
sk                388 net/x25/af_x25.c 	x25_stop_heartbeat(sk);
sk                389 net/x25/af_x25.c 	x25_stop_timer(sk);
sk                391 net/x25/af_x25.c 	x25_remove_socket(sk);
sk                392 net/x25/af_x25.c 	x25_clear_queues(sk);		/* Flush the queues */
sk                394 net/x25/af_x25.c 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
sk                395 net/x25/af_x25.c 		if (skb->sk != sk) {		/* A pending connection */
sk                399 net/x25/af_x25.c 			skb->sk->sk_state = TCP_LISTEN;
sk                400 net/x25/af_x25.c 			sock_set_flag(skb->sk, SOCK_DEAD);
sk                401 net/x25/af_x25.c 			x25_start_heartbeat(skb->sk);
sk                402 net/x25/af_x25.c 			x25_sk(skb->sk)->state = X25_STATE_0;
sk                408 net/x25/af_x25.c 	if (sk_has_allocations(sk)) {
sk                410 net/x25/af_x25.c 		sk->sk_timer.expires  = jiffies + 10 * HZ;
sk                411 net/x25/af_x25.c 		sk->sk_timer.function = x25_destroy_timer;
sk                412 net/x25/af_x25.c 		add_timer(&sk->sk_timer);
sk                415 net/x25/af_x25.c 		__sock_put(sk);
sk                419 net/x25/af_x25.c void x25_destroy_socket_from_timer(struct sock *sk)
sk                421 net/x25/af_x25.c 	sock_hold(sk);
sk                422 net/x25/af_x25.c 	bh_lock_sock(sk);
sk                423 net/x25/af_x25.c 	__x25_destroy_socket(sk);
sk                424 net/x25/af_x25.c 	bh_unlock_sock(sk);
sk                425 net/x25/af_x25.c 	sock_put(sk);
sk                437 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk                452 net/x25/af_x25.c 		set_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
sk                454 net/x25/af_x25.c 		clear_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
sk                463 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk                483 net/x25/af_x25.c 	val = test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags);
sk                491 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk                494 net/x25/af_x25.c 	lock_sock(sk);
sk                495 net/x25/af_x25.c 	if (sk->sk_state != TCP_LISTEN) {
sk                496 net/x25/af_x25.c 		memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
sk                497 net/x25/af_x25.c 		sk->sk_max_ack_backlog = backlog;
sk                498 net/x25/af_x25.c 		sk->sk_state           = TCP_LISTEN;
sk                501 net/x25/af_x25.c 	release_sock(sk);
sk                515 net/x25/af_x25.c 	struct sock *sk = sk_alloc(net, AF_X25, GFP_ATOMIC, &x25_proto, kern);
sk                517 net/x25/af_x25.c 	if (!sk)
sk                520 net/x25/af_x25.c 	sock_init_data(NULL, sk);
sk                522 net/x25/af_x25.c 	x25 = x25_sk(sk);
sk                528 net/x25/af_x25.c 	return sk;
sk                534 net/x25/af_x25.c 	struct sock *sk;
sk                550 net/x25/af_x25.c 	if ((sk = x25_alloc_socket(net, kern)) == NULL)
sk                553 net/x25/af_x25.c 	x25 = x25_sk(sk);
sk                555 net/x25/af_x25.c 	sock_init_data(sock, sk);
sk                557 net/x25/af_x25.c 	x25_init_timers(sk);
sk                560 net/x25/af_x25.c 	sk->sk_protocol = protocol;
sk                561 net/x25/af_x25.c 	sk->sk_backlog_rcv = x25_backlog_rcv;
sk                593 net/x25/af_x25.c 	struct sock *sk = NULL;
sk                599 net/x25/af_x25.c 	if ((sk = x25_alloc_socket(sock_net(osk), 0)) == NULL)
sk                602 net/x25/af_x25.c 	x25 = x25_sk(sk);
sk                604 net/x25/af_x25.c 	sk->sk_type        = osk->sk_type;
sk                605 net/x25/af_x25.c 	sk->sk_priority    = osk->sk_priority;
sk                606 net/x25/af_x25.c 	sk->sk_protocol    = osk->sk_protocol;
sk                607 net/x25/af_x25.c 	sk->sk_rcvbuf      = osk->sk_rcvbuf;
sk                608 net/x25/af_x25.c 	sk->sk_sndbuf      = osk->sk_sndbuf;
sk                609 net/x25/af_x25.c 	sk->sk_state       = TCP_ESTABLISHED;
sk                610 net/x25/af_x25.c 	sk->sk_backlog_rcv = osk->sk_backlog_rcv;
sk                611 net/x25/af_x25.c 	sock_copy_flags(sk, osk);
sk                624 net/x25/af_x25.c 	x25_init_timers(sk);
sk                626 net/x25/af_x25.c 	return sk;
sk                631 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk                634 net/x25/af_x25.c 	if (!sk)
sk                637 net/x25/af_x25.c 	x25 = x25_sk(sk);
sk                639 net/x25/af_x25.c 	sock_hold(sk);
sk                640 net/x25/af_x25.c 	lock_sock(sk);
sk                645 net/x25/af_x25.c 			x25_disconnect(sk, 0, 0, 0);
sk                646 net/x25/af_x25.c 			__x25_destroy_socket(sk);
sk                652 net/x25/af_x25.c 			x25_clear_queues(sk);
sk                653 net/x25/af_x25.c 			x25_write_internal(sk, X25_CLEAR_REQUEST);
sk                654 net/x25/af_x25.c 			x25_start_t23timer(sk);
sk                656 net/x25/af_x25.c 			sk->sk_state	= TCP_CLOSE;
sk                657 net/x25/af_x25.c 			sk->sk_shutdown	|= SEND_SHUTDOWN;
sk                658 net/x25/af_x25.c 			sk->sk_state_change(sk);
sk                659 net/x25/af_x25.c 			sock_set_flag(sk, SOCK_DEAD);
sk                660 net/x25/af_x25.c 			sock_set_flag(sk, SOCK_DESTROY);
sk                664 net/x25/af_x25.c 	sock_orphan(sk);
sk                666 net/x25/af_x25.c 	release_sock(sk);
sk                667 net/x25/af_x25.c 	sock_put(sk);
sk                673 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk                695 net/x25/af_x25.c 	lock_sock(sk);
sk                696 net/x25/af_x25.c 	if (sock_flag(sk, SOCK_ZAPPED)) {
sk                697 net/x25/af_x25.c 		x25_sk(sk)->source_addr = addr->sx25_addr;
sk                698 net/x25/af_x25.c 		x25_insert_socket(sk);
sk                699 net/x25/af_x25.c 		sock_reset_flag(sk, SOCK_ZAPPED);
sk                703 net/x25/af_x25.c 	release_sock(sk);
sk                704 net/x25/af_x25.c 	SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
sk                709 net/x25/af_x25.c static int x25_wait_for_connection_establishment(struct sock *sk)
sk                714 net/x25/af_x25.c 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
sk                720 net/x25/af_x25.c 		rc = sock_error(sk);
sk                722 net/x25/af_x25.c 			sk->sk_socket->state = SS_UNCONNECTED;
sk                726 net/x25/af_x25.c 		if (sk->sk_state != TCP_ESTABLISHED) {
sk                727 net/x25/af_x25.c 			release_sock(sk);
sk                729 net/x25/af_x25.c 			lock_sock(sk);
sk                734 net/x25/af_x25.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                741 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk                742 net/x25/af_x25.c 	struct x25_sock *x25 = x25_sk(sk);
sk                747 net/x25/af_x25.c 	lock_sock(sk);
sk                748 net/x25/af_x25.c 	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sk                754 net/x25/af_x25.c 	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
sk                760 net/x25/af_x25.c 	if (sk->sk_state == TCP_ESTABLISHED)
sk                764 net/x25/af_x25.c 	if (sk->sk_state == TCP_SYN_SENT)
sk                767 net/x25/af_x25.c 	sk->sk_state   = TCP_CLOSE;
sk                791 net/x25/af_x25.c 	if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
sk                801 net/x25/af_x25.c 	sk->sk_state  = TCP_SYN_SENT;
sk                805 net/x25/af_x25.c 	x25_write_internal(sk, X25_CALL_REQUEST);
sk                807 net/x25/af_x25.c 	x25_start_heartbeat(sk);
sk                808 net/x25/af_x25.c 	x25_start_t21timer(sk);
sk                812 net/x25/af_x25.c 	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
sk                815 net/x25/af_x25.c 	rc = x25_wait_for_connection_establishment(sk);
sk                832 net/x25/af_x25.c 	release_sock(sk);
sk                836 net/x25/af_x25.c static int x25_wait_for_data(struct sock *sk, long timeout)
sk                841 net/x25/af_x25.c 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
sk                844 net/x25/af_x25.c 		if (sk->sk_shutdown & RCV_SHUTDOWN)
sk                853 net/x25/af_x25.c 		if (skb_queue_empty(&sk->sk_receive_queue)) {
sk                854 net/x25/af_x25.c 			release_sock(sk);
sk                856 net/x25/af_x25.c 			lock_sock(sk);
sk                861 net/x25/af_x25.c 	remove_wait_queue(sk_sleep(sk), &wait);
sk                868 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk                873 net/x25/af_x25.c 	if (!sk)
sk                877 net/x25/af_x25.c 	if (sk->sk_type != SOCK_SEQPACKET)
sk                880 net/x25/af_x25.c 	lock_sock(sk);
sk                882 net/x25/af_x25.c 	if (sk->sk_state != TCP_LISTEN)
sk                885 net/x25/af_x25.c 	rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
sk                888 net/x25/af_x25.c 	skb = skb_dequeue(&sk->sk_receive_queue);
sk                890 net/x25/af_x25.c 	if (!skb->sk)
sk                892 net/x25/af_x25.c 	newsk		 = skb->sk;
sk                896 net/x25/af_x25.c 	skb->sk = NULL;
sk                898 net/x25/af_x25.c 	sk->sk_ack_backlog--;
sk                902 net/x25/af_x25.c 	release_sock(sk);
sk                911 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk                912 net/x25/af_x25.c 	struct x25_sock *x25 = x25_sk(sk);
sk                916 net/x25/af_x25.c 		if (sk->sk_state != TCP_ESTABLISHED) {
sk                934 net/x25/af_x25.c 	struct sock *sk;
sk                988 net/x25/af_x25.c 	sk = x25_find_listener(&source_addr,skb);
sk                991 net/x25/af_x25.c 	if (sk != NULL && sk_acceptq_is_full(sk)) {
sk                999 net/x25/af_x25.c 	if (sk == NULL) {
sk               1017 net/x25/af_x25.c 	len = x25_negotiate_facilities(skb, sk, &facilities, &dte_facilities);
sk               1031 net/x25/af_x25.c 	make = x25_make_new(sk);
sk               1040 net/x25/af_x25.c 	skb->sk     = make;
sk               1050 net/x25/af_x25.c 	makex25->vc_facil_mask = x25_sk(sk)->vc_facil_mask;
sk               1055 net/x25/af_x25.c 	makex25->cudmatchlength = x25_sk(sk)->cudmatchlength;
sk               1069 net/x25/af_x25.c 	sk->sk_ack_backlog++;
sk               1073 net/x25/af_x25.c 	skb_queue_head(&sk->sk_receive_queue, skb);
sk               1077 net/x25/af_x25.c 	if (!sock_flag(sk, SOCK_DEAD))
sk               1078 net/x25/af_x25.c 		sk->sk_data_ready(sk);
sk               1080 net/x25/af_x25.c 	sock_put(sk);
sk               1084 net/x25/af_x25.c 	sock_put(sk);
sk               1093 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk               1094 net/x25/af_x25.c 	struct x25_sock *x25 = x25_sk(sk);
sk               1103 net/x25/af_x25.c 	lock_sock(sk);
sk               1112 net/x25/af_x25.c 	if (sock_flag(sk, SOCK_ZAPPED))
sk               1116 net/x25/af_x25.c 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
sk               1143 net/x25/af_x25.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1156 net/x25/af_x25.c 	SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
sk               1159 net/x25/af_x25.c 	SOCK_DEBUG(sk, "x25_sendmsg: sendto: building packet.\n");
sk               1166 net/x25/af_x25.c 	release_sock(sk);
sk               1167 net/x25/af_x25.c 	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
sk               1168 net/x25/af_x25.c 	lock_sock(sk);
sk               1178 net/x25/af_x25.c 	SOCK_DEBUG(sk, "x25_sendmsg: Copying user data\n");
sk               1202 net/x25/af_x25.c 	SOCK_DEBUG(sk, "x25_sendmsg: Building X.25 Header.\n");
sk               1236 net/x25/af_x25.c 	SOCK_DEBUG(sk, "x25_sendmsg: Built header.\n");
sk               1237 net/x25/af_x25.c 	SOCK_DEBUG(sk, "x25_sendmsg: Transmitting buffer\n");
sk               1240 net/x25/af_x25.c 	if (sk->sk_state != TCP_ESTABLISHED)
sk               1246 net/x25/af_x25.c 		rc = x25_output(sk, skb);
sk               1254 net/x25/af_x25.c 	x25_kick(sk);
sk               1257 net/x25/af_x25.c 	release_sock(sk);
sk               1268 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk               1269 net/x25/af_x25.c 	struct x25_sock *x25 = x25_sk(sk);
sk               1277 net/x25/af_x25.c 	lock_sock(sk);
sk               1289 net/x25/af_x25.c 	if (sk->sk_state != TCP_ESTABLISHED)
sk               1294 net/x25/af_x25.c 		if (sock_flag(sk, SOCK_URGINLINE) ||
sk               1316 net/x25/af_x25.c 		release_sock(sk);
sk               1317 net/x25/af_x25.c 		skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
sk               1319 net/x25/af_x25.c 		lock_sock(sk);
sk               1357 net/x25/af_x25.c 	x25_check_rbuf(sk);
sk               1360 net/x25/af_x25.c 	skb_free_datagram(sk, skb);
sk               1362 net/x25/af_x25.c 	release_sock(sk);
sk               1369 net/x25/af_x25.c 	struct sock *sk = sock->sk;
sk               1370 net/x25/af_x25.c 	struct x25_sock *x25 = x25_sk(sk);
sk               1378 net/x25/af_x25.c 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
sk               1392 net/x25/af_x25.c 		lock_sock(sk);
sk               1393 net/x25/af_x25.c 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
sk               1395 net/x25/af_x25.c 		release_sock(sk);
sk               1429 net/x25/af_x25.c 		lock_sock(sk);
sk               1433 net/x25/af_x25.c 		release_sock(sk);
sk               1443 net/x25/af_x25.c 		lock_sock(sk);
sk               1444 net/x25/af_x25.c 		if (sk->sk_state != TCP_LISTEN &&
sk               1445 net/x25/af_x25.c 		    sk->sk_state != TCP_CLOSE)
sk               1476 net/x25/af_x25.c 		release_sock(sk);
sk               1481 net/x25/af_x25.c 		lock_sock(sk);
sk               1484 net/x25/af_x25.c 		release_sock(sk);
sk               1496 net/x25/af_x25.c 		lock_sock(sk);
sk               1497 net/x25/af_x25.c 		if (sk->sk_state != TCP_LISTEN &&
sk               1498 net/x25/af_x25.c 		    sk->sk_state != TCP_CLOSE)
sk               1507 net/x25/af_x25.c 		release_sock(sk);
sk               1512 net/x25/af_x25.c 		lock_sock(sk);
sk               1516 net/x25/af_x25.c 		release_sock(sk);
sk               1529 net/x25/af_x25.c 		lock_sock(sk);
sk               1531 net/x25/af_x25.c 		release_sock(sk);
sk               1537 net/x25/af_x25.c 		lock_sock(sk);
sk               1540 net/x25/af_x25.c 		release_sock(sk);
sk               1549 net/x25/af_x25.c 		lock_sock(sk);
sk               1551 net/x25/af_x25.c 		release_sock(sk);
sk               1560 net/x25/af_x25.c 		lock_sock(sk);
sk               1561 net/x25/af_x25.c 		if(sk->sk_state != TCP_CLOSE)
sk               1573 net/x25/af_x25.c 		release_sock(sk);
sk               1579 net/x25/af_x25.c 		lock_sock(sk);
sk               1580 net/x25/af_x25.c 		if (sk->sk_state == TCP_CLOSE) {
sk               1584 net/x25/af_x25.c 		release_sock(sk);
sk               1590 net/x25/af_x25.c 		lock_sock(sk);
sk               1591 net/x25/af_x25.c 		if (sk->sk_state != TCP_ESTABLISHED)
sk               1596 net/x25/af_x25.c 		x25_write_internal(sk, X25_CALL_ACCEPTED);
sk               1600 net/x25/af_x25.c 		release_sock(sk);
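
The af_x25.c entries above cover the PF_X25 socket lifecycle (x25_create, x25_bind, x25_listen, x25_accept, x25_release). For orientation, a minimal user-space sketch of the listening side looks roughly like this; the X.121 address string is a placeholder, and the kernel must be built with X.25 support for the socket() call to succeed:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/x25.h>

int main(void)
{
	struct sockaddr_x25 addr;
	int fd, peer;

	/* SOCK_SEQPACKET with protocol 0 is the only combination x25_create() accepts. */
	fd = socket(AF_X25, SOCK_SEQPACKET, 0);
	if (fd < 0) {
		perror("socket(AF_X25)");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sx25_family = AF_X25;
	/* Placeholder X.121 address. */
	strncpy(addr.sx25_addr.x25_addr, "2345", sizeof(addr.sx25_addr.x25_addr) - 1);

	/* x25_bind() stores the source address and clears SOCK_ZAPPED. */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("bind");

	/* x25_listen() moves sk->sk_state to TCP_LISTEN. */
	if (listen(fd, 4) < 0)
		perror("listen");

	/* x25_accept() dequeues a pending connection from sk_receive_queue. */
	peer = accept(fd, NULL, NULL);
	if (peer < 0)
		perror("accept");

	return 0;
}
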
sk                 28 net/x25/x25_dev.c 	struct sock *sk;
sk                 50 net/x25/x25_dev.c 	if ((sk = x25_find_socket(lci, nb)) != NULL) {
sk                 54 net/x25/x25_dev.c 		bh_lock_sock(sk);
sk                 55 net/x25/x25_dev.c 		if (!sock_owned_by_user(sk)) {
sk                 56 net/x25/x25_dev.c 			queued = x25_process_rx_frame(sk, skb);
sk                 58 net/x25/x25_dev.c 			queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
sk                 60 net/x25/x25_dev.c 		bh_unlock_sock(sk);
sk                 61 net/x25/x25_dev.c 		sock_put(sk);
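
Read together, the x25_dev.c fragments above are the usual lower-half receive pattern: look up the socket for the logical channel, then either run the state machine immediately or push the frame onto the socket backlog when a process currently owns the lock. A stitched-together sketch of that pattern (schematic, not the verbatim function; error handling and the unaccepted-call paths are omitted):

/* Schematic only: assembled from the x25_find_socket()/sk_add_backlog()
 * fragments listed above. */
static int x25_receive_sketch(struct sk_buff *skb, struct x25_neigh *nb,
			      unsigned int lci)
{
	struct sock *sk = x25_find_socket(lci, nb);
	int queued = 0;

	if (!sk)
		return 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		queued = x25_process_rx_frame(sk, skb);	/* run the state machine now */
	else
		queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
	bh_unlock_sock(sk);
	sock_put(sk);	/* drop the reference taken by x25_find_socket() */

	return queued;
}
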
sk                265 net/x25/x25_facilities.c int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
sk                268 net/x25/x25_facilities.c 	struct x25_sock *x25 = x25_sk(sk);
sk                285 net/x25/x25_facilities.c 		SOCK_DEBUG(sk, "X.25: rejecting reverse charging request\n");
sk                297 net/x25/x25_facilities.c 			SOCK_DEBUG(sk, "X.25: inbound throughput negotiated\n");
sk                301 net/x25/x25_facilities.c 			SOCK_DEBUG(sk,
sk                309 net/x25/x25_facilities.c 			SOCK_DEBUG(sk, "X.25: packet size inwards negotiated down\n");
sk                313 net/x25/x25_facilities.c 			SOCK_DEBUG(sk, "X.25: packet size outwards negotiated down\n");
sk                320 net/x25/x25_facilities.c 			SOCK_DEBUG(sk, "X.25: window size inwards negotiated down\n");
sk                324 net/x25/x25_facilities.c 			SOCK_DEBUG(sk, "X.25: window size outwards negotiated down\n");
sk                 32 net/x25/x25_in.c static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
sk                 35 net/x25/x25_in.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 40 net/x25/x25_in.c 		skb_set_owner_r(skb, sk);
sk                 74 net/x25/x25_in.c 	skb_set_owner_r(skbn, sk);
sk                 75 net/x25/x25_in.c 	skb_queue_tail(&sk->sk_receive_queue, skbn);
sk                 76 net/x25/x25_in.c 	if (!sock_flag(sk, SOCK_DEAD))
sk                 77 net/x25/x25_in.c 		sk->sk_data_ready(sk);
sk                 87 net/x25/x25_in.c static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype)
sk                 91 net/x25/x25_in.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 96 net/x25/x25_in.c 		x25_stop_timer(sk);
sk                103 net/x25/x25_in.c 		sk->sk_state   = TCP_ESTABLISHED;
sk                136 net/x25/x25_in.c 		if (!sock_flag(sk, SOCK_DEAD))
sk                137 net/x25/x25_in.c 			sk->sk_state_change(sk);
sk                145 net/x25/x25_in.c 		x25_write_internal(sk, X25_CLEAR_REQUEST);
sk                146 net/x25/x25_in.c 		x25_disconnect(sk, EISCONN, 0x01, 0x48);
sk                153 net/x25/x25_in.c 		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
sk                154 net/x25/x25_in.c 		x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
sk                164 net/x25/x25_in.c 	x25_write_internal(sk, X25_CLEAR_REQUEST);
sk                166 net/x25/x25_in.c 	x25_start_t23timer(sk);
sk                175 net/x25/x25_in.c static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype)
sk                183 net/x25/x25_in.c 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
sk                184 net/x25/x25_in.c 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
sk                188 net/x25/x25_in.c 			x25_disconnect(sk, 0, 0, 0);
sk                198 net/x25/x25_in.c 	x25_write_internal(sk, X25_CLEAR_REQUEST);
sk                199 net/x25/x25_in.c 	x25_start_t23timer(sk);
sk                208 net/x25/x25_in.c static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
sk                212 net/x25/x25_in.c 	struct x25_sock *x25 = x25_sk(sk);
sk                219 net/x25/x25_in.c 			x25_write_internal(sk, X25_RESET_CONFIRMATION);
sk                220 net/x25/x25_in.c 			x25_stop_timer(sk);
sk                226 net/x25/x25_in.c 			x25_requeue_frames(sk);
sk                233 net/x25/x25_in.c 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
sk                234 net/x25/x25_in.c 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
sk                239 net/x25/x25_in.c 			if (!x25_validate_nr(sk, nr)) {
sk                240 net/x25/x25_in.c 				x25_clear_queues(sk);
sk                241 net/x25/x25_in.c 				x25_write_internal(sk, X25_RESET_REQUEST);
sk                242 net/x25/x25_in.c 				x25_start_t22timer(sk);
sk                250 net/x25/x25_in.c 				x25_frames_acked(sk, nr);
sk                261 net/x25/x25_in.c 			if ((ns != x25->vr) || !x25_validate_nr(sk, nr)) {
sk                262 net/x25/x25_in.c 				x25_clear_queues(sk);
sk                263 net/x25/x25_in.c 				x25_write_internal(sk, X25_RESET_REQUEST);
sk                264 net/x25/x25_in.c 				x25_start_t22timer(sk);
sk                273 net/x25/x25_in.c 			x25_frames_acked(sk, nr);
sk                275 net/x25/x25_in.c 				if (x25_queue_rx_frame(sk, skb, m) == 0) {
sk                280 net/x25/x25_in.c 					x25_clear_queues(sk);
sk                281 net/x25/x25_in.c 					x25_write_internal(sk, X25_RESET_REQUEST);
sk                282 net/x25/x25_in.c 					x25_start_t22timer(sk);
sk                291 net/x25/x25_in.c 				if (atomic_read(&sk->sk_rmem_alloc) >
sk                292 net/x25/x25_in.c 				    (sk->sk_rcvbuf >> 1))
sk                301 net/x25/x25_in.c 				x25_stop_timer(sk);
sk                302 net/x25/x25_in.c 				x25_enquiry_response(sk);
sk                305 net/x25/x25_in.c 				x25_start_t2timer(sk);
sk                314 net/x25/x25_in.c 			if (sock_flag(sk, SOCK_URGINLINE))
sk                315 net/x25/x25_in.c 				queued = !sock_queue_rcv_skb(sk, skb);
sk                317 net/x25/x25_in.c 				skb_set_owner_r(skb, sk);
sk                321 net/x25/x25_in.c 			sk_send_sigurg(sk);
sk                322 net/x25/x25_in.c 			x25_write_internal(sk, X25_INTERRUPT_CONFIRMATION);
sk                333 net/x25/x25_in.c 	x25_write_internal(sk, X25_CLEAR_REQUEST);
sk                335 net/x25/x25_in.c 	x25_start_t23timer(sk);
sk                344 net/x25/x25_in.c static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype)
sk                346 net/x25/x25_in.c 	struct x25_sock *x25 = x25_sk(sk);
sk                351 net/x25/x25_in.c 			x25_write_internal(sk, X25_RESET_CONFIRMATION);
sk                354 net/x25/x25_in.c 			x25_stop_timer(sk);
sk                361 net/x25/x25_in.c 			x25_requeue_frames(sk);
sk                368 net/x25/x25_in.c 			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
sk                369 net/x25/x25_in.c 			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
sk                379 net/x25/x25_in.c 	x25_write_internal(sk, X25_CLEAR_REQUEST);
sk                381 net/x25/x25_in.c 	x25_start_t23timer(sk);
sk                386 net/x25/x25_in.c int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
sk                388 net/x25/x25_in.c 	struct x25_sock *x25 = x25_sk(sk);
sk                394 net/x25/x25_in.c 	frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);
sk                398 net/x25/x25_in.c 		queued = x25_state1_machine(sk, skb, frametype);
sk                401 net/x25/x25_in.c 		queued = x25_state2_machine(sk, skb, frametype);
sk                404 net/x25/x25_in.c 		queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
sk                407 net/x25/x25_in.c 		queued = x25_state4_machine(sk, skb, frametype);
sk                411 net/x25/x25_in.c 	x25_kick(sk);
sk                416 net/x25/x25_in.c int x25_backlog_rcv(struct sock *sk, struct sk_buff *skb)
sk                418 net/x25/x25_in.c 	int queued = x25_process_rx_frame(sk, skb);
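
The x25_in.c fragments above are the per-state receive machines. The dispatch that ties them together is visible in x25_process_rx_frame(): decode the frame, hand it to the machine for the current X.25 state, then call x25_kick() to transmit anything that became sendable. Schematically (the real function also covers X25_STATE_0 and additional validation):

/* Schematic restatement of the dispatch visible in the fragments above. */
static int x25_process_rx_frame_sketch(struct sock *sk, struct sk_buff *skb)
{
	struct x25_sock *x25 = x25_sk(sk);
	int queued = 0, frametype, ns, nr, q, d, m;

	frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);

	switch (x25->state) {
	case X25_STATE_1:
		queued = x25_state1_machine(sk, skb, frametype);
		break;
	case X25_STATE_2:
		queued = x25_state2_machine(sk, skb, frametype);
		break;
	case X25_STATE_3:
		queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
		break;
	case X25_STATE_4:
		queued = x25_state4_machine(sk, skb, frametype);
		break;
	}

	x25_kick(sk);
	return queued;
}
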
sk                131 net/x25/x25_link.c 	skb->sk = NULL;
sk                156 net/x25/x25_link.c 	skb->sk = NULL;
sk                187 net/x25/x25_link.c 	skb->sk = NULL;
sk                 47 net/x25/x25_out.c int x25_output(struct sock *sk, struct sk_buff *skb)
sk                 53 net/x25/x25_out.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 66 net/x25/x25_out.c 			release_sock(sk);
sk                 67 net/x25/x25_out.c 			skbn = sock_alloc_send_skb(sk, frontlen + max_len,
sk                 69 net/x25/x25_out.c 			lock_sock(sk);
sk                 75 net/x25/x25_out.c 				SOCK_DEBUG(sk, "x25_output: fragment alloc"
sk                100 net/x25/x25_out.c 			skb_queue_tail(&sk->sk_write_queue, skbn);
sk                106 net/x25/x25_out.c 		skb_queue_tail(&sk->sk_write_queue, skb);
sk                116 net/x25/x25_out.c static void x25_send_iframe(struct sock *sk, struct sk_buff *skb)
sk                118 net/x25/x25_out.c 	struct x25_sock *x25 = x25_sk(sk);
sk                136 net/x25/x25_out.c void x25_kick(struct sock *sk)
sk                141 net/x25/x25_out.c 	struct x25_sock *x25 = x25_sk(sk);
sk                159 net/x25/x25_out.c 	if (!skb_peek(&sk->sk_write_queue))
sk                177 net/x25/x25_out.c 	skb = skb_dequeue(&sk->sk_write_queue);
sk                181 net/x25/x25_out.c 			skb_queue_head(&sk->sk_write_queue, skb);
sk                185 net/x25/x25_out.c 		skb_set_owner_w(skbn, sk);
sk                190 net/x25/x25_out.c 		x25_send_iframe(sk, skbn);
sk                200 net/x25/x25_out.c 		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);
sk                205 net/x25/x25_out.c 	x25_stop_timer(sk);
sk                213 net/x25/x25_out.c void x25_enquiry_response(struct sock *sk)
sk                215 net/x25/x25_out.c 	struct x25_sock *x25 = x25_sk(sk);
sk                218 net/x25/x25_out.c 		x25_write_internal(sk, X25_RNR);
sk                220 net/x25/x25_out.c 		x25_write_internal(sk, X25_RR);
sk                225 net/x25/x25_out.c 	x25_stop_timer(sk);
sk                 34 net/x25/x25_subr.c void x25_clear_queues(struct sock *sk)
sk                 36 net/x25/x25_subr.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 38 net/x25/x25_subr.c 	skb_queue_purge(&sk->sk_write_queue);
sk                 51 net/x25/x25_subr.c void x25_frames_acked(struct sock *sk, unsigned short nr)
sk                 54 net/x25/x25_subr.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 68 net/x25/x25_subr.c void x25_requeue_frames(struct sock *sk)
sk                 77 net/x25/x25_subr.c 	while ((skb = skb_dequeue(&x25_sk(sk)->ack_queue)) != NULL) {
sk                 79 net/x25/x25_subr.c 			skb_queue_head(&sk->sk_write_queue, skb);
sk                 81 net/x25/x25_subr.c 			skb_append(skb_prev, skb, &sk->sk_write_queue);
sk                 90 net/x25/x25_subr.c int x25_validate_nr(struct sock *sk, unsigned short nr)
sk                 92 net/x25/x25_subr.c 	struct x25_sock *x25 = x25_sk(sk);
sk                109 net/x25/x25_subr.c void x25_write_internal(struct sock *sk, int frametype)
sk                111 net/x25/x25_subr.c 	struct x25_sock *x25 = x25_sk(sk);
sk                260 net/x25/x25_subr.c int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
sk                263 net/x25/x25_subr.c 	struct x25_sock *x25 = x25_sk(sk);
sk                338 net/x25/x25_subr.c void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
sk                341 net/x25/x25_subr.c 	struct x25_sock *x25 = x25_sk(sk);
sk                343 net/x25/x25_subr.c 	x25_clear_queues(sk);
sk                344 net/x25/x25_subr.c 	x25_stop_timer(sk);
sk                352 net/x25/x25_subr.c 	sk->sk_state     = TCP_CLOSE;
sk                353 net/x25/x25_subr.c 	sk->sk_err       = reason;
sk                354 net/x25/x25_subr.c 	sk->sk_shutdown |= SEND_SHUTDOWN;
sk                356 net/x25/x25_subr.c 	if (!sock_flag(sk, SOCK_DEAD)) {
sk                357 net/x25/x25_subr.c 		sk->sk_state_change(sk);
sk                358 net/x25/x25_subr.c 		sock_set_flag(sk, SOCK_DEAD);
sk                366 net/x25/x25_subr.c void x25_check_rbuf(struct sock *sk)
sk                368 net/x25/x25_subr.c 	struct x25_sock *x25 = x25_sk(sk);
sk                370 net/x25/x25_subr.c 	if (atomic_read(&sk->sk_rmem_alloc) < (sk->sk_rcvbuf >> 1) &&
sk                375 net/x25/x25_subr.c 		x25_write_internal(sk, X25_RR);
sk                376 net/x25/x25_subr.c 		x25_stop_timer(sk);
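
x25_disconnect(), whose pieces are listed above, is the common teardown helper: flush both queues, stop the per-VC timer, record the error on the socket, and wake the owner exactly once. Assembled from the listed lines (the cause/diagnostic recording and LCI reset done by the real function are omitted here):

/* Sketch assembled from the x25_subr.c fragments above. */
static void x25_disconnect_sketch(struct sock *sk, int reason,
				  unsigned char cause, unsigned char diagnostic)
{
	(void)cause;		/* stored in the x25 socket state by the real function */
	(void)diagnostic;

	x25_clear_queues(sk);
	x25_stop_timer(sk);

	sk->sk_state     = TCP_CLOSE;
	sk->sk_err       = reason;
	sk->sk_shutdown |= SEND_SHUTDOWN;

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_state_change(sk);	/* wake anyone blocked in connect()/poll() */
		sock_set_flag(sk, SOCK_DEAD);
	}
}
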
sk                 27 net/x25/x25_timer.c void x25_init_timers(struct sock *sk)
sk                 29 net/x25/x25_timer.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 34 net/x25/x25_timer.c 	sk->sk_timer.function = x25_heartbeat_expiry;
sk                 37 net/x25/x25_timer.c void x25_start_heartbeat(struct sock *sk)
sk                 39 net/x25/x25_timer.c 	mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
sk                 42 net/x25/x25_timer.c void x25_stop_heartbeat(struct sock *sk)
sk                 44 net/x25/x25_timer.c 	del_timer(&sk->sk_timer);
sk                 47 net/x25/x25_timer.c void x25_start_t2timer(struct sock *sk)
sk                 49 net/x25/x25_timer.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 54 net/x25/x25_timer.c void x25_start_t21timer(struct sock *sk)
sk                 56 net/x25/x25_timer.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 61 net/x25/x25_timer.c void x25_start_t22timer(struct sock *sk)
sk                 63 net/x25/x25_timer.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 68 net/x25/x25_timer.c void x25_start_t23timer(struct sock *sk)
sk                 70 net/x25/x25_timer.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 75 net/x25/x25_timer.c void x25_stop_timer(struct sock *sk)
sk                 77 net/x25/x25_timer.c 	del_timer(&x25_sk(sk)->timer);
sk                 80 net/x25/x25_timer.c unsigned long x25_display_timer(struct sock *sk)
sk                 82 net/x25/x25_timer.c 	struct x25_sock *x25 = x25_sk(sk);
sk                 92 net/x25/x25_timer.c 	struct sock *sk = from_timer(sk, t, sk_timer);
sk                 94 net/x25/x25_timer.c 	bh_lock_sock(sk);
sk                 95 net/x25/x25_timer.c 	if (sock_owned_by_user(sk)) /* can currently only occur in state 3 */
sk                 98 net/x25/x25_timer.c 	switch (x25_sk(sk)->state) {
sk                106 net/x25/x25_timer.c 			if (sock_flag(sk, SOCK_DESTROY) ||
sk                107 net/x25/x25_timer.c 			    (sk->sk_state == TCP_LISTEN &&
sk                108 net/x25/x25_timer.c 			     sock_flag(sk, SOCK_DEAD))) {
sk                109 net/x25/x25_timer.c 				bh_unlock_sock(sk);
sk                110 net/x25/x25_timer.c 				x25_destroy_socket_from_timer(sk);
sk                119 net/x25/x25_timer.c 			x25_check_rbuf(sk);
sk                123 net/x25/x25_timer.c 	x25_start_heartbeat(sk);
sk                124 net/x25/x25_timer.c 	bh_unlock_sock(sk);
sk                131 net/x25/x25_timer.c static inline void x25_do_timer_expiry(struct sock * sk)
sk                133 net/x25/x25_timer.c 	struct x25_sock *x25 = x25_sk(sk);
sk                140 net/x25/x25_timer.c 				x25_enquiry_response(sk);
sk                146 net/x25/x25_timer.c 			x25_write_internal(sk, X25_CLEAR_REQUEST);
sk                148 net/x25/x25_timer.c 			x25_start_t23timer(sk);
sk                152 net/x25/x25_timer.c 			x25_disconnect(sk, ETIMEDOUT, 0, 0);
sk                160 net/x25/x25_timer.c 	struct sock *sk = &x25->sk;
sk                162 net/x25/x25_timer.c 	bh_lock_sock(sk);
sk                163 net/x25/x25_timer.c 	if (sock_owned_by_user(sk)) { /* can currently only occur in state 3 */
sk                164 net/x25/x25_timer.c 		if (x25_sk(sk)->state == X25_STATE_3)
sk                165 net/x25/x25_timer.c 			x25_start_t2timer(sk);
sk                167 net/x25/x25_timer.c 		x25_do_timer_expiry(sk);
sk                168 net/x25/x25_timer.c 	bh_unlock_sock(sk);
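
In x25_timer.c above, sk->sk_timer doubles as the X.25 heartbeat: from_timer() recovers the sock from the timer_list, the handler must tolerate a socket currently locked by a process, and self-destruction of dead sockets is deferred to this context. A simplified sketch of that handler shape, with the per-state bookkeeping in the real switch statement elided:

/* Schematic heartbeat handler assembled from the x25_timer.c fragments above. */
static void x25_heartbeat_sketch(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))	/* try again on the next tick */
		goto restart;

	if (sock_flag(sk, SOCK_DESTROY) ||
	    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
		bh_unlock_sock(sk);
		x25_destroy_socket_from_timer(sk);	/* frees sk; do not touch it again */
		return;
	}

restart:
	x25_start_heartbeat(sk);	/* mod_timer(&sk->sk_timer, jiffies + 5 * HZ) */
	bh_unlock_sock(sk);
}
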
sk                219 net/xdp/xsk.c  	xs->sk.sk_data_ready(&xs->sk);
sk                258 net/xdp/xsk.c  	xs->sk.sk_data_ready(&xs->sk);
sk                280 net/xdp/xsk.c  		xs->sk.sk_write_space(&xs->sk);
sk                329 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(skb->sk);
sk                339 net/xdp/xsk.c  static int xsk_generic_xmit(struct sock *sk)
sk                341 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sk);
sk                364 net/xdp/xsk.c  		skb = sock_alloc_send_skb(sk, len, 1, &err);
sk                380 net/xdp/xsk.c  		skb->priority = sk->sk_priority;
sk                381 net/xdp/xsk.c  		skb->mark = sk->sk_mark;
sk                399 net/xdp/xsk.c  		sk->sk_write_space(sk);
sk                405 net/xdp/xsk.c  static int __xsk_sendmsg(struct sock *sk)
sk                407 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sk);
sk                414 net/xdp/xsk.c  	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
sk                420 net/xdp/xsk.c  	struct sock *sk = sock->sk;
sk                421 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sk);
sk                428 net/xdp/xsk.c  	return __xsk_sendmsg(sk);
sk                435 net/xdp/xsk.c  	struct sock *sk = sock->sk;
sk                436 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sk);
sk                449 net/xdp/xsk.c  			__xsk_sendmsg(sk);
sk                541 net/xdp/xsk.c  	struct sock *sk = sock->sk;
sk                542 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sk);
sk                545 net/xdp/xsk.c  	if (!sk)
sk                548 net/xdp/xsk.c  	net = sock_net(sk);
sk                551 net/xdp/xsk.c  	sk_del_node_init_rcu(sk);
sk                555 net/xdp/xsk.c  	sock_prot_inuse_add(net, sk->sk_prot, -1);
sk                566 net/xdp/xsk.c  	sock_orphan(sk);
sk                567 net/xdp/xsk.c  	sock->sk = NULL;
sk                569 net/xdp/xsk.c  	sk_refcnt_debug_release(sk);
sk                570 net/xdp/xsk.c  	sock_put(sk);
sk                584 net/xdp/xsk.c  	if (sock->sk->sk_family != PF_XDP) {
sk                613 net/xdp/xsk.c  	struct sock *sk = sock->sk;
sk                614 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sk);
sk                636 net/xdp/xsk.c  	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
sk                672 net/xdp/xsk.c  		umem_xs = xdp_sk(sock->sk);
sk                737 net/xdp/xsk.c  	struct sock *sk = sock->sk;
sk                738 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sk);
sk                850 net/xdp/xsk.c  	struct sock *sk = sock->sk;
sk                851 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sk);
sk                967 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sock->sk);
sk               1012 net/xdp/xsk.c  	struct sock *sk;
sk               1017 net/xdp/xsk.c  		sk_for_each(sk, &net->xdp.list) {
sk               1018 net/xdp/xsk.c  			struct xdp_sock *xs = xdp_sk(sk);
sk               1022 net/xdp/xsk.c  				sk->sk_err = ENETDOWN;
sk               1023 net/xdp/xsk.c  				if (!sock_flag(sk, SOCK_DEAD))
sk               1024 net/xdp/xsk.c  					sk->sk_error_report(sk);
sk               1066 net/xdp/xsk.c  static void xsk_destruct(struct sock *sk)
sk               1068 net/xdp/xsk.c  	struct xdp_sock *xs = xdp_sk(sk);
sk               1070 net/xdp/xsk.c  	if (!sock_flag(sk, SOCK_DEAD))
sk               1075 net/xdp/xsk.c  	sk_refcnt_debug_dec(sk);
sk               1081 net/xdp/xsk.c  	struct sock *sk;
sk               1094 net/xdp/xsk.c  	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
sk               1095 net/xdp/xsk.c  	if (!sk)
sk               1100 net/xdp/xsk.c  	sock_init_data(sock, sk);
sk               1102 net/xdp/xsk.c  	sk->sk_family = PF_XDP;
sk               1104 net/xdp/xsk.c  	sk->sk_destruct = xsk_destruct;
sk               1105 net/xdp/xsk.c  	sk_refcnt_debug_inc(sk);
sk               1107 net/xdp/xsk.c  	sock_set_flag(sk, SOCK_RCU_FREE);
sk               1109 net/xdp/xsk.c  	xs = xdp_sk(sk);
sk               1119 net/xdp/xsk.c  	sk_add_node_rcu(sk, &net->xdp.list);
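
xsk_create() and xsk_bind() above implement the kernel side of AF_XDP sockets. From user space the entry point is an ordinary socket() plus a bind() to an interface/queue pair; the sketch below shows only that skeleton, since a working socket also needs a UMEM registration and rings set up via setsockopt() before the kernel will accept the bind (those steps and error handling are omitted, and the interface name is a placeholder):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/if_xdp.h>

#ifndef AF_XDP
#define AF_XDP 44
#endif

int main(void)
{
	struct sockaddr_xdp sxdp;
	int fd;

	fd = socket(AF_XDP, SOCK_RAW, 0);	/* handled by xsk_create() */
	if (fd < 0) {
		perror("socket(AF_XDP)");
		return 1;
	}

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family   = AF_XDP;
	sxdp.sxdp_ifindex  = if_nametoindex("eth0");	/* placeholder interface */
	sxdp.sxdp_queue_id = 0;

	/* xsk_bind() looks the device up via sxdp_ifindex; it will refuse the
	 * bind here because no UMEM or rings have been configured yet. */
	if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0)
		perror("bind(AF_XDP)");

	return 0;
}
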
sk                 20 net/xdp/xsk.h  static inline struct xdp_sock *xdp_sk(struct sock *sk)
sk                 22 net/xdp/xsk.h  	return (struct xdp_sock *)sk;
sk                 79 net/xdp/xsk_diag.c static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
sk                 84 net/xdp/xsk_diag.c 	struct xdp_sock *xs = xdp_sk(sk);
sk                 96 net/xdp/xsk_diag.c 	msg->xdiag_type = sk->sk_type;
sk                 98 net/xdp/xsk_diag.c 	sock_diag_save_cookie(sk, msg->xdiag_cookie);
sk                106 net/xdp/xsk_diag.c 			from_kuid_munged(user_ns, sock_i_uid(sk))))
sk                118 net/xdp/xsk_diag.c 	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
sk                134 net/xdp/xsk_diag.c 	struct net *net = sock_net(nlskb->sk);
sk                136 net/xdp/xsk_diag.c 	struct sock *sk;
sk                140 net/xdp/xsk_diag.c 	sk_for_each(sk, &net->xdp.list) {
sk                141 net/xdp/xsk_diag.c 		if (!net_eq(sock_net(sk), net))
sk                146 net/xdp/xsk_diag.c 		if (xsk_diag_fill(sk, nlskb, req,
sk                147 net/xdp/xsk_diag.c 				  sk_user_ns(NETLINK_CB(cb->skb).sk),
sk                150 net/xdp/xsk_diag.c 				  sock_i_ino(sk)) < 0) {
sk                165 net/xdp/xsk_diag.c 	struct net *net = sock_net(nlskb->sk);
sk                 38 net/xfrm/xfrm_input.c 	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                317 net/xfrm/xfrm_interface.c 	err = dst_output(xi->net, skb->sk, skb);
sk                 21 net/xfrm/xfrm_output.c static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
sk                507 net/xfrm/xfrm_output.c 		err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
sk                512 net/xfrm/xfrm_output.c 			return dst_output(net, skb->sk, skb);
sk                515 net/xfrm/xfrm_output.c 			      NF_INET_POST_ROUTING, net, skb->sk, skb,
sk                529 net/xfrm/xfrm_output.c static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                534 net/xfrm/xfrm_output.c static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
sk                552 net/xfrm/xfrm_output.c 		err = xfrm_output2(net, sk, segs);
sk                565 net/xfrm/xfrm_output.c int xfrm_output(struct sock *sk, struct sk_buff *skb)
sk                590 net/xfrm/xfrm_output.c 				return xfrm_output_gso(net, sk, skb);
sk                600 net/xfrm/xfrm_output.c 			return xfrm_output_gso(net, sk, skb);
sk                613 net/xfrm/xfrm_output.c 	return xfrm_output2(net, sk, skb);
sk                649 net/xfrm/xfrm_output.c 		 skb->sk->sk_family == AF_INET6)
sk               2152 net/xfrm/xfrm_policy.c static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
sk               2160 net/xfrm/xfrm_policy.c 	pol = rcu_dereference(sk->sk_policy[dir]);
sk               2172 net/xfrm/xfrm_policy.c 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
sk               2251 net/xfrm/xfrm_policy.c int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
sk               2253 net/xfrm/xfrm_policy.c 	struct net *net = sock_net(sk);
sk               2262 net/xfrm/xfrm_policy.c 	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
sk               2269 net/xfrm/xfrm_policy.c 	rcu_assign_pointer(sk->sk_policy[dir], pol);
sk               2319 net/xfrm/xfrm_policy.c int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
sk               2334 net/xfrm/xfrm_policy.c 			rcu_assign_pointer(sk->sk_policy[i], np);
sk               2752 net/xfrm/xfrm_policy.c 	struct sock *sk;
sk               2767 net/xfrm/xfrm_policy.c 	sk = skb->sk;
sk               2772 net/xfrm/xfrm_policy.c 	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
sk               2802 net/xfrm/xfrm_policy.c 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
sk               2812 net/xfrm/xfrm_policy.c 		dst_output(net, skb->sk, skb);
sk               2825 net/xfrm/xfrm_policy.c static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
sk               2833 net/xfrm/xfrm_policy.c 	if (unlikely(skb_fclone_busy(sk, skb))) {
sk               3017 net/xfrm/xfrm_policy.c 					const struct sock *sk,
sk               3031 net/xfrm/xfrm_policy.c 	sk = sk_const_to_full_sk(sk);
sk               3032 net/xfrm/xfrm_policy.c 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
sk               3034 net/xfrm/xfrm_policy.c 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
sk               3168 net/xfrm/xfrm_policy.c 			      const struct flowi *fl, const struct sock *sk,
sk               3171 net/xfrm/xfrm_policy.c 	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
sk               3180 net/xfrm/xfrm_policy.c 				    const struct sock *sk, int flags)
sk               3182 net/xfrm/xfrm_policy.c 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
sk               3502 net/xfrm/xfrm_policy.c int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
sk               3556 net/xfrm/xfrm_policy.c 	sk = sk_to_full_sk(sk);
sk               3557 net/xfrm/xfrm_policy.c 	if (sk && sk->sk_policy[dir]) {
sk               3558 net/xfrm/xfrm_policy.c 		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
sk               2264 net/xfrm/xfrm_state.c int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
sk               2275 net/xfrm/xfrm_state.c 		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
sk               2276 net/xfrm/xfrm_state.c 		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
sk               2277 net/xfrm/xfrm_state.c 		__sk_dst_reset(sk);
sk               2291 net/xfrm/xfrm_state.c 		pol = km->compile_policy(sk, optname, data,
sk               2299 net/xfrm/xfrm_state.c 		xfrm_sk_policy_insert(sk, err, pol);
sk               2301 net/xfrm/xfrm_state.c 		__sk_dst_reset(sk);
sk                674 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk                753 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1001 net/xfrm/xfrm_user.c 	struct sock *sk = cb->skb->sk;
sk               1002 net/xfrm/xfrm_user.c 	struct net *net = sock_net(sk);
sk               1012 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1160 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1205 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1264 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1284 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1308 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1648 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1797 net/xfrm/xfrm_user.c 	struct net *net = sock_net(cb->skb->sk);
sk               1815 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1858 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               1939 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2039 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2083 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2134 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2162 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2226 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2260 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2371 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2636 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2685 net/xfrm/xfrm_user.c 	struct net *net = sock_net(skb->sk);
sk               2982 net/xfrm/xfrm_user.c static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
sk               2985 net/xfrm/xfrm_user.c 	struct net *net = sock_net(sk);
sk               2991 net/xfrm/xfrm_user.c 	switch (sk->sk_family) {
sk                 89 samples/bpf/hbm_kern.h 	struct bpf_sock *sk;
sk                 92 samples/bpf/hbm_kern.h 	sk = skb->sk;
sk                 93 samples/bpf/hbm_kern.h 	if (sk) {
sk                 94 samples/bpf/hbm_kern.h 		sk = bpf_sk_fullsock(sk);
sk                 95 samples/bpf/hbm_kern.h 		if (sk) {
sk                 96 samples/bpf/hbm_kern.h 			if (sk->protocol == IPPROTO_TCP) {
sk                 97 samples/bpf/hbm_kern.h 				tp = bpf_tcp_sock(sk);
sk                  9 samples/bpf/sock_flags_kern.c int bpf_prog1(struct bpf_sock *sk)
sk                 17 samples/bpf/sock_flags_kern.c 	bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol);
sk                 23 samples/bpf/sock_flags_kern.c 	if (sk->family == PF_INET6 &&
sk                 24 samples/bpf/sock_flags_kern.c 	    sk->type == SOCK_RAW   &&
sk                 25 samples/bpf/sock_flags_kern.c 	    sk->protocol == IPPROTO_ICMPV6)
sk                 32 samples/bpf/sock_flags_kern.c int bpf_prog2(struct bpf_sock *sk)
sk                 36 samples/bpf/sock_flags_kern.c 	bpf_trace_printk(fmt, sizeof(fmt), sk->family, sk->type, sk->protocol);
sk                 41 samples/bpf/sock_flags_kern.c 	if (sk->family == PF_INET &&
sk                 42 samples/bpf/sock_flags_kern.c 	    sk->type == SOCK_RAW  &&
sk                 43 samples/bpf/sock_flags_kern.c 	    sk->protocol == IPPROTO_ICMP)
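
samples/bpf/sock_flags_kern.c above is a cgroup socket-creation filter: bpf_prog1 rejects raw ICMPv6 sockets by checking sk->family/type/protocol. A self-contained sketch of the same idea is below; the section name follows the usual libbpf "cgroup/sock" convention and the constants are defined locally so the snippet compiles standalone, so treat it as illustrative rather than the sample verbatim:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#ifndef AF_INET6
#define AF_INET6	10
#endif
#ifndef SOCK_RAW
#define SOCK_RAW	3
#endif
#ifndef IPPROTO_ICMPV6
#define IPPROTO_ICMPV6	58
#endif

/* Deny creation of raw ICMPv6 sockets inside the attached cgroup;
 * returning 1 allows the socket, 0 rejects it. */
SEC("cgroup/sock")
int deny_raw_icmpv6(struct bpf_sock *sk)
{
	if (sk->family == AF_INET6 &&
	    sk->type == SOCK_RAW &&
	    sk->protocol == IPPROTO_ICMPV6)
		return 0;

	return 1;
}

char _license[] SEC("license") = "GPL";
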
sk                 29 samples/bpf/tcp_dumpstats_kern.c 	struct bpf_sock *sk;
sk                 43 samples/bpf/tcp_dumpstats_kern.c 	sk = ctx->sk;
sk                 44 samples/bpf/tcp_dumpstats_kern.c 	if (!sk)
sk                 47 samples/bpf/tcp_dumpstats_kern.c 	next_dump = bpf_sk_storage_get(&bpf_next_dump, sk, 0,
sk                 56 samples/bpf/tcp_dumpstats_kern.c 	tcp_sk = bpf_tcp_sock(sk);
sk                 57 security/apparmor/include/net.h 	struct lsm_network_audit NAME ## _net = { .sk = (SK),		  \
sk                 99 security/apparmor/include/net.h 					struct sock *sk)
sk                101 security/apparmor/include/net.h 	return aa_profile_af_perm(profile, sa, request, sk->sk_family,
sk                102 security/apparmor/include/net.h 				  sk->sk_type);
sk                104 security/apparmor/include/net.h int aa_sk_perm(const char *op, u32 request, struct sock *sk);
sk                110 security/apparmor/include/net.h 			   u32 secid, struct sock *sk);
sk                759 security/apparmor/lsm.c static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
sk                767 security/apparmor/lsm.c 	SK_CTX(sk) = ctx;
sk                775 security/apparmor/lsm.c static void apparmor_sk_free_security(struct sock *sk)
sk                777 security/apparmor/lsm.c 	struct aa_sk_ctx *ctx = SK_CTX(sk);
sk                779 security/apparmor/lsm.c 	SK_CTX(sk) = NULL;
sk                788 security/apparmor/lsm.c static void apparmor_sk_clone_security(const struct sock *sk,
sk                791 security/apparmor/lsm.c 	struct aa_sk_ctx *ctx = SK_CTX(sk);
sk                842 security/apparmor/lsm.c 	if (sock->sk) {
sk                843 security/apparmor/lsm.c 		struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
sk                860 security/apparmor/lsm.c 	AA_BUG(!sock->sk);
sk                864 security/apparmor/lsm.c 	return af_select(sock->sk->sk_family,
sk                866 security/apparmor/lsm.c 			 aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk));
sk                876 security/apparmor/lsm.c 	AA_BUG(!sock->sk);
sk                880 security/apparmor/lsm.c 	return af_select(sock->sk->sk_family,
sk                882 security/apparmor/lsm.c 			 aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk));
sk                891 security/apparmor/lsm.c 	AA_BUG(!sock->sk);
sk                894 security/apparmor/lsm.c 	return af_select(sock->sk->sk_family,
sk                896 security/apparmor/lsm.c 			 aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk));
sk                908 security/apparmor/lsm.c 	AA_BUG(!sock->sk);
sk                912 security/apparmor/lsm.c 	return af_select(sock->sk->sk_family,
sk                914 security/apparmor/lsm.c 			 aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk));
sk                921 security/apparmor/lsm.c 	AA_BUG(!sock->sk);
sk                925 security/apparmor/lsm.c 	return af_select(sock->sk->sk_family,
sk                927 security/apparmor/lsm.c 			 aa_sk_perm(op, request, sock->sk));
sk                952 security/apparmor/lsm.c 	AA_BUG(!sock->sk);
sk                955 security/apparmor/lsm.c 	return af_select(sock->sk->sk_family,
sk                957 security/apparmor/lsm.c 			 aa_sk_perm(op, request, sock->sk));
sk                981 security/apparmor/lsm.c 	AA_BUG(!sock->sk);
sk                984 security/apparmor/lsm.c 	return af_select(sock->sk->sk_family,
sk                986 security/apparmor/lsm.c 			 aa_sk_perm(op, request, sock->sk));
sk               1026 security/apparmor/lsm.c static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk               1028 security/apparmor/lsm.c 	struct aa_sk_ctx *ctx = SK_CTX(sk);
sk               1034 security/apparmor/lsm.c 				      skb->secmark, sk);
sk               1039 security/apparmor/lsm.c static struct aa_label *sk_peer_label(struct sock *sk)
sk               1041 security/apparmor/lsm.c 	struct aa_sk_ctx *ctx = SK_CTX(sk);
sk               1065 security/apparmor/lsm.c 	peer = sk_peer_label(sock->sk);
sk               1123 security/apparmor/lsm.c static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
sk               1125 security/apparmor/lsm.c 	struct aa_sk_ctx *ctx = SK_CTX(sk);
sk               1132 security/apparmor/lsm.c static int apparmor_inet_conn_request(struct sock *sk, struct sk_buff *skb,
sk               1135 security/apparmor/lsm.c 	struct aa_sk_ctx *ctx = SK_CTX(sk);
sk               1141 security/apparmor/lsm.c 				      skb->secmark, sk);
sk               1618 security/apparmor/lsm.c 	struct sock *sk;
sk               1623 security/apparmor/lsm.c 	sk = skb_to_full_sk(skb);
sk               1624 security/apparmor/lsm.c 	if (sk == NULL)
sk               1627 security/apparmor/lsm.c 	ctx = SK_CTX(sk);
sk               1629 security/apparmor/lsm.c 				    skb->secmark, sk))
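
The apparmor/lsm.c entries above revolve around a small per-socket context stored in sk->sk_security (the SK_CTX() accessor): allocated in apparmor_sk_alloc_security(), torn down in apparmor_sk_free_security(), and copied to the child socket by the clone hook. A rough stitched sketch of that lifecycle, using the label refcount helpers the file relies on; treat it as illustrative rather than the verbatim hooks:

/* Illustrative only: assembled from the apparmor_sk_* fragments above. */
static int sk_ctx_alloc_sketch(struct sock *sk, int family, gfp_t flags)
{
	struct aa_sk_ctx *ctx = kzalloc(sizeof(*ctx), flags);

	if (!ctx)
		return -ENOMEM;
	SK_CTX(sk) = ctx;		/* hangs off sk->sk_security */
	return 0;
}

static void sk_ctx_free_sketch(struct sock *sk)
{
	struct aa_sk_ctx *ctx = SK_CTX(sk);

	SK_CTX(sk) = NULL;
	aa_put_label(ctx->label);	/* drop the confining label */
	aa_put_label(ctx->peer);	/* and the cached peer label */
	kfree(ctx);
}
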
sk                144 security/apparmor/net.c 			    struct sock *sk)
sk                149 security/apparmor/net.c 	AA_BUG(!sk);
sk                153 security/apparmor/net.c 		DEFINE_AUDIT_SK(sa, op, sk);
sk                156 security/apparmor/net.c 			    aa_profile_af_sk_perm(profile, &sa, request, sk));
sk                162 security/apparmor/net.c int aa_sk_perm(const char *op, u32 request, struct sock *sk)
sk                167 security/apparmor/net.c 	AA_BUG(!sk);
sk                172 security/apparmor/net.c 	error = aa_label_sk_perm(label, op, request, sk);
sk                184 security/apparmor/net.c 	AA_BUG(!sock->sk);
sk                186 security/apparmor/net.c 	return aa_label_sk_perm(label, op, request, sock->sk);
sk                212 security/apparmor/net.c 			   struct common_audit_data *sa, struct sock *sk)
sk                245 security/apparmor/net.c 			   u32 secid, struct sock *sk)
sk                248 security/apparmor/net.c 	DEFINE_AUDIT_SK(sa, op, sk);
sk                252 security/apparmor/net.c 						    &sa, sk));
sk                318 security/lsm_audit.c 		if (a->u.net->sk) {
sk                319 security/lsm_audit.c 			struct sock *sk = a->u.net->sk;
sk                325 security/lsm_audit.c 			switch (sk->sk_family) {
sk                327 security/lsm_audit.c 				struct inet_sock *inet = inet_sk(sk);
sk                339 security/lsm_audit.c 				struct inet_sock *inet = inet_sk(sk);
sk                341 security/lsm_audit.c 				print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
sk                344 security/lsm_audit.c 				print_ipv6_addr(ab, &sk->sk_v6_daddr,
sk                351 security/lsm_audit.c 				u = unix_sk(sk);
sk               1904 security/security.c int security_netlink_send(struct sock *sk, struct sk_buff *skb)
sk               1906 security/security.c 	return call_int_hook(netlink_send, 0, sk, skb);
sk               2047 security/security.c int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk               2049 security/security.c 	return call_int_hook(socket_sock_rcv_skb, 0, sk, skb);
sk               2067 security/security.c int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
sk               2069 security/security.c 	return call_int_hook(sk_alloc_security, 0, sk, family, priority);
sk               2072 security/security.c void security_sk_free(struct sock *sk)
sk               2074 security/security.c 	call_void_hook(sk_free_security, sk);
sk               2077 security/security.c void security_sk_clone(const struct sock *sk, struct sock *newsk)
sk               2079 security/security.c 	call_void_hook(sk_clone_security, sk, newsk);
sk               2083 security/security.c void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
sk               2085 security/security.c 	call_void_hook(sk_getsecid, sk, &fl->flowi_secid);
sk               2095 security/security.c void security_sock_graft(struct sock *sk, struct socket *parent)
sk               2097 security/security.c 	call_void_hook(sock_graft, sk, parent);
sk               2101 security/security.c int security_inet_conn_request(struct sock *sk,
sk               2104 security/security.c 	return call_int_hook(inet_conn_request, 0, sk, skb, req);
sk               2114 security/security.c void security_inet_conn_established(struct sock *sk,
sk               2117 security/security.c 	call_void_hook(inet_conn_established, sk, skb);
sk               2163 security/security.c int security_tun_dev_attach(struct sock *sk, void *security)
sk               2165 security/security.c 	return call_int_hook(tun_dev_attach, 0, sk, security);
sk               2181 security/security.c int security_sctp_bind_connect(struct sock *sk, int optname,
sk               2184 security/security.c 	return call_int_hook(sctp_bind_connect, 0, sk, optname,
sk               2189 security/security.c void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
sk               2192 security/security.c 	call_void_hook(sctp_sk_clone, ep, sk, newsk);
sk               4480 security/selinux/hooks.c static int sock_has_perm(struct sock *sk, u32 perms)
sk               4482 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               4491 security/selinux/hooks.c 	ad.u.net->sk = sk;
sk               4538 security/selinux/hooks.c 	if (sock->sk) {
sk               4539 security/selinux/hooks.c 		sksec = sock->sk->sk_security;
sk               4546 security/selinux/hooks.c 		err = selinux_netlbl_socket_post_create(sock->sk, family);
sk               4555 security/selinux/hooks.c 	struct sk_security_struct *sksec_a = socka->sk->sk_security;
sk               4556 security/selinux/hooks.c 	struct sk_security_struct *sksec_b = sockb->sk->sk_security;
sk               4570 security/selinux/hooks.c 	struct sock *sk = sock->sk;
sk               4571 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               4575 security/selinux/hooks.c 	err = sock_has_perm(sk, SOCKET__BIND);
sk               4580 security/selinux/hooks.c 	family = sk->sk_family;
sk               4636 security/selinux/hooks.c 			inet_get_local_port_range(sock_net(sk), &low, &high);
sk               4638 security/selinux/hooks.c 			if (snum < max(inet_prot_sock(sock_net(sk)), low) ||
sk               4640 security/selinux/hooks.c 				err = sel_netport_sid(sk->sk_protocol,
sk               4705 security/selinux/hooks.c 	struct sock *sk = sock->sk;
sk               4706 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               4709 security/selinux/hooks.c 	err = sock_has_perm(sk, SOCKET__CONNECT);
sk               4763 security/selinux/hooks.c 		err = sel_netport_sid(sk->sk_protocol, snum, &sid);
sk               4797 security/selinux/hooks.c 	struct sock *sk = sock->sk;
sk               4803 security/selinux/hooks.c 	return selinux_netlbl_socket_connect(sk, address);
sk               4808 security/selinux/hooks.c 	return sock_has_perm(sock->sk, SOCKET__LISTEN);
sk               4819 security/selinux/hooks.c 	err = sock_has_perm(sock->sk, SOCKET__ACCEPT);
sk               4840 security/selinux/hooks.c 	return sock_has_perm(sock->sk, SOCKET__WRITE);
sk               4846 security/selinux/hooks.c 	return sock_has_perm(sock->sk, SOCKET__READ);
sk               4851 security/selinux/hooks.c 	return sock_has_perm(sock->sk, SOCKET__GETATTR);
sk               4856 security/selinux/hooks.c 	return sock_has_perm(sock->sk, SOCKET__GETATTR);
sk               4863 security/selinux/hooks.c 	err = sock_has_perm(sock->sk, SOCKET__SETOPT);
sk               4873 security/selinux/hooks.c 	return sock_has_perm(sock->sk, SOCKET__GETOPT);
sk               4878 security/selinux/hooks.c 	return sock_has_perm(sock->sk, SOCKET__SHUTDOWN);
sk               4894 security/selinux/hooks.c 	ad.u.net->sk = other;
sk               4919 security/selinux/hooks.c 	struct sk_security_struct *ssec = sock->sk->sk_security;
sk               4920 security/selinux/hooks.c 	struct sk_security_struct *osec = other->sk->sk_security;
sk               4926 security/selinux/hooks.c 	ad.u.net->sk = other->sk;
sk               4958 security/selinux/hooks.c static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
sk               4962 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               4992 security/selinux/hooks.c static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk               4995 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               4996 security/selinux/hooks.c 	u16 family = sk->sk_family;
sk               5016 security/selinux/hooks.c 		return selinux_sock_rcv_skb_compat(sk, skb, family);
sk               5037 security/selinux/hooks.c 		err = selinux_inet_sys_rcv_skb(sock_net(sk), skb->skb_iif,
sk               5069 security/selinux/hooks.c 	struct sk_security_struct *sksec = sock->sk->sk_security;
sk               5110 security/selinux/hooks.c 		family = sock->sk->sk_family;
sk               5127 security/selinux/hooks.c static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
sk               5139 security/selinux/hooks.c 	sk->sk_security = sksec;
sk               5144 security/selinux/hooks.c static void selinux_sk_free_security(struct sock *sk)
sk               5146 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               5148 security/selinux/hooks.c 	sk->sk_security = NULL;
sk               5153 security/selinux/hooks.c static void selinux_sk_clone_security(const struct sock *sk, struct sock *newsk)
sk               5155 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               5165 security/selinux/hooks.c static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
sk               5167 security/selinux/hooks.c 	if (!sk)
sk               5170 security/selinux/hooks.c 		struct sk_security_struct *sksec = sk->sk_security;
sk               5176 security/selinux/hooks.c static void selinux_sock_graft(struct sock *sk, struct socket *parent)
sk               5180 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               5182 security/selinux/hooks.c 	if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6 ||
sk               5183 security/selinux/hooks.c 	    sk->sk_family == PF_UNIX)
sk               5195 security/selinux/hooks.c 	struct sk_security_struct *sksec = ep->base.sk->sk_security;
sk               5212 security/selinux/hooks.c 		err = selinux_skb_peerlbl_sid(skb, ep->base.sk->sk_family,
sk               5236 security/selinux/hooks.c 		ad.u.net->sk = ep->base.sk;
sk               5264 security/selinux/hooks.c static int selinux_sctp_bind_connect(struct sock *sk, int optname,
sk               5277 security/selinux/hooks.c 	sock = sk->sk_socket;
sk               5328 security/selinux/hooks.c 			err = selinux_netlbl_socket_connect_locked(sk, addr);
sk               5343 security/selinux/hooks.c static void selinux_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk,
sk               5346 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               5353 security/selinux/hooks.c 		return selinux_sk_clone_security(sk, newsk);
sk               5358 security/selinux/hooks.c 	selinux_netlbl_sctp_sk_clone(sk, newsk);
sk               5361 security/selinux/hooks.c static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
sk               5364 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               5399 security/selinux/hooks.c static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
sk               5401 security/selinux/hooks.c 	u16 family = sk->sk_family;
sk               5402 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               5483 security/selinux/hooks.c static int selinux_tun_dev_attach(struct sock *sk, void *security)
sk               5486 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               5522 security/selinux/hooks.c static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
sk               5529 security/selinux/hooks.c 	struct sk_security_struct *sksec = sk->sk_security;
sk               5547 security/selinux/hooks.c 			rc = sock_has_perm(sk, perm);
sk               5555 security/selinux/hooks.c 				sk->sk_protocol, nlh->nlmsg_type,
sk               5659 security/selinux/hooks.c 	struct sock *sk;
sk               5668 security/selinux/hooks.c 	sk = skb->sk;
sk               5669 security/selinux/hooks.c 	if (sk) {
sk               5672 security/selinux/hooks.c 		if (sk_listener(sk))
sk               5688 security/selinux/hooks.c 		sksec = sk->sk_security;
sk               5718 security/selinux/hooks.c 	struct sock *sk = skb_to_full_sk(skb);
sk               5725 security/selinux/hooks.c 	if (sk == NULL)
sk               5727 security/selinux/hooks.c 	sksec = sk->sk_security;
sk               5755 security/selinux/hooks.c 	struct sock *sk;
sk               5774 security/selinux/hooks.c 	sk = skb_to_full_sk(skb);
sk               5789 security/selinux/hooks.c 	    !(sk && sk_listener(sk)))
sk               5793 security/selinux/hooks.c 	if (sk == NULL) {
sk               5806 security/selinux/hooks.c 	} else if (sk_listener(sk)) {
sk               5819 security/selinux/hooks.c 		sksec = sk->sk_security;
sk               5848 security/selinux/hooks.c 		struct sk_security_struct *sksec = sk->sk_security;
sk               5906 security/selinux/hooks.c static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
sk               5908 security/selinux/hooks.c 	return selinux_nlmsg_perm(sk, skb);
sk                 45 security/selinux/include/netlabel.h void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family);
sk                 46 security/selinux/include/netlabel.h void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk);
sk                 47 security/selinux/include/netlabel.h int selinux_netlbl_socket_post_create(struct sock *sk, u16 family);
sk                 55 security/selinux/include/netlabel.h int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr);
sk                 56 security/selinux/include/netlabel.h int selinux_netlbl_socket_connect_locked(struct sock *sk,
sk                101 security/selinux/include/netlabel.h static inline int selinux_netlbl_conn_setsid(struct sock *sk,
sk                117 security/selinux/include/netlabel.h static inline void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
sk                121 security/selinux/include/netlabel.h static inline void selinux_netlbl_sctp_sk_clone(struct sock *sk,
sk                126 security/selinux/include/netlabel.h static inline int selinux_netlbl_socket_post_create(struct sock *sk,
sk                144 security/selinux/include/netlabel.h static inline int selinux_netlbl_socket_connect(struct sock *sk,
sk                149 security/selinux/include/netlabel.h static inline int selinux_netlbl_socket_connect_locked(struct sock *sk,
sk                 67 security/selinux/netlabel.c static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk)
sk                 70 security/selinux/netlabel.c 	struct sk_security_struct *sksec = sk->sk_security;
sk                100 security/selinux/netlabel.c 							const struct sock *sk,
sk                103 security/selinux/netlabel.c 	struct sk_security_struct *sksec = sk->sk_security;
sk                232 security/selinux/netlabel.c 	struct sock *sk;
sk                236 security/selinux/netlabel.c 	sk = skb_to_full_sk(skb);
sk                237 security/selinux/netlabel.c 	if (sk != NULL) {
sk                238 security/selinux/netlabel.c 		struct sk_security_struct *sksec = sk->sk_security;
sk                242 security/selinux/netlabel.c 		secattr = selinux_netlbl_sock_getattr(sk, sid);
sk                276 security/selinux/netlabel.c 	struct sk_security_struct *sksec = ep->base.sk->sk_security;
sk                280 security/selinux/netlabel.c 	if (ep->base.sk->sk_family != PF_INET &&
sk                281 security/selinux/netlabel.c 				ep->base.sk->sk_family != PF_INET6)
sk                296 security/selinux/netlabel.c 		rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr4, &secattr);
sk                300 security/selinux/netlabel.c 		rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr6, &secattr);
sk                353 security/selinux/netlabel.c void selinux_netlbl_inet_csk_clone(struct sock *sk, u16 family)
sk                355 security/selinux/netlabel.c 	struct sk_security_struct *sksec = sk->sk_security;
sk                371 security/selinux/netlabel.c void selinux_netlbl_sctp_sk_clone(struct sock *sk, struct sock *newsk)
sk                373 security/selinux/netlabel.c 	struct sk_security_struct *sksec = sk->sk_security;
sk                389 security/selinux/netlabel.c int selinux_netlbl_socket_post_create(struct sock *sk, u16 family)
sk                392 security/selinux/netlabel.c 	struct sk_security_struct *sksec = sk->sk_security;
sk                398 security/selinux/netlabel.c 	secattr = selinux_netlbl_sock_genattr(sk);
sk                401 security/selinux/netlabel.c 	rc = netlbl_sock_setattr(sk, family, secattr);
sk                506 security/selinux/netlabel.c 	struct sock *sk = sock->sk;
sk                507 security/selinux/netlabel.c 	struct sk_security_struct *sksec = sk->sk_security;
sk                514 security/selinux/netlabel.c 		lock_sock(sk);
sk                518 security/selinux/netlabel.c 		rc = netlbl_sock_getattr(sk, &secattr);
sk                519 security/selinux/netlabel.c 		release_sock(sk);
sk                541 security/selinux/netlabel.c static int selinux_netlbl_socket_connect_helper(struct sock *sk,
sk                545 security/selinux/netlabel.c 	struct sk_security_struct *sksec = sk->sk_security;
sk                552 security/selinux/netlabel.c 		netlbl_sock_delattr(sk);
sk                557 security/selinux/netlabel.c 	secattr = selinux_netlbl_sock_genattr(sk);
sk                562 security/selinux/netlabel.c 	rc = netlbl_conn_setattr(sk, addr, secattr);
sk                581 security/selinux/netlabel.c int selinux_netlbl_socket_connect_locked(struct sock *sk,
sk                584 security/selinux/netlabel.c 	struct sk_security_struct *sksec = sk->sk_security;
sk                590 security/selinux/netlabel.c 	return selinux_netlbl_socket_connect_helper(sk, addr);
sk                603 security/selinux/netlabel.c int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sk                607 security/selinux/netlabel.c 	lock_sock(sk);
sk                608 security/selinux/netlabel.c 	rc = selinux_netlbl_socket_connect_locked(sk, addr);
sk                609 security/selinux/netlabel.c 	release_sock(sk);
sk                483 security/smack/smack.h 					    struct sock *sk)
sk                485 security/smack/smack.h 	a->a.u.net->sk = sk;
sk                515 security/smack/smack.h 					    struct sock *sk)
sk               1454 security/smack/smack_lsm.c 		if (sock == NULL || sock->sk == NULL)
sk               1457 security/smack/smack_lsm.c 		ssp = sock->sk->sk_security;
sk               1839 security/smack/smack_lsm.c 		ssp = sock->sk->sk_security;
sk               2243 security/smack/smack_lsm.c static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
sk               2264 security/smack/smack_lsm.c 	sk->sk_security = ssp;
sk               2275 security/smack/smack_lsm.c static void smack_sk_free_security(struct sock *sk)
sk               2280 security/smack/smack_lsm.c 	if (sk->sk_family == PF_INET6) {
sk               2283 security/smack/smack_lsm.c 			if (spp->smk_sock != sk)
sk               2291 security/smack/smack_lsm.c 	kfree(sk->sk_security);
sk               2405 security/smack/smack_lsm.c static int smack_netlabel(struct sock *sk, int labeled)
sk               2408 security/smack/smack_lsm.c 	struct socket_smack *ssp = sk->sk_security;
sk               2420 security/smack/smack_lsm.c 	bh_lock_sock_nested(sk);
sk               2424 security/smack/smack_lsm.c 		netlbl_sock_delattr(sk);
sk               2427 security/smack/smack_lsm.c 		rc = netlbl_sock_setattr(sk, sk->sk_family, &skp->smk_netlabel);
sk               2430 security/smack/smack_lsm.c 	bh_unlock_sock(sk);
sk               2447 security/smack/smack_lsm.c static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
sk               2453 security/smack/smack_lsm.c 	struct socket_smack *ssp = sk->sk_security;
sk               2479 security/smack/smack_lsm.c 	return smack_netlabel(sk, sk_lbl);
sk               2525 security/smack/smack_lsm.c 	struct sock *sk = sock->sk;
sk               2527 security/smack/smack_lsm.c 	struct socket_smack *ssp = sock->sk->sk_security;
sk               2539 security/smack/smack_lsm.c 			if (sk != spp->smk_sock)
sk               2575 security/smack/smack_lsm.c 		spp->smk_sock = sk;
sk               2591 security/smack/smack_lsm.c 	spp->smk_sock = sk;
sk               2612 security/smack/smack_lsm.c static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
sk               2616 security/smack/smack_lsm.c 	struct socket_smack *ssp = sk->sk_security;
sk               2654 security/smack/smack_lsm.c 		if (spp->smk_port != port || spp->smk_sock_type != sk->sk_type)
sk               2706 security/smack/smack_lsm.c 	if (sock == NULL || sock->sk == NULL)
sk               2709 security/smack/smack_lsm.c 	ssp = sock->sk->sk_security;
sk               2715 security/smack/smack_lsm.c 		if (sock->sk->sk_family == PF_INET) {
sk               2716 security/smack/smack_lsm.c 			rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
sk               2726 security/smack/smack_lsm.c 	if (sock->sk->sk_family == PF_INET6)
sk               2750 security/smack/smack_lsm.c 	if (sock->sk == NULL)
sk               2757 security/smack/smack_lsm.c 		ssp = sock->sk->sk_security;
sk               2767 security/smack/smack_lsm.c 	return smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
sk               2782 security/smack/smack_lsm.c 	struct socket_smack *asp = socka->sk->sk_security;
sk               2783 security/smack/smack_lsm.c 	struct socket_smack *bsp = sockb->sk->sk_security;
sk               2805 security/smack/smack_lsm.c 	if (sock->sk != NULL && sock->sk->sk_family == PF_INET6) {
sk               2830 security/smack/smack_lsm.c 	if (sock->sk == NULL)
sk               2832 security/smack/smack_lsm.c 	if (sock->sk->sk_family != PF_INET &&
sk               2833 security/smack/smack_lsm.c 	    (!IS_ENABLED(CONFIG_IPV6) || sock->sk->sk_family != PF_INET6))
sk               2846 security/smack/smack_lsm.c 			struct socket_smack *ssp = sock->sk->sk_security;
sk               2852 security/smack/smack_lsm.c 			rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING);
sk               2858 security/smack/smack_lsm.c 	rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
sk               3625 security/smack/smack_lsm.c 	struct socket_smack *ssp = sock->sk->sk_security;
sk               3626 security/smack/smack_lsm.c 	struct socket_smack *osp = other->sk->sk_security;
sk               3634 security/smack/smack_lsm.c 	smk_ad_setfield_u_net_sk(&ad, other->sk);
sk               3663 security/smack/smack_lsm.c 	struct socket_smack *ssp = sock->sk->sk_security;
sk               3674 security/smack/smack_lsm.c 	switch (sock->sk->sk_family) {
sk               3679 security/smack/smack_lsm.c 		rc = smack_netlabel_send(sock->sk, sip);
sk               3693 security/smack/smack_lsm.c 		rc = smk_ipv6_port_check(sock->sk, sap, SMK_SENDING);
sk               3833 security/smack/smack_lsm.c static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
sk               3836 security/smack/smack_lsm.c 	struct socket_smack *ssp = sk->sk_security;
sk               3840 security/smack/smack_lsm.c 	u16 family = sk->sk_family;
sk               3927 security/smack/smack_lsm.c 		rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING);
sk               3957 security/smack/smack_lsm.c 	ssp = sock->sk->sk_security;
sk               4003 security/smack/smack_lsm.c 		family = sock->sk->sk_family;
sk               4007 security/smack/smack_lsm.c 		ssp = sock->sk->sk_security;
sk               4019 security/smack/smack_lsm.c 		if (sock != NULL && sock->sk != NULL)
sk               4020 security/smack/smack_lsm.c 			ssp = sock->sk->sk_security;
sk               4049 security/smack/smack_lsm.c static void smack_sock_graft(struct sock *sk, struct socket *parent)
sk               4054 security/smack/smack_lsm.c 	if (sk == NULL ||
sk               4055 security/smack/smack_lsm.c 	    (sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
sk               4058 security/smack/smack_lsm.c 	ssp = sk->sk_security;
sk               4073 security/smack/smack_lsm.c static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
sk               4076 security/smack/smack_lsm.c 	u16 family = sk->sk_family;
sk               4078 security/smack/smack_lsm.c 	struct socket_smack *ssp = sk->sk_security;
sk               4174 security/smack/smack_lsm.c static void smack_inet_csk_clone(struct sock *sk,
sk               4177 security/smack/smack_lsm.c 	struct socket_smack *ssp = sk->sk_security;
sk                 27 security/smack/smack_netfilter.c 	struct sock *sk = skb_to_full_sk(skb);
sk                 31 security/smack/smack_netfilter.c 	if (sk && sk->sk_security) {
sk                 32 security/smack/smack_netfilter.c 		ssp = sk->sk_security;
sk                 45 security/smack/smack_netfilter.c 	struct sock *sk = skb_to_full_sk(skb);
sk                 49 security/smack/smack_netfilter.c 	if (sk && sk->sk_security) {
sk                 50 security/smack/smack_netfilter.c 		ssp = sk->sk_security;
sk                626 security/tomoyo/network.c static u8 tomoyo_sock_family(struct sock *sk)
sk                632 security/tomoyo/network.c 	family = sk->sk_family;
sk                653 security/tomoyo/network.c 	const u8 family = tomoyo_sock_family(sock->sk);
sk                690 security/tomoyo/network.c 	const u8 family = tomoyo_sock_family(sock->sk);
sk                710 security/tomoyo/network.c 	return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
sk                727 security/tomoyo/network.c 	const u8 family = tomoyo_sock_family(sock->sk);
sk                745 security/tomoyo/network.c 	return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
sk                762 security/tomoyo/network.c 	const u8 family = tomoyo_sock_family(sock->sk);
sk                776 security/tomoyo/network.c 					 sock->sk->sk_protocol, &address);
sk                138 sound/usb/usx2y/us122l.c 	s = us122l->sk.s;
sk                150 sound/usb/usx2y/us122l.c 		vaddr = us122l->sk.write_page + offset;
sk                234 sound/usb/usx2y/us122l.c 	s = us122l->sk.s;
sk                268 sound/usb/usx2y/us122l.c 	poll_wait(file, &us122l->sk.sleep, wait);
sk                272 sound/usb/usx2y/us122l.c 		struct usb_stream *s = us122l->sk.s;
sk                295 sound/usb/usx2y/us122l.c 	usb_stream_stop(&us122l->sk);
sk                296 sound/usb/usx2y/us122l.c 	usb_stream_free(&us122l->sk);
sk                342 sound/usb/usx2y/us122l.c 	if (!usb_stream_new(&us122l->sk, us122l->dev, 1, 2,
sk                352 sound/usb/usx2y/us122l.c 	err = usb_stream_start(&us122l->sk);
sk                411 sound/usb/usx2y/us122l.c 	s = us122l->sk.s;
sk                431 sound/usb/usx2y/us122l.c 	wake_up_all(&us122l->sk.sleep);
sk                539 sound/usb/usx2y/us122l.c 	init_waitqueue_head(&US122L(card)->sk.sleep);
sk                664 sound/usb/usx2y/us122l.c 	usb_stream_stop(&us122l->sk);
sk                704 sound/usb/usx2y/us122l.c 				     us122l->sk.s->cfg.sample_rate);
sk                709 sound/usb/usx2y/us122l.c 	err = usb_stream_start(&us122l->sk);
sk                 10 sound/usb/usx2y/us122l.h 	struct usb_stream_kernel sk;
sk                 14 sound/usb/usx2y/usb_stream.c static unsigned usb_stream_next_packet_size(struct usb_stream_kernel *sk)
sk                 16 sound/usb/usx2y/usb_stream.c 	struct usb_stream *s = sk->s;
sk                 17 sound/usb/usx2y/usb_stream.c 	sk->out_phase_peeked = (sk->out_phase & 0xffff) + sk->freqn;
sk                 18 sound/usb/usx2y/usb_stream.c 	return (sk->out_phase_peeked >> 16) * s->cfg.frame_size;
sk                 21 sound/usb/usx2y/usb_stream.c static void playback_prep_freqn(struct usb_stream_kernel *sk, struct urb *urb)
sk                 23 sound/usb/usx2y/usb_stream.c 	struct usb_stream *s = sk->s;
sk                 26 sound/usb/usx2y/usb_stream.c 	for (pack = 0; pack < sk->n_o_ps; pack++) {
sk                 27 sound/usb/usx2y/usb_stream.c 		int l = usb_stream_next_packet_size(sk);
sk                 31 sound/usb/usx2y/usb_stream.c 		sk->out_phase = sk->out_phase_peeked;
sk                 46 sound/usb/usx2y/usb_stream.c static int init_pipe_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
sk                 53 sound/usb/usx2y/usb_stream.c 	int transfer_length = maxpacket * sk->n_o_ps;
sk                 62 sound/usb/usx2y/usb_stream.c 		urb->number_of_packets = sk->n_o_ps;
sk                 63 sound/usb/usx2y/usb_stream.c 		urb->context = sk;
sk                 74 sound/usb/usx2y/usb_stream.c 		for (p = 1; p < sk->n_o_ps; ++p) {
sk                 83 sound/usb/usx2y/usb_stream.c static int init_urbs(struct usb_stream_kernel *sk, unsigned use_packsize,
sk                 86 sound/usb/usx2y/usb_stream.c 	struct usb_stream	*s = sk->s;
sk                 93 sound/usb/usx2y/usb_stream.c 		sk->inurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL);
sk                 94 sound/usb/usx2y/usb_stream.c 		if (!sk->inurb[u])
sk                 97 sound/usb/usx2y/usb_stream.c 		sk->outurb[u] = usb_alloc_urb(sk->n_o_ps, GFP_KERNEL);
sk                 98 sound/usb/usx2y/usb_stream.c 		if (!sk->outurb[u])
sk                102 sound/usb/usx2y/usb_stream.c 	if (init_pipe_urbs(sk, use_packsize, sk->inurb, indata, dev, in_pipe) ||
sk                103 sound/usb/usx2y/usb_stream.c 	    init_pipe_urbs(sk, use_packsize, sk->outurb, sk->write_page, dev,
sk                129 sound/usb/usx2y/usb_stream.c void usb_stream_free(struct usb_stream_kernel *sk)
sk                135 sound/usb/usx2y/usb_stream.c 		usb_free_urb(sk->inurb[u]);
sk                136 sound/usb/usx2y/usb_stream.c 		sk->inurb[u] = NULL;
sk                137 sound/usb/usx2y/usb_stream.c 		usb_free_urb(sk->outurb[u]);
sk                138 sound/usb/usx2y/usb_stream.c 		sk->outurb[u] = NULL;
sk                141 sound/usb/usx2y/usb_stream.c 	s = sk->s;
sk                145 sound/usb/usx2y/usb_stream.c 	free_pages_exact(sk->write_page, s->write_size);
sk                146 sound/usb/usx2y/usb_stream.c 	sk->write_page = NULL;
sk                148 sound/usb/usx2y/usb_stream.c 	sk->s = NULL;
sk                151 sound/usb/usx2y/usb_stream.c struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
sk                191 sound/usb/usx2y/usb_stream.c 	sk->s = alloc_pages_exact(read_size,
sk                193 sound/usb/usx2y/usb_stream.c 	if (!sk->s) {
sk                197 sound/usb/usx2y/usb_stream.c 	sk->s->cfg.version = USB_STREAM_INTERFACE_VERSION;
sk                199 sound/usb/usx2y/usb_stream.c 	sk->s->read_size = read_size;
sk                201 sound/usb/usx2y/usb_stream.c 	sk->s->cfg.sample_rate = sample_rate;
sk                202 sound/usb/usx2y/usb_stream.c 	sk->s->cfg.frame_size = frame_size;
sk                203 sound/usb/usx2y/usb_stream.c 	sk->n_o_ps = packets;
sk                204 sound/usb/usx2y/usb_stream.c 	sk->s->inpackets = packets * USB_STREAM_URBDEPTH;
sk                205 sound/usb/usx2y/usb_stream.c 	sk->s->cfg.period_frames = period_frames;
sk                206 sound/usb/usx2y/usb_stream.c 	sk->s->period_size = frame_size * period_frames;
sk                208 sound/usb/usx2y/usb_stream.c 	sk->s->write_size = write_size;
sk                210 sound/usb/usx2y/usb_stream.c 	sk->write_page = alloc_pages_exact(write_size,
sk                212 sound/usb/usx2y/usb_stream.c 	if (!sk->write_page) {
sk                214 sound/usb/usx2y/usb_stream.c 		usb_stream_free(sk);
sk                220 sound/usb/usx2y/usb_stream.c 		sk->freqn = get_usb_full_speed_rate(sample_rate);
sk                222 sound/usb/usx2y/usb_stream.c 		sk->freqn = get_usb_high_speed_rate(sample_rate);
sk                224 sound/usb/usx2y/usb_stream.c 	if (init_urbs(sk, use_packsize, dev, in_pipe, out_pipe) < 0) {
sk                225 sound/usb/usx2y/usb_stream.c 		usb_stream_free(sk);
sk                229 sound/usb/usx2y/usb_stream.c 	sk->s->state = usb_stream_stopped;
sk                231 sound/usb/usx2y/usb_stream.c 	return sk->s;
sk                237 sound/usb/usx2y/usb_stream.c static bool balance_check(struct usb_stream_kernel *sk, struct urb *urb)
sk                243 sound/usb/usx2y/usb_stream.c 		sk->iso_frame_balance = 0x7FFFFFFF;
sk                246 sound/usb/usx2y/usb_stream.c 	r = sk->iso_frame_balance == 0;
sk                248 sound/usb/usx2y/usb_stream.c 		sk->i_urb = urb;
sk                252 sound/usb/usx2y/usb_stream.c static bool balance_playback(struct usb_stream_kernel *sk, struct urb *urb)
sk                254 sound/usb/usx2y/usb_stream.c 	sk->iso_frame_balance += urb->number_of_packets;
sk                255 sound/usb/usx2y/usb_stream.c 	return balance_check(sk, urb);
sk                258 sound/usb/usx2y/usb_stream.c static bool balance_capture(struct usb_stream_kernel *sk, struct urb *urb)
sk                260 sound/usb/usx2y/usb_stream.c 	sk->iso_frame_balance -= urb->number_of_packets;
sk                261 sound/usb/usx2y/usb_stream.c 	return balance_check(sk, urb);
sk                274 sound/usb/usx2y/usb_stream.c static int usb_stream_prepare_playback(struct usb_stream_kernel *sk,
sk                277 sound/usb/usx2y/usb_stream.c 	struct usb_stream *s = sk->s;
sk                282 sound/usb/usx2y/usb_stream.c 	io = sk->idle_outurb;
sk                286 sound/usb/usx2y/usb_stream.c 		struct urb *ii = sk->completed_inurb;
sk                297 sound/usb/usx2y/usb_stream.c 	     s->sync_packet < inurb->number_of_packets && p < sk->n_o_ps;
sk                350 sound/usb/usx2y/usb_stream.c static int submit_urbs(struct usb_stream_kernel *sk,
sk                354 sound/usb/usx2y/usb_stream.c 	prepare_inurb(sk->idle_outurb->number_of_packets, sk->idle_inurb);
sk                355 sound/usb/usx2y/usb_stream.c 	err = usb_submit_urb(sk->idle_inurb, GFP_ATOMIC);
sk                359 sound/usb/usx2y/usb_stream.c 	sk->idle_inurb = sk->completed_inurb;
sk                360 sound/usb/usx2y/usb_stream.c 	sk->completed_inurb = inurb;
sk                361 sound/usb/usx2y/usb_stream.c 	err = usb_submit_urb(sk->idle_outurb, GFP_ATOMIC);
sk                365 sound/usb/usx2y/usb_stream.c 	sk->idle_outurb = sk->completed_outurb;
sk                366 sound/usb/usx2y/usb_stream.c 	sk->completed_outurb = outurb;
sk                390 sound/usb/usx2y/usb_stream.c 		iu = sk->idle_inurb;
sk                415 sound/usb/usx2y/usb_stream.c 	if (iu == sk->completed_inurb) {
sk                423 sound/usb/usx2y/usb_stream.c 	iu = sk->completed_inurb;
sk                435 sound/usb/usx2y/usb_stream.c static void stream_idle(struct usb_stream_kernel *sk,
sk                438 sound/usb/usx2y/usb_stream.c 	struct usb_stream *s = sk->s;
sk                481 sound/usb/usx2y/usb_stream.c 	s->outpacket[0].offset = (sk->idle_outurb->transfer_buffer -
sk                482 sound/usb/usx2y/usb_stream.c 				  sk->write_page) - l;
sk                484 sound/usb/usx2y/usb_stream.c 	if (usb_stream_prepare_playback(sk, inurb) < 0)
sk                487 sound/usb/usx2y/usb_stream.c 	s->outpacket[0].length = sk->idle_outurb->transfer_buffer_length + l;
sk                488 sound/usb/usx2y/usb_stream.c 	s->outpacket[1].offset = sk->completed_outurb->transfer_buffer -
sk                489 sound/usb/usx2y/usb_stream.c 		sk->write_page;
sk                491 sound/usb/usx2y/usb_stream.c 	if (submit_urbs(sk, inurb, outurb) < 0)
sk                496 sound/usb/usx2y/usb_stream.c 	wake_up_all(&sk->sleep);
sk                500 sound/usb/usx2y/usb_stream.c 	wake_up_all(&sk->sleep);
sk                505 sound/usb/usx2y/usb_stream.c 	struct usb_stream_kernel *sk = urb->context;
sk                506 sound/usb/usx2y/usb_stream.c 	if (balance_capture(sk, urb))
sk                507 sound/usb/usx2y/usb_stream.c 		stream_idle(sk, urb, sk->i_urb);
sk                512 sound/usb/usx2y/usb_stream.c 	struct usb_stream_kernel *sk = urb->context;
sk                513 sound/usb/usx2y/usb_stream.c 	if (balance_playback(sk, urb))
sk                514 sound/usb/usx2y/usb_stream.c 		stream_idle(sk, sk->i_urb, urb);
sk                517 sound/usb/usx2y/usb_stream.c static void stream_start(struct usb_stream_kernel *sk,
sk                520 sound/usb/usx2y/usb_stream.c 	struct usb_stream *s = sk->s;
sk                580 sound/usb/usx2y/usb_stream.c 		if (usb_stream_prepare_playback(sk, inurb) < 0)
sk                584 sound/usb/usx2y/usb_stream.c 		playback_prep_freqn(sk, sk->idle_outurb);
sk                586 sound/usb/usx2y/usb_stream.c 	if (submit_urbs(sk, inurb, outurb) < 0)
sk                592 sound/usb/usx2y/usb_stream.c 		subs_set_complete(sk->inurb, i_capture_idle);
sk                593 sound/usb/usx2y/usb_stream.c 		subs_set_complete(sk->outurb, i_playback_idle);
sk                600 sound/usb/usx2y/usb_stream.c 	struct usb_stream_kernel *sk = urb->context;
sk                601 sound/usb/usx2y/usb_stream.c 	struct usb_stream *s = sk->s;
sk                639 sound/usb/usx2y/usb_stream.c 	if (balance_capture(sk, urb))
sk                640 sound/usb/usx2y/usb_stream.c 		stream_start(sk, urb, sk->i_urb);
sk                645 sound/usb/usx2y/usb_stream.c 	struct usb_stream_kernel *sk = urb->context;
sk                646 sound/usb/usx2y/usb_stream.c 	if (balance_playback(sk, urb))
sk                647 sound/usb/usx2y/usb_stream.c 		stream_start(sk, sk->i_urb, urb);
sk                650 sound/usb/usx2y/usb_stream.c int usb_stream_start(struct usb_stream_kernel *sk)
sk                652 sound/usb/usx2y/usb_stream.c 	struct usb_stream *s = sk->s;
sk                660 sound/usb/usx2y/usb_stream.c 	subs_set_complete(sk->inurb, i_capture_start);
sk                661 sound/usb/usx2y/usb_stream.c 	subs_set_complete(sk->outurb, i_playback_start);
sk                662 sound/usb/usx2y/usb_stream.c 	memset(sk->write_page, 0, s->write_size);
sk                669 sound/usb/usx2y/usb_stream.c 	sk->iso_frame_balance = 0;
sk                672 sound/usb/usx2y/usb_stream.c 		struct urb *inurb = sk->inurb[u];
sk                673 sound/usb/usx2y/usb_stream.c 		struct urb *outurb = sk->outurb[u];
sk                674 sound/usb/usx2y/usb_stream.c 		playback_prep_freqn(sk, outurb);
sk                713 sound/usb/usx2y/usb_stream.c 		usb_stream_stop(sk);
sk                724 sound/usb/usx2y/usb_stream.c 	sk->idle_inurb = sk->inurb[USB_STREAM_NURBS - 2];
sk                725 sound/usb/usx2y/usb_stream.c 	sk->idle_outurb = sk->outurb[USB_STREAM_NURBS - 2];
sk                726 sound/usb/usx2y/usb_stream.c 	sk->completed_inurb = sk->inurb[USB_STREAM_NURBS - 1];
sk                727 sound/usb/usx2y/usb_stream.c 	sk->completed_outurb = sk->outurb[USB_STREAM_NURBS - 1];
sk                745 sound/usb/usx2y/usb_stream.c void usb_stream_stop(struct usb_stream_kernel *sk)
sk                748 sound/usb/usx2y/usb_stream.c 	if (!sk->s)
sk                751 sound/usb/usx2y/usb_stream.c 		usb_kill_urb(sk->inurb[u]);
sk                752 sound/usb/usx2y/usb_stream.c 		usb_kill_urb(sk->outurb[u]);
sk                754 sound/usb/usx2y/usb_stream.c 	sk->s->state = usb_stream_stopped;
sk                 34 sound/usb/usx2y/usb_stream.h struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
sk               3007 tools/include/uapi/linux/bpf.h 	__bpf_md_ptr(struct bpf_sock *, sk);
sk               3304 tools/include/uapi/linux/bpf.h 	__bpf_md_ptr(struct bpf_sock *, sk);
sk               3356 tools/include/uapi/linux/bpf.h 	__bpf_md_ptr(struct bpf_sock *, sk);
sk               3606 tools/include/uapi/linux/bpf.h 	__bpf_md_ptr(struct bpf_sock *, sk);
sk                652 tools/perf/util/sort.c 	int sk = *(const int *)arg;
sk                657 tools/perf/util/sort.c 	return sk >= 0 && he->socket != sk;
sk                185 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_sk_release)(struct bpf_sock *sk) =
sk                197 tools/testing/selftests/bpf/bpf_helpers.h static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
sk                199 tools/testing/selftests/bpf/bpf_helpers.h static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
sk                201 tools/testing/selftests/bpf/bpf_helpers.h static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
sk                205 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_tcp_check_syncookie)(struct bpf_sock *sk,
sk                227 tools/testing/selftests/bpf/bpf_helpers.h static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk,
sk                230 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) =
sk                233 tools/testing/selftests/bpf/bpf_helpers.h static long long (*bpf_tcp_gen_syncookie)(struct bpf_sock *sk, void *ip,
sk                421 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 	int sk, ret;
sk                425 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 	sk = socket(PF_INET, SOCK_DGRAM, 0);
sk                426 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 	if (sk < 0)
sk                429 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
sk                431 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		close(sk);
sk                436 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
sk                438 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 		close(sk);
sk                442 tools/testing/selftests/bpf/prog_tests/flow_dissector.c 	close(sk);
sk                 64 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.sk = (struct bpf_sock *)1;
sk                 67 tools/testing/selftests/bpf/prog_tests/skb_ctx.c 	skb.sk = 0;
sk                 26 tools/testing/selftests/bpf/progs/connect4_prog.c 	struct bpf_sock *sk;
sk                 38 tools/testing/selftests/bpf/progs/connect4_prog.c 		sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
sk                 41 tools/testing/selftests/bpf/progs/connect4_prog.c 		sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv4),
sk                 44 tools/testing/selftests/bpf/progs/connect4_prog.c 	if (!sk)
sk                 47 tools/testing/selftests/bpf/progs/connect4_prog.c 	if (sk->src_ip4 != tuple.ipv4.daddr ||
sk                 48 tools/testing/selftests/bpf/progs/connect4_prog.c 	    sk->src_port != DST_REWRITE_PORT4) {
sk                 49 tools/testing/selftests/bpf/progs/connect4_prog.c 		bpf_sk_release(sk);
sk                 53 tools/testing/selftests/bpf/progs/connect4_prog.c 	bpf_sk_release(sk);
sk                 34 tools/testing/selftests/bpf/progs/connect6_prog.c 	struct bpf_sock *sk;
sk                 50 tools/testing/selftests/bpf/progs/connect6_prog.c 		sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv6),
sk                 53 tools/testing/selftests/bpf/progs/connect6_prog.c 		sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv6),
sk                 56 tools/testing/selftests/bpf/progs/connect6_prog.c 	if (!sk)
sk                 59 tools/testing/selftests/bpf/progs/connect6_prog.c 	if (sk->src_ip6[0] != tuple.ipv6.daddr[0] ||
sk                 60 tools/testing/selftests/bpf/progs/connect6_prog.c 	    sk->src_ip6[1] != tuple.ipv6.daddr[1] ||
sk                 61 tools/testing/selftests/bpf/progs/connect6_prog.c 	    sk->src_ip6[2] != tuple.ipv6.daddr[2] ||
sk                 62 tools/testing/selftests/bpf/progs/connect6_prog.c 	    sk->src_ip6[3] != tuple.ipv6.daddr[3] ||
sk                 63 tools/testing/selftests/bpf/progs/connect6_prog.c 	    sk->src_port != DST_REWRITE_PORT6) {
sk                 64 tools/testing/selftests/bpf/progs/connect6_prog.c 		bpf_sk_release(sk);
sk                 68 tools/testing/selftests/bpf/progs/connect6_prog.c 	bpf_sk_release(sk);
sk                 30 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0,
sk                 44 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	struct bpf_sock *sk;
sk                 53 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	if (!ctx->sk)
sk                 56 tools/testing/selftests/bpf/progs/socket_cookie_prog.c 	p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0, 0);
sk                 43 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 	__u8 sk, map;
sk                 49 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 	sk = d[1];
sk                 61 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 		return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
sk                 62 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c 	return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
sk                 41 tools/testing/selftests/bpf/progs/sockopt_inherit.c 		return bpf_sk_storage_get(&cloned1_map, ctx->sk, 0,
sk                 44 tools/testing/selftests/bpf/progs/sockopt_inherit.c 		return bpf_sk_storage_get(&cloned2_map, ctx->sk, 0,
sk                 47 tools/testing/selftests/bpf/progs/sockopt_inherit.c 		return bpf_sk_storage_get(&listener_only_map, ctx->sk, 0,
sk                 61 tools/testing/selftests/bpf/progs/sockopt_sk.c 	storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
sk                122 tools/testing/selftests/bpf/progs/sockopt_sk.c 	storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
sk                 30 tools/testing/selftests/bpf/progs/tcp_rtt.c 	struct bpf_sock *sk;
sk                 32 tools/testing/selftests/bpf/progs/tcp_rtt.c 	sk = ctx->sk;
sk                 33 tools/testing/selftests/bpf/progs/tcp_rtt.c 	if (!sk)
sk                 36 tools/testing/selftests/bpf/progs/tcp_rtt.c 	storage = bpf_sk_storage_get(&socket_storage_map, sk, 0,
sk                 49 tools/testing/selftests/bpf/progs/tcp_rtt.c 	tcp_sk = bpf_tcp_sock(sk);
sk                 63 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	struct bpf_sock *sk;
sk                 75 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
sk                 76 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	if (sk)
sk                 77 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 		bpf_sk_release(sk);
sk                 78 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
sk                 85 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	struct bpf_sock *sk;
sk                 87 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk                 88 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	if (sk)
sk                 89 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 		bpf_sk_release(sk);
sk                 97 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	struct bpf_sock *sk;
sk                100 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk                101 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	if (sk) {
sk                102 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 		bpf_sk_release(sk);
sk                103 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 		family = sk->family;
sk                112 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	struct bpf_sock *sk;
sk                115 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk                116 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	if (sk) {
sk                117 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 		sk += 1;
sk                118 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 		bpf_sk_release(sk);
sk                127 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	struct bpf_sock *sk;
sk                130 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk                131 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk += 1;
sk                132 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	if (sk)
sk                133 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 		bpf_sk_release(sk);
sk                150 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	struct bpf_sock *sk;
sk                152 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk                153 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	bpf_sk_release(sk);
sk                154 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	bpf_sk_release(sk);
sk                162 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	struct bpf_sock *sk;
sk                164 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk                165 tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c 	bpf_sk_release(sk);
sk                147 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	struct bpf_sock *sk, *sk_ret;
sk                152 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	sk = skb->sk;
sk                153 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	if (!sk || sk->state == 10)
sk                156 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	sk = bpf_sk_fullsock(sk);
sk                157 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	if (!sk || sk->family != AF_INET6 || sk->protocol != IPPROTO_TCP ||
sk                158 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	    !is_loopback6(sk->src_ip6))
sk                161 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	tp = bpf_tcp_sock(sk);
sk                170 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
sk                172 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
sk                182 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	skcpy(sk_ret, sk);
sk                187 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 		pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk, 0, 0);
sk                188 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 		pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10, sk,
sk                191 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 		pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk,
sk                195 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 						   sk, &cli_cnt_init,
sk                219 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	struct bpf_sock *sk, *sk_ret;
sk                225 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	sk = skb->sk;
sk                226 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6))
sk                230 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port))
sk                233 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	if (sk->state != 10 && sk->state != 12)
sk                236 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	sk = bpf_get_listener_sock(sk);
sk                237 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	if (!sk)
sk                240 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	tp = bpf_tcp_sock(sk);
sk                249 tools/testing/selftests/bpf/progs/test_sock_fields_kern.c 	skcpy(sk_ret, sk);
sk                 26 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c static __always_inline __s64 gen_syncookie(void *data_end, struct bpf_sock *sk,
sk                 40 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		return bpf_tcp_gen_syncookie(sk, iph, ip_size, tcph, thlen);
sk                 49 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 	struct bpf_sock *sk;
sk                 82 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv4),
sk                 84 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		if (!sk)
sk                 87 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		if (sk->state != BPF_TCP_LISTEN)
sk                 90 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		seq_mss = gen_syncookie(data_end, sk, ipv4h, sizeof(*ipv4h),
sk                 93 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		ret = bpf_tcp_check_syncookie(sk, ipv4h, sizeof(*ipv4h),
sk                114 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		sk = bpf_skc_lookup_tcp(ctx, &tup, sizeof(tup.ipv6),
sk                116 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		if (!sk)
sk                119 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		if (sk->state != BPF_TCP_LISTEN)
sk                122 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		seq_mss = gen_syncookie(data_end, sk, ipv6h, sizeof(*ipv6h),
sk                125 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 		ret = bpf_tcp_check_syncookie(sk, ipv6h, sizeof(*ipv6h),
sk                148 tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c 	bpf_sk_release(sk);
sk                 82 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	struct sock		sk;
sk                 83 tools/testing/selftests/bpf/progs/test_tcp_estats.c #define inet_daddr		sk.__sk_common.skc_daddr
sk                 84 tools/testing/selftests/bpf/progs/test_tcp_estats.c #define inet_dport		sk.__sk_common.skc_dport
sk                 93 tools/testing/selftests/bpf/progs/test_tcp_estats.c static inline struct inet_sock *inet_sk(const struct sock *sk)
sk                 95 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	return (struct inet_sock *)sk;
sk                212 tools/testing/selftests/bpf/progs/test_tcp_estats.c 						    struct sock *sk)
sk                214 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	conn_id->localport = _(inet_sk(sk)->inet_sport);
sk                215 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	conn_id->remport = _(inet_sk(sk)->inet_dport);
sk                217 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	if (_(sk->sk_family) == AF_INET6)
sk                219 tools/testing/selftests/bpf/progs/test_tcp_estats.c 				  sk->sk_v6_rcv_saddr.s6_addr32,
sk                220 tools/testing/selftests/bpf/progs/test_tcp_estats.c 				  sk->sk_v6_daddr.s6_addr32);
sk                223 tools/testing/selftests/bpf/progs/test_tcp_estats.c 				  &inet_sk(sk)->inet_saddr,
sk                224 tools/testing/selftests/bpf/progs/test_tcp_estats.c 				  &inet_sk(sk)->inet_daddr);
sk                227 tools/testing/selftests/bpf/progs/test_tcp_estats.c static __always_inline void tcp_estats_init(struct sock *sk,
sk                233 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	tcp_estats_conn_id_init(conn_id, sk);
sk                236 tools/testing/selftests/bpf/progs/test_tcp_estats.c static __always_inline void send_basic_event(struct sock *sk,
sk                243 tools/testing/selftests/bpf/progs/test_tcp_estats.c 	tcp_estats_init(sk, &ev.event, &ev.conn_id, type);
sk                 82 tools/testing/selftests/bpf/test_sock_fields.c static void print_sk(const struct bpf_sock *sk)
sk                 87 tools/testing/selftests/bpf/test_sock_fields.c 	inet_ntop(AF_INET, &sk->src_ip4, src_ip4, sizeof(src_ip4));
sk                 88 tools/testing/selftests/bpf/test_sock_fields.c 	inet_ntop(AF_INET6, &sk->src_ip6, src_ip6, sizeof(src_ip6));
sk                 89 tools/testing/selftests/bpf/test_sock_fields.c 	inet_ntop(AF_INET, &sk->dst_ip4, dst_ip4, sizeof(dst_ip4));
sk                 90 tools/testing/selftests/bpf/test_sock_fields.c 	inet_ntop(AF_INET6, &sk->dst_ip6, dst_ip6, sizeof(dst_ip6));
sk                 95 tools/testing/selftests/bpf/test_sock_fields.c 	       sk->state, sk->bound_dev_if, sk->family, sk->type, sk->protocol,
sk                 96 tools/testing/selftests/bpf/test_sock_fields.c 	       sk->mark, sk->priority,
sk                 97 tools/testing/selftests/bpf/test_sock_fields.c 	       sk->src_ip4, src_ip4,
sk                 98 tools/testing/selftests/bpf/test_sock_fields.c 	       sk->src_ip6[0], sk->src_ip6[1], sk->src_ip6[2], sk->src_ip6[3],
sk                 99 tools/testing/selftests/bpf/test_sock_fields.c 	       src_ip6, sk->src_port,
sk                100 tools/testing/selftests/bpf/test_sock_fields.c 	       sk->dst_ip4, dst_ip4,
sk                101 tools/testing/selftests/bpf/test_sock_fields.c 	       sk->dst_ip6[0], sk->dst_ip6[1], sk->dst_ip6[2], sk->dst_ip6[3],
sk                102 tools/testing/selftests/bpf/test_sock_fields.c 	       dst_ip6, ntohs(sk->dst_port));
sk                  4 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                 16 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                 30 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                 45 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                 57 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                 73 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                 91 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                108 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                126 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                144 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                163 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                181 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                199 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                217 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                236 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                248 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                264 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                281 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                298 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                316 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                337 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                350 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                369 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                388 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                411 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                437 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                463 tools/testing/selftests/bpf/verifier/sock.c 	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, offsetof(struct __sk_buff, sk)),
sk                826 tools/testing/selftests/cgroup/test_memcontrol.c 	int sk, client_sk, ctl_fd, yes = 1, ret = -1;
sk                835 tools/testing/selftests/cgroup/test_memcontrol.c 	sk = socket(AF_INET6, SOCK_STREAM, 0);
sk                836 tools/testing/selftests/cgroup/test_memcontrol.c 	if (sk < 0)
sk                839 tools/testing/selftests/cgroup/test_memcontrol.c 	if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0)
sk                842 tools/testing/selftests/cgroup/test_memcontrol.c 	if (bind(sk, (struct sockaddr *)&saddr, slen)) {
sk                847 tools/testing/selftests/cgroup/test_memcontrol.c 	if (listen(sk, 1))
sk                856 tools/testing/selftests/cgroup/test_memcontrol.c 	client_sk = accept(sk, NULL, NULL);
sk                874 tools/testing/selftests/cgroup/test_memcontrol.c 	close(sk);
sk                884 tools/testing/selftests/cgroup/test_memcontrol.c 	int sk, ret;
sk                891 tools/testing/selftests/cgroup/test_memcontrol.c 	sk = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
sk                892 tools/testing/selftests/cgroup/test_memcontrol.c 	if (sk < 0)
sk                895 tools/testing/selftests/cgroup/test_memcontrol.c 	ret = connect(sk, ai->ai_addr, ai->ai_addrlen);
sk                904 tools/testing/selftests/cgroup/test_memcontrol.c 		if (read(sk, buf, sizeof(buf)) <= 0)
sk                923 tools/testing/selftests/cgroup/test_memcontrol.c 	close(sk);