Lines Matching refs:sk

Each entry below gives the source line number in the kernel's net/ipv4/tcp_ipv4.c, the line that references `sk`, and the enclosing function; a trailing `argument` or `local` marks `sk` as a parameter or a local variable of that function.
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) in tcp_twsk_unique() argument
111 struct tcp_sock *tp = tcp_sk(sk); in tcp_twsk_unique()
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) in tcp_v4_connect() argument
144 struct inet_sock *inet = inet_sk(sk); in tcp_v4_connect()
145 struct tcp_sock *tp = tcp_sk(sk); in tcp_v4_connect()
161 sock_owned_by_user(sk)); in tcp_v4_connect()
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, in tcp_v4_connect()
174 orig_sport, orig_dport, sk); in tcp_v4_connect()
178 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); in tcp_v4_connect()
192 sk_rcv_saddr_set(sk, inet->inet_saddr); in tcp_v4_connect()
204 tcp_fetch_timewait_stamp(sk, &rt->dst); in tcp_v4_connect()
207 sk_daddr_set(sk, daddr); in tcp_v4_connect()
209 inet_csk(sk)->icsk_ext_hdr_len = 0; in tcp_v4_connect()
211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in tcp_v4_connect()
220 tcp_set_state(sk, TCP_SYN_SENT); in tcp_v4_connect()
221 err = inet_hash_connect(&tcp_death_row, sk); in tcp_v4_connect()
225 sk_set_txhash(sk); in tcp_v4_connect()
228 inet->inet_sport, inet->inet_dport, sk); in tcp_v4_connect()
235 sk->sk_gso_type = SKB_GSO_TCPV4; in tcp_v4_connect()
236 sk_setup_caps(sk, &rt->dst); in tcp_v4_connect()
246 err = tcp_connect(sk); in tcp_v4_connect()
259 tcp_set_state(sk, TCP_CLOSE); in tcp_v4_connect()
261 sk->sk_route_caps = 0; in tcp_v4_connect()
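The tcp_v4_connect() lines above are the kernel side of connect(2) on an AF_INET stream socket: the state moves to TCP_SYN_SENT, inet_hash_connect() picks and hashes the source port, tcp_connect() sends the SYN, and on failure the state falls back to TCP_CLOSE and the error is returned to the caller. A minimal user-space sketch of the call that drives this path (illustrative only; the address 192.0.2.1:80 is a placeholder and error handling is trimmed):

    /* connect(2) on a TCP socket ends up in tcp_v4_connect() */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        struct sockaddr_in dst;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
            return 1;
        memset(&dst, 0, sizeof(dst));
        dst.sin_family = AF_INET;
        dst.sin_port = htons(80);
        inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

        /* a failure here surfaces the err value tcp_v4_connect() returns */
        if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
            perror("connect");
        close(fd);
        return 0;
    }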
272 void tcp_v4_mtu_reduced(struct sock *sk) in tcp_v4_mtu_reduced() argument
275 struct inet_sock *inet = inet_sk(sk); in tcp_v4_mtu_reduced()
276 u32 mtu = tcp_sk(sk)->mtu_info; in tcp_v4_mtu_reduced()
278 dst = inet_csk_update_pmtu(sk, mtu); in tcp_v4_mtu_reduced()
285 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) in tcp_v4_mtu_reduced()
286 sk->sk_err_soft = EMSGSIZE; in tcp_v4_mtu_reduced()
291 ip_sk_accept_pmtu(sk) && in tcp_v4_mtu_reduced()
292 inet_csk(sk)->icsk_pmtu_cookie > mtu) { in tcp_v4_mtu_reduced()
293 tcp_sync_mss(sk, mtu); in tcp_v4_mtu_reduced()
300 tcp_simple_retransmit(sk); in tcp_v4_mtu_reduced()
305 static void do_redirect(struct sk_buff *skb, struct sock *sk) in do_redirect() argument
307 struct dst_entry *dst = __sk_dst_check(sk, 0); in do_redirect()
310 dst->ops->redirect(dst, sk, skb); in do_redirect()
315 void tcp_req_err(struct sock *sk, u32 seq, bool abort) in tcp_req_err() argument
317 struct request_sock *req = inet_reqsk(sk); in tcp_req_err()
318 struct net *net = sock_net(sk); in tcp_req_err()
364 struct sock *sk; in tcp_v4_err() local
372 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr, in tcp_v4_err()
375 if (!sk) { in tcp_v4_err()
379 if (sk->sk_state == TCP_TIME_WAIT) { in tcp_v4_err()
380 inet_twsk_put(inet_twsk(sk)); in tcp_v4_err()
384 if (sk->sk_state == TCP_NEW_SYN_RECV) in tcp_v4_err()
385 return tcp_req_err(sk, seq, in tcp_v4_err()
392 bh_lock_sock(sk); in tcp_v4_err()
398 if (sock_owned_by_user(sk)) { in tcp_v4_err()
402 if (sk->sk_state == TCP_CLOSE) in tcp_v4_err()
405 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { in tcp_v4_err()
410 icsk = inet_csk(sk); in tcp_v4_err()
411 tp = tcp_sk(sk); in tcp_v4_err()
415 if (sk->sk_state != TCP_LISTEN && in tcp_v4_err()
423 do_redirect(icmp_skb, sk); in tcp_v4_err()
440 if (sk->sk_state == TCP_LISTEN) in tcp_v4_err()
444 if (!sock_owned_by_user(sk)) { in tcp_v4_err()
445 tcp_v4_mtu_reduced(sk); in tcp_v4_err()
448 sock_hold(sk); in tcp_v4_err()
462 if (sock_owned_by_user(sk)) in tcp_v4_err()
470 skb = tcp_write_queue_head(sk); in tcp_v4_err()
478 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, in tcp_v4_err()
483 tcp_retransmit_timer(sk); in tcp_v4_err()
494 switch (sk->sk_state) { in tcp_v4_err()
500 if (fastopen && !fastopen->sk) in tcp_v4_err()
503 if (!sock_owned_by_user(sk)) { in tcp_v4_err()
504 sk->sk_err = err; in tcp_v4_err()
506 sk->sk_error_report(sk); in tcp_v4_err()
508 tcp_done(sk); in tcp_v4_err()
510 sk->sk_err_soft = err; in tcp_v4_err()
531 inet = inet_sk(sk); in tcp_v4_err()
532 if (!sock_owned_by_user(sk) && inet->recverr) { in tcp_v4_err()
533 sk->sk_err = err; in tcp_v4_err()
534 sk->sk_error_report(sk); in tcp_v4_err()
536 sk->sk_err_soft = err; in tcp_v4_err()
540 bh_unlock_sock(sk); in tcp_v4_err()
541 sock_put(sk); in tcp_v4_err()
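The tcp_v4_err() lines above record incoming ICMP errors against the matching socket: with the socket unlocked and inet->recverr set (the IP_RECVERR socket option), the error is stored in sk->sk_err and sk_error_report() wakes any poller; otherwise only sk_err_soft is updated. A sketch of observing that from user space on an already connected TCP socket fd (the helper name and the 5 s timeout are assumptions for illustration):

    #include <netinet/in.h>
    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* When tcp_v4_err() sets sk->sk_err and calls sk_error_report(),
     * poll() reports POLLERR and getsockopt(SO_ERROR) fetches (and
     * clears) the pending error.
     */
    static int wait_for_sock_error(int fd)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };
        int err = 0, on = 1;
        socklen_t len = sizeof(err);

        /* ask for eager error reporting, matching the inet->recverr check */
        setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

        if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & (POLLERR | POLLHUP))) {
            getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
            fprintf(stderr, "socket error: %s\n", strerror(err));
        }
        return err;
    }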
561 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) in tcp_v4_send_check() argument
563 const struct inet_sock *inet = inet_sk(sk); in tcp_v4_send_check()
582 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) in tcp_v4_send_reset() argument
608 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) in tcp_v4_send_reset()
630 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); in tcp_v4_send_reset()
633 if (!sk && hash_location) { in tcp_v4_send_reset()
658 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *) in tcp_v4_send_reset()
681 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; in tcp_v4_send_reset()
686 if (sk) in tcp_v4_send_reset()
687 arg.bound_dev_if = sk->sk_bound_dev_if; in tcp_v4_send_reset()
783 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) in tcp_v4_timewait_ack() argument
785 struct inet_timewait_sock *tw = inet_twsk(sk); in tcp_v4_timewait_ack()
786 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); in tcp_v4_timewait_ack()
788 tcp_v4_send_ack(sock_net(sk), skb, in tcp_v4_timewait_ack()
802 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, in tcp_v4_reqsk_send_ack() argument
808 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : in tcp_v4_reqsk_send_ack()
809 tcp_sk(sk)->snd_nxt; in tcp_v4_reqsk_send_ack()
811 tcp_v4_send_ack(sock_net(sk), skb, seq, in tcp_v4_reqsk_send_ack()
816 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, in tcp_v4_reqsk_send_ack()
827 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, in tcp_v4_send_synack() argument
839 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) in tcp_v4_send_synack()
842 skb = tcp_make_synack(sk, dst, req, foc, attach_req); in tcp_v4_send_synack()
847 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, in tcp_v4_send_synack()
873 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, in tcp_md5_do_lookup() argument
877 const struct tcp_sock *tp = tcp_sk(sk); in tcp_md5_do_lookup()
884 sock_owned_by_user(sk) || in tcp_md5_do_lookup()
885 lockdep_is_held((spinlock_t *)&sk->sk_lock.slock)); in tcp_md5_do_lookup()
902 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, in tcp_v4_md5_lookup() argument
908 return tcp_md5_do_lookup(sk, addr, AF_INET); in tcp_v4_md5_lookup()
913 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, in tcp_md5_do_add() argument
918 struct tcp_sock *tp = tcp_sk(sk); in tcp_md5_do_add()
921 key = tcp_md5_do_lookup(sk, addr, family); in tcp_md5_do_add()
930 sock_owned_by_user(sk) || in tcp_md5_do_add()
931 lockdep_is_held(&sk->sk_lock.slock)); in tcp_md5_do_add()
937 sk_nocaps_add(sk, NETIF_F_GSO_MASK); in tcp_md5_do_add()
942 key = sock_kmalloc(sk, sizeof(*key), gfp); in tcp_md5_do_add()
946 sock_kfree_s(sk, key, sizeof(*key)); in tcp_md5_do_add()
961 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) in tcp_md5_do_del() argument
965 key = tcp_md5_do_lookup(sk, addr, family); in tcp_md5_do_del()
969 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); in tcp_md5_do_del()
975 static void tcp_clear_md5_list(struct sock *sk) in tcp_clear_md5_list() argument
977 struct tcp_sock *tp = tcp_sk(sk); in tcp_clear_md5_list()
986 atomic_sub(sizeof(*key), &sk->sk_omem_alloc); in tcp_clear_md5_list()
991 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, in tcp_v4_parse_md5_keys() argument
1007 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, in tcp_v4_parse_md5_keys()
1013 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, in tcp_v4_parse_md5_keys()
1074 const struct sock *sk, in tcp_v4_md5_hash_skb() argument
1082 if (sk) { /* valid for establish/request sockets */ in tcp_v4_md5_hash_skb()
1083 saddr = sk->sk_rcv_saddr; in tcp_v4_md5_hash_skb()
1084 daddr = sk->sk_daddr; in tcp_v4_md5_hash_skb()
1124 static bool tcp_v4_inbound_md5_hash(const struct sock *sk, in tcp_v4_inbound_md5_hash() argument
1143 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr, in tcp_v4_inbound_md5_hash()
1152 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); in tcp_v4_inbound_md5_hash()
1157 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); in tcp_v4_inbound_md5_hash()
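The MD5 lines above (tcp_md5_do_lookup/add/del, tcp_v4_parse_md5_keys, tcp_v4_inbound_md5_hash) manage the per-peer TCP-MD5 keys fed in through setsockopt(TCP_MD5SIG); a zero tcpm_keylen deletes the key via tcp_md5_do_del(), anything else installs it via tcp_md5_do_add(). A user-space sketch of that API (the helper name is made up; peer address and key are placeholders, and fields not shown are left zeroed):

    #include <arpa/inet.h>
    #include <linux/tcp.h>
    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Install an MD5 signature key for a given IPv4 peer on socket fd. */
    static int set_tcp_md5_key(int fd, const char *peer_ip, const char *key)
    {
        struct tcp_md5sig md5;
        struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

        memset(&md5, 0, sizeof(md5));
        sin->sin_family = AF_INET;
        inet_pton(AF_INET, peer_ip, &sin->sin_addr);
        md5.tcpm_keylen = strlen(key);
        memcpy(md5.tcpm_key, key, md5.tcpm_keylen);

        /* tcpm_keylen == 0 would instead remove the key (tcp_md5_do_del()) */
        return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
    }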
1193 static struct dst_entry *tcp_v4_route_req(const struct sock *sk, in tcp_v4_route_req() argument
1198 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req); in tcp_v4_route_req()
1235 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) in tcp_v4_conn_request() argument
1242 &tcp_request_sock_ipv4_ops, sk, skb); in tcp_v4_conn_request()
1245 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_v4_conn_request()
1255 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, in tcp_v4_syn_recv_sock() argument
1270 if (sk_acceptq_is_full(sk)) in tcp_v4_syn_recv_sock()
1273 newsk = tcp_create_openreq_child(sk, req, skb); in tcp_v4_syn_recv_sock()
1298 dst = inet_csk_route_child_sock(sk, newsk, req); in tcp_v4_syn_recv_sock()
1310 if (tcp_sk(sk)->rx_opt.user_mss && in tcp_v4_syn_recv_sock()
1311 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) in tcp_v4_syn_recv_sock()
1312 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; in tcp_v4_syn_recv_sock()
1318 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, in tcp_v4_syn_recv_sock()
1333 if (__inet_inherit_port(sk, newsk) < 0) in tcp_v4_syn_recv_sock()
1342 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_v4_syn_recv_sock()
1346 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_v4_syn_recv_sock()
1355 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb) in tcp_v4_cookie_check() argument
1361 sk = cookie_v4_check(sk, skb); in tcp_v4_cookie_check()
1363 return sk; in tcp_v4_cookie_check()
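The request-socket lines above are driven by the listening side of the handshake: a SYN arriving on a listener runs tcp_v4_conn_request(), the completing ACK creates the child socket in tcp_v4_syn_recv_sock() (bounded by sk_acceptq_is_full(), i.e. the listen backlog), and accept(2) then dequeues that child. An illustrative user-space counterpart (helper name and port 8080 are placeholders):

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int serve_one(void)
    {
        struct sockaddr_in addr;
        int lfd = socket(AF_INET, SOCK_STREAM, 0), cfd;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(8080);

        /* the listen backlog is what sk_acceptq_is_full() enforces */
        if (lfd < 0 || bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(lfd, 128) < 0)
            return -1;
        cfd = accept(lfd, NULL, NULL);  /* child built by tcp_v4_syn_recv_sock() */
        close(cfd);
        close(lfd);
        return 0;
    }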
1374 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) in tcp_v4_do_rcv() argument
1378 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ in tcp_v4_do_rcv()
1379 struct dst_entry *dst = sk->sk_rx_dst; in tcp_v4_do_rcv()
1381 sock_rps_save_rxhash(sk, skb); in tcp_v4_do_rcv()
1382 sk_mark_napi_id(sk, skb); in tcp_v4_do_rcv()
1384 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || in tcp_v4_do_rcv()
1387 sk->sk_rx_dst = NULL; in tcp_v4_do_rcv()
1390 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); in tcp_v4_do_rcv()
1397 if (sk->sk_state == TCP_LISTEN) { in tcp_v4_do_rcv()
1398 struct sock *nsk = tcp_v4_cookie_check(sk, skb); in tcp_v4_do_rcv()
1402 if (nsk != sk) { in tcp_v4_do_rcv()
1405 if (tcp_child_process(sk, nsk, skb)) { in tcp_v4_do_rcv()
1412 sock_rps_save_rxhash(sk, skb); in tcp_v4_do_rcv()
1414 if (tcp_rcv_state_process(sk, skb)) { in tcp_v4_do_rcv()
1415 rsk = sk; in tcp_v4_do_rcv()
1432 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); in tcp_v4_do_rcv()
1433 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_v4_do_rcv()
1442 struct sock *sk; in tcp_v4_early_demux() local
1456 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, in tcp_v4_early_demux()
1460 if (sk) { in tcp_v4_early_demux()
1461 skb->sk = sk; in tcp_v4_early_demux()
1463 if (sk_fullsock(sk)) { in tcp_v4_early_demux()
1464 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); in tcp_v4_early_demux()
1469 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) in tcp_v4_early_demux()
1482 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) in tcp_prequeue() argument
1484 struct tcp_sock *tp = tcp_sk(sk); in tcp_prequeue()
1499 if (likely(sk->sk_rx_dst)) in tcp_prequeue()
1506 if (tp->ucopy.memory > sk->sk_rcvbuf) { in tcp_prequeue()
1509 BUG_ON(sock_owned_by_user(sk)); in tcp_prequeue()
1512 sk_backlog_rcv(sk, skb1); in tcp_prequeue()
1513 NET_INC_STATS_BH(sock_net(sk), in tcp_prequeue()
1519 wake_up_interruptible_sync_poll(sk_sleep(sk), in tcp_prequeue()
1521 if (!inet_csk_ack_scheduled(sk)) in tcp_prequeue()
1522 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, in tcp_prequeue()
1523 (3 * tcp_rto_min(sk)) / 4, in tcp_prequeue()
1538 struct sock *sk; in tcp_v4_rcv() local
1585 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); in tcp_v4_rcv()
1586 if (!sk) in tcp_v4_rcv()
1590 if (sk->sk_state == TCP_TIME_WAIT) in tcp_v4_rcv()
1593 if (sk->sk_state == TCP_NEW_SYN_RECV) { in tcp_v4_rcv()
1594 struct request_sock *req = inet_reqsk(sk); in tcp_v4_rcv()
1597 sk = req->rsk_listener; in tcp_v4_rcv()
1598 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) { in tcp_v4_rcv()
1602 if (unlikely(sk->sk_state != TCP_LISTEN)) { in tcp_v4_rcv()
1603 inet_csk_reqsk_queue_drop_and_put(sk, req); in tcp_v4_rcv()
1606 sock_hold(sk); in tcp_v4_rcv()
1607 nsk = tcp_check_req(sk, skb, req, false); in tcp_v4_rcv()
1612 if (nsk == sk) { in tcp_v4_rcv()
1614 } else if (tcp_child_process(sk, nsk, skb)) { in tcp_v4_rcv()
1618 sock_put(sk); in tcp_v4_rcv()
1622 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { in tcp_v4_rcv()
1627 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) in tcp_v4_rcv()
1630 if (tcp_v4_inbound_md5_hash(sk, skb)) in tcp_v4_rcv()
1635 if (sk_filter(sk, skb)) in tcp_v4_rcv()
1640 if (sk->sk_state == TCP_LISTEN) { in tcp_v4_rcv()
1641 ret = tcp_v4_do_rcv(sk, skb); in tcp_v4_rcv()
1645 sk_incoming_cpu_update(sk); in tcp_v4_rcv()
1647 bh_lock_sock_nested(sk); in tcp_v4_rcv()
1648 tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs); in tcp_v4_rcv()
1650 if (!sock_owned_by_user(sk)) { in tcp_v4_rcv()
1651 if (!tcp_prequeue(sk, skb)) in tcp_v4_rcv()
1652 ret = tcp_v4_do_rcv(sk, skb); in tcp_v4_rcv()
1653 } else if (unlikely(sk_add_backlog(sk, skb, in tcp_v4_rcv()
1654 sk->sk_rcvbuf + sk->sk_sndbuf))) { in tcp_v4_rcv()
1655 bh_unlock_sock(sk); in tcp_v4_rcv()
1659 bh_unlock_sock(sk); in tcp_v4_rcv()
1662 sock_put(sk); in tcp_v4_rcv()
1685 sock_put(sk); in tcp_v4_rcv()
1690 inet_twsk_put(inet_twsk(sk)); in tcp_v4_rcv()
1695 inet_twsk_put(inet_twsk(sk)); in tcp_v4_rcv()
1698 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { in tcp_v4_rcv()
1706 inet_twsk_deschedule_put(inet_twsk(sk)); in tcp_v4_rcv()
1707 sk = sk2; in tcp_v4_rcv()
1713 tcp_v4_timewait_ack(sk, skb); in tcp_v4_rcv()
1728 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) in inet_sk_rx_dst_set() argument
1733 sk->sk_rx_dst = dst; in inet_sk_rx_dst_set()
1734 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; in inet_sk_rx_dst_set()
1771 static int tcp_v4_init_sock(struct sock *sk) in tcp_v4_init_sock() argument
1773 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v4_init_sock()
1775 tcp_init_sock(sk); in tcp_v4_init_sock()
1780 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific; in tcp_v4_init_sock()
1786 void tcp_v4_destroy_sock(struct sock *sk) in tcp_v4_destroy_sock() argument
1788 struct tcp_sock *tp = tcp_sk(sk); in tcp_v4_destroy_sock()
1790 tcp_clear_xmit_timers(sk); in tcp_v4_destroy_sock()
1792 tcp_cleanup_congestion_control(sk); in tcp_v4_destroy_sock()
1795 tcp_write_queue_purge(sk); in tcp_v4_destroy_sock()
1803 tcp_clear_md5_list(sk); in tcp_v4_destroy_sock()
1813 if (inet_csk(sk)->icsk_bind_hash) in tcp_v4_destroy_sock()
1814 inet_put_port(sk); in tcp_v4_destroy_sock()
1822 sk_sockets_allocated_dec(sk); in tcp_v4_destroy_sock()
1823 sock_release_memcg(sk); in tcp_v4_destroy_sock()
1839 struct sock *sk = cur; in listening_get_next() local
1844 if (!sk) { in listening_get_next()
1847 sk = sk_nulls_head(&ilb->head); in listening_get_next()
1855 sk = sk_nulls_next(sk); in listening_get_next()
1857 sk_nulls_for_each_from(sk, node) { in listening_get_next()
1858 if (!net_eq(sock_net(sk), net)) in listening_get_next()
1860 if (sk->sk_family == st->family) { in listening_get_next()
1861 cur = sk; in listening_get_next()
1864 icsk = inet_csk(sk); in listening_get_next()
1871 sk = sk_nulls_head(&ilb->head); in listening_get_next()
1912 struct sock *sk; in established_get_first() local
1921 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { in established_get_first()
1922 if (sk->sk_family != st->family || in established_get_first()
1923 !net_eq(sock_net(sk), net)) { in established_get_first()
1926 rc = sk; in established_get_first()
1937 struct sock *sk = cur; in established_get_next() local
1945 sk = sk_nulls_next(sk); in established_get_next()
1947 sk_nulls_for_each_from(sk, node) { in established_get_next()
1948 if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) in established_get_next()
1949 return sk; in established_get_next()
2156 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) in get_tcp4_sock() argument
2160 const struct tcp_sock *tp = tcp_sk(sk); in get_tcp4_sock()
2161 const struct inet_connection_sock *icsk = inet_csk(sk); in get_tcp4_sock()
2162 const struct inet_sock *inet = inet_sk(sk); in get_tcp4_sock()
2179 } else if (timer_pending(&sk->sk_timer)) { in get_tcp4_sock()
2181 timer_expires = sk->sk_timer.expires; in get_tcp4_sock()
2187 state = sk_state_load(sk); in get_tcp4_sock()
2189 rx_queue = sk->sk_ack_backlog; in get_tcp4_sock()
2204 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)), in get_tcp4_sock()
2206 sock_i_ino(sk), in get_tcp4_sock()
2207 atomic_read(&sk->sk_refcnt), sk, in get_tcp4_sock()
2241 struct sock *sk = v; in tcp4_seq_show() local
2252 if (sk->sk_state == TCP_TIME_WAIT) in tcp4_seq_show()
2254 else if (sk->sk_state == TCP_NEW_SYN_RECV) in tcp4_seq_show()
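The seq-file lines above (listening_get_next, established_get_first/next, get_tcp4_sock, tcp4_seq_show) walk the listener and established hash tables and format one row per socket for /proc/net/tcp, with TIME_WAIT and NEW_SYN_RECV entries handled by their own formatters. A trivial sketch that shows the same rows from user space (helper name is made up):

    #include <stdio.h>

    /* Dump /proc/net/tcp: hex local/remote address:port, state,
     * tx/rx queue, uid, inode, ... exactly as get_tcp4_sock() prints them.
     */
    static void dump_proc_net_tcp(void)
    {
        char line[512];
        FILE *f = fopen("/proc/net/tcp", "r");

        if (!f)
            return;
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
    }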
2373 struct sock *sk; in tcp_sk_init() local
2375 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, in tcp_sk_init()
2379 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; in tcp_sk_init()