Lines matching refs:sk — each entry shows the line number in the source file, the matching line, the enclosing function, and whether sk is a function argument or a local variable there.

329 void tcp_enter_memory_pressure(struct sock *sk) in tcp_enter_memory_pressure() argument
332 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); in tcp_enter_memory_pressure()
380 void tcp_init_sock(struct sock *sk) in tcp_init_sock() argument
382 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_sock()
383 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_sock()
386 tcp_init_xmit_timers(sk); in tcp_init_sock()
411 tcp_assign_congestion_control(sk); in tcp_init_sock()
415 sk->sk_state = TCP_CLOSE; in tcp_init_sock()
417 sk->sk_write_space = sk_stream_write_space; in tcp_init_sock()
418 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); in tcp_init_sock()
422 sk->sk_sndbuf = sysctl_tcp_wmem[1]; in tcp_init_sock()
423 sk->sk_rcvbuf = sysctl_tcp_rmem[1]; in tcp_init_sock()
426 sock_update_memcg(sk); in tcp_init_sock()
427 sk_sockets_allocated_inc(sk); in tcp_init_sock()
432 static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb) in tcp_tx_timestamp() argument
434 if (sk->sk_tsflags) { in tcp_tx_timestamp()
437 sock_tx_timestamp(sk, &shinfo->tx_flags); in tcp_tx_timestamp()
453 struct sock *sk = sock->sk; in tcp_poll() local
454 const struct tcp_sock *tp = tcp_sk(sk); in tcp_poll()
457 sock_rps_record_flow(sk); in tcp_poll()
459 sock_poll_wait(file, sk_sleep(sk), wait); in tcp_poll()
461 state = sk_state_load(sk); in tcp_poll()
463 return inet_csk_listen_poll(sk); in tcp_poll()
499 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) in tcp_poll()
501 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_poll()
507 int target = sock_rcvlowat(sk, 0, INT_MAX); in tcp_poll()
510 !sock_flag(sk, SOCK_URGINLINE) && in tcp_poll()
517 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { in tcp_poll()
518 if (sk_stream_is_writeable(sk)) { in tcp_poll()
521 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); in tcp_poll()
522 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_poll()
530 if (sk_stream_is_writeable(sk)) in tcp_poll()
541 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) in tcp_poll()
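
The mask handed back to poll(2)/select(2)/epoll on a TCP socket is assembled in tcp_poll() above. A minimal user-space sketch of consuming it (hypothetical helper name, error handling trimmed):

#include <poll.h>

/* Wait for the socket to become readable or writable; the revents bits are
 * the ones tcp_poll() computes (POLLIN/POLLOUT, POLLERR when sk_err is set,
 * POLLHUP once both directions are shut down). */
static int wait_for_socket(int fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
        int n = poll(&pfd, 1, timeout_ms);

        if (n > 0 && (pfd.revents & (POLLERR | POLLHUP)))
                return -1;      /* socket error or peer gone */
        return n;               /* 0 on timeout, 1 when ready */
}
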
548 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) in tcp_ioctl() argument
550 struct tcp_sock *tp = tcp_sk(sk); in tcp_ioctl()
556 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
559 slow = lock_sock_fast(sk); in tcp_ioctl()
560 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
562 else if (sock_flag(sk, SOCK_URGINLINE) || in tcp_ioctl()
570 if (answ && sock_flag(sk, SOCK_DONE)) in tcp_ioctl()
574 unlock_sock_fast(sk, slow); in tcp_ioctl()
580 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
583 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
589 if (sk->sk_state == TCP_LISTEN) in tcp_ioctl()
592 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) in tcp_ioctl()
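
tcp_ioctl() above services the SIOCINQ, SIOCATMARK and SIOCOUTQ requests. A hedged sketch of reading the same queue depths from user space (hypothetical helper, no error handling):

#include <sys/ioctl.h>
#include <linux/sockios.h>      /* SIOCOUTQ */

/* Unread bytes sitting in the receive queue and bytes still queued or
 * unacknowledged on the send side, as reported by tcp_ioctl(). */
static void tcp_queue_depths(int fd, int *inq, int *outq)
{
        ioctl(fd, FIONREAD, inq);       /* same answer as SIOCINQ */
        ioctl(fd, SIOCOUTQ, outq);
}
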
616 static void skb_entail(struct sock *sk, struct sk_buff *skb) in skb_entail() argument
618 struct tcp_sock *tp = tcp_sk(sk); in skb_entail()
626 tcp_add_write_queue_tail(sk, skb); in skb_entail()
627 sk->sk_wmem_queued += skb->truesize; in skb_entail()
628 sk_mem_charge(sk, skb->truesize); in skb_entail()
632 tcp_slow_start_after_idle_check(sk); in skb_entail()
651 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, in tcp_should_autocork() argument
656 skb != tcp_write_queue_head(sk) && in tcp_should_autocork()
657 atomic_read(&sk->sk_wmem_alloc) > skb->truesize; in tcp_should_autocork()
660 static void tcp_push(struct sock *sk, int flags, int mss_now, in tcp_push() argument
663 struct tcp_sock *tp = tcp_sk(sk); in tcp_push()
666 if (!tcp_send_head(sk)) in tcp_push()
669 skb = tcp_write_queue_tail(sk); in tcp_push()
675 if (tcp_should_autocork(sk, skb, size_goal)) { in tcp_push()
679 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); in tcp_push()
685 if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize) in tcp_push()
692 __tcp_push_pending_frames(sk, mss_now, nonagle); in tcp_push()
701 ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe, in tcp_splice_data_recv()
709 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) in __tcp_splice_read() argument
717 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); in __tcp_splice_read()
736 struct sock *sk = sock->sk; in tcp_splice_read() local
746 sock_rps_record_flow(sk); in tcp_splice_read()
755 lock_sock(sk); in tcp_splice_read()
757 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); in tcp_splice_read()
759 ret = __tcp_splice_read(sk, &tss); in tcp_splice_read()
765 if (sock_flag(sk, SOCK_DONE)) in tcp_splice_read()
767 if (sk->sk_err) { in tcp_splice_read()
768 ret = sock_error(sk); in tcp_splice_read()
771 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_splice_read()
773 if (sk->sk_state == TCP_CLOSE) { in tcp_splice_read()
778 if (!sock_flag(sk, SOCK_DONE)) in tcp_splice_read()
786 sk_wait_data(sk, &timeo, NULL); in tcp_splice_read()
798 release_sock(sk); in tcp_splice_read()
799 lock_sock(sk); in tcp_splice_read()
801 if (sk->sk_err || sk->sk_state == TCP_CLOSE || in tcp_splice_read()
802 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_splice_read()
807 release_sock(sk); in tcp_splice_read()
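
tcp_splice_read()/__tcp_splice_read() back the splice(2) path from a TCP socket into a pipe. A minimal sketch under that assumption (hypothetical wrapper name):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Move up to len bytes from a connected TCP socket into the write end of a
 * pipe without copying through user space; the kernel-side handler is
 * tcp_splice_read() above. */
static ssize_t tcp_to_pipe(int sock_fd, int pipe_wr_fd, size_t len)
{
        return splice(sock_fd, NULL, pipe_wr_fd, NULL, len,
                      SPLICE_F_MOVE | SPLICE_F_MORE);
}
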
816 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, in sk_stream_alloc_skb() argument
824 if (unlikely(tcp_under_memory_pressure(sk))) in sk_stream_alloc_skb()
825 sk_mem_reclaim_partial(sk); in sk_stream_alloc_skb()
827 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); in sk_stream_alloc_skb()
833 sk_forced_mem_schedule(sk, skb->truesize); in sk_stream_alloc_skb()
835 mem_scheduled = sk_wmem_schedule(sk, skb->truesize); in sk_stream_alloc_skb()
838 skb_reserve(skb, sk->sk_prot->max_header); in sk_stream_alloc_skb()
848 sk->sk_prot->enter_memory_pressure(sk); in sk_stream_alloc_skb()
849 sk_stream_moderate_sndbuf(sk); in sk_stream_alloc_skb()
854 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, in tcp_xmit_size_goal() argument
857 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_size_goal()
860 if (!large_allowed || !sk_can_gso(sk)) in tcp_xmit_size_goal()
864 new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER; in tcp_xmit_size_goal()
872 sk->sk_gso_max_segs); in tcp_xmit_size_goal()
879 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags) in tcp_send_mss() argument
883 mss_now = tcp_current_mss(sk); in tcp_send_mss()
884 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); in tcp_send_mss()
889 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, in do_tcp_sendpages() argument
892 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_sendpages()
896 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in do_tcp_sendpages()
902 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && in do_tcp_sendpages()
903 !tcp_passive_fastopen(sk)) { in do_tcp_sendpages()
904 err = sk_stream_wait_connect(sk, &timeo); in do_tcp_sendpages()
909 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in do_tcp_sendpages()
911 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
915 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in do_tcp_sendpages()
919 struct sk_buff *skb = tcp_write_queue_tail(sk); in do_tcp_sendpages()
923 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { in do_tcp_sendpages()
925 if (!sk_stream_memory_free(sk)) in do_tcp_sendpages()
928 skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, in do_tcp_sendpages()
929 skb_queue_empty(&sk->sk_write_queue)); in do_tcp_sendpages()
933 skb_entail(sk, skb); in do_tcp_sendpages()
946 if (!sk_wmem_schedule(sk, copy)) in do_tcp_sendpages()
960 sk->sk_wmem_queued += copy; in do_tcp_sendpages()
961 sk_mem_charge(sk, copy); in do_tcp_sendpages()
974 tcp_tx_timestamp(sk, skb); in do_tcp_sendpages()
983 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); in do_tcp_sendpages()
984 } else if (skb == tcp_send_head(sk)) in do_tcp_sendpages()
985 tcp_push_one(sk, mss_now); in do_tcp_sendpages()
989 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in do_tcp_sendpages()
991 tcp_push(sk, flags & ~MSG_MORE, mss_now, in do_tcp_sendpages()
994 err = sk_stream_wait_memory(sk, &timeo); in do_tcp_sendpages()
998 mss_now = tcp_send_mss(sk, &size_goal, flags); in do_tcp_sendpages()
1003 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in do_tcp_sendpages()
1011 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in do_tcp_sendpages()
1012 sk->sk_write_space(sk); in do_tcp_sendpages()
1013 return sk_stream_error(sk, flags, err); in do_tcp_sendpages()
1016 int tcp_sendpage(struct sock *sk, struct page *page, int offset, in tcp_sendpage() argument
1021 if (!(sk->sk_route_caps & NETIF_F_SG) || in tcp_sendpage()
1022 !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) in tcp_sendpage()
1023 return sock_no_sendpage(sk->sk_socket, page, offset, size, in tcp_sendpage()
1026 lock_sock(sk); in tcp_sendpage()
1027 res = do_tcp_sendpages(sk, page, offset, size, flags); in tcp_sendpage()
1028 release_sock(sk); in tcp_sendpage()
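
do_tcp_sendpages()/tcp_sendpage() are the zero-copy transmit path used by sendfile(2) on TCP sockets (falling back to sock_no_sendpage() when the route lacks SG or checksum offload). A minimal sketch of driving it from user space:

#include <sys/sendfile.h>

/* Queue count bytes of an on-disk file onto the TCP write queue; on a TCP
 * socket this reaches tcp_sendpage() above. */
static ssize_t file_to_tcp(int sock_fd, int file_fd, off_t *offset, size_t count)
{
        return sendfile(sock_fd, file_fd, offset, count);
}
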
1033 static inline int select_size(const struct sock *sk, bool sg) in select_size() argument
1035 const struct tcp_sock *tp = tcp_sk(sk); in select_size()
1039 if (sk_can_gso(sk)) { in select_size()
1064 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, in tcp_sendmsg_fastopen() argument
1067 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg_fastopen()
1076 sk->sk_allocation); in tcp_sendmsg_fastopen()
1083 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, in tcp_sendmsg_fastopen()
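
tcp_sendmsg_fastopen() is entered when a client passes MSG_FASTOPEN so the first payload rides on the SYN. A hedged client-side sketch (assumes MSG_FASTOPEN is available in the libc headers and Fast Open is enabled end to end):

#include <sys/socket.h>
#include <netinet/in.h>

/* Combine connect + first write: the kernel path is
 * tcp_sendmsg() -> tcp_sendmsg_fastopen() -> __inet_stream_connect(). */
static ssize_t fastopen_connect_send(int fd, const struct sockaddr_in *dst,
                                     const void *buf, size_t len)
{
        return sendto(fd, buf, len, MSG_FASTOPEN,
                      (const struct sockaddr *)dst, sizeof(*dst));
}
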
1090 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) in tcp_sendmsg() argument
1092 struct tcp_sock *tp = tcp_sk(sk); in tcp_sendmsg()
1099 lock_sock(sk); in tcp_sendmsg()
1103 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); in tcp_sendmsg()
1110 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); in tcp_sendmsg()
1116 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && in tcp_sendmsg()
1117 !tcp_passive_fastopen(sk)) { in tcp_sendmsg()
1118 err = sk_stream_wait_connect(sk, &timeo); in tcp_sendmsg()
1125 copied = tcp_send_rcvq(sk, msg, size); in tcp_sendmsg()
1137 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); in tcp_sendmsg()
1139 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg()
1145 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) in tcp_sendmsg()
1148 sg = !!(sk->sk_route_caps & NETIF_F_SG); in tcp_sendmsg()
1154 skb = tcp_write_queue_tail(sk); in tcp_sendmsg()
1155 if (tcp_send_head(sk)) { in tcp_sendmsg()
1166 if (!sk_stream_memory_free(sk)) in tcp_sendmsg()
1169 skb = sk_stream_alloc_skb(sk, in tcp_sendmsg()
1170 select_size(sk, sg), in tcp_sendmsg()
1171 sk->sk_allocation, in tcp_sendmsg()
1172 skb_queue_empty(&sk->sk_write_queue)); in tcp_sendmsg()
1179 if (sk->sk_route_caps & NETIF_F_ALL_CSUM) in tcp_sendmsg()
1182 skb_entail(sk, skb); in tcp_sendmsg()
1202 err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); in tcp_sendmsg()
1208 struct page_frag *pfrag = sk_page_frag(sk); in tcp_sendmsg()
1210 if (!sk_page_frag_refill(sk, pfrag)) in tcp_sendmsg()
1224 if (!sk_wmem_schedule(sk, copy)) in tcp_sendmsg()
1227 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, in tcp_sendmsg()
1254 tcp_tx_timestamp(sk, skb); in tcp_sendmsg()
1263 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); in tcp_sendmsg()
1264 } else if (skb == tcp_send_head(sk)) in tcp_sendmsg()
1265 tcp_push_one(sk, mss_now); in tcp_sendmsg()
1269 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in tcp_sendmsg()
1272 tcp_push(sk, flags & ~MSG_MORE, mss_now, in tcp_sendmsg()
1275 err = sk_stream_wait_memory(sk, &timeo); in tcp_sendmsg()
1279 mss_now = tcp_send_mss(sk, &size_goal, flags); in tcp_sendmsg()
1284 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); in tcp_sendmsg()
1286 release_sock(sk); in tcp_sendmsg()
1291 tcp_unlink_write_queue(skb, sk); in tcp_sendmsg()
1295 tcp_check_send_head(sk, skb); in tcp_sendmsg()
1296 sk_wmem_free_skb(sk, skb); in tcp_sendmsg()
1303 err = sk_stream_error(sk, flags, err); in tcp_sendmsg()
1305 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in tcp_sendmsg()
1306 sk->sk_write_space(sk); in tcp_sendmsg()
1307 release_sock(sk); in tcp_sendmsg()
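
The flags & ~MSG_MORE handling above is the corking logic: with MSG_MORE set, tcp_push() keeps a partial segment queued, and the flag is stripped only when the sender must push and wait for memory. A small hedged sketch of the user-space side:

#include <sys/socket.h>

/* Tell tcp_push() that more data follows so no runt segment is sent for the
 * header alone; the final send() without MSG_MORE lets the frames go out. */
static ssize_t send_hdr_then_body(int fd, const void *hdr, size_t hlen,
                                  const void *body, size_t blen)
{
        if (send(fd, hdr, hlen, MSG_MORE) < 0)
                return -1;
        return send(fd, body, blen, 0);
}
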
1317 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) in tcp_recv_urg() argument
1319 struct tcp_sock *tp = tcp_sk(sk); in tcp_recv_urg()
1322 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || in tcp_recv_urg()
1326 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) in tcp_recv_urg()
1349 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) in tcp_recv_urg()
1361 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) in tcp_peek_sndq() argument
1368 skb_queue_walk(&sk->sk_write_queue, skb) { in tcp_peek_sndq()
1385 static void tcp_cleanup_rbuf(struct sock *sk, int copied) in tcp_cleanup_rbuf() argument
1387 struct tcp_sock *tp = tcp_sk(sk); in tcp_cleanup_rbuf()
1390 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); in tcp_cleanup_rbuf()
1396 if (inet_csk_ack_scheduled(sk)) { in tcp_cleanup_rbuf()
1397 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cleanup_rbuf()
1413 !atomic_read(&sk->sk_rmem_alloc))) in tcp_cleanup_rbuf()
1423 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { in tcp_cleanup_rbuf()
1428 __u32 new_window = __tcp_select_window(sk); in tcp_cleanup_rbuf()
1440 tcp_send_ack(sk); in tcp_cleanup_rbuf()
1443 static void tcp_prequeue_process(struct sock *sk) in tcp_prequeue_process() argument
1446 struct tcp_sock *tp = tcp_sk(sk); in tcp_prequeue_process()
1448 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED); in tcp_prequeue_process()
1454 sk_backlog_rcv(sk, skb); in tcp_prequeue_process()
1461 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) in tcp_recv_skb() argument
1466 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { in tcp_recv_skb()
1478 sk_eat_skb(sk, skb); in tcp_recv_skb()
1494 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, in tcp_read_sock() argument
1498 struct tcp_sock *tp = tcp_sk(sk); in tcp_read_sock()
1503 if (sk->sk_state == TCP_LISTEN) in tcp_read_sock()
1505 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { in tcp_read_sock()
1534 skb = tcp_recv_skb(sk, seq - 1, &offset); in tcp_read_sock()
1544 sk_eat_skb(sk, skb); in tcp_read_sock()
1548 sk_eat_skb(sk, skb); in tcp_read_sock()
1555 tcp_rcv_space_adjust(sk); in tcp_read_sock()
1559 tcp_recv_skb(sk, seq, &offset); in tcp_read_sock()
1560 tcp_cleanup_rbuf(sk, copied); in tcp_read_sock()
1574 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, in tcp_recvmsg() argument
1577 struct tcp_sock *tp = tcp_sk(sk); in tcp_recvmsg()
1590 return inet_recv_error(sk, msg, len, addr_len); in tcp_recvmsg()
1592 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && in tcp_recvmsg()
1593 (sk->sk_state == TCP_ESTABLISHED)) in tcp_recvmsg()
1594 sk_busy_loop(sk, nonblock); in tcp_recvmsg()
1596 lock_sock(sk); in tcp_recvmsg()
1599 if (sk->sk_state == TCP_LISTEN) in tcp_recvmsg()
1602 timeo = sock_rcvtimeo(sk, nonblock); in tcp_recvmsg()
1629 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); in tcp_recvmsg()
1646 last = skb_peek_tail(&sk->sk_receive_queue); in tcp_recvmsg()
1647 skb_queue_walk(&sk->sk_receive_queue, skb) { in tcp_recvmsg()
1672 if (copied >= target && !sk->sk_backlog.tail) in tcp_recvmsg()
1676 if (sk->sk_err || in tcp_recvmsg()
1677 sk->sk_state == TCP_CLOSE || in tcp_recvmsg()
1678 (sk->sk_shutdown & RCV_SHUTDOWN) || in tcp_recvmsg()
1683 if (sock_flag(sk, SOCK_DONE)) in tcp_recvmsg()
1686 if (sk->sk_err) { in tcp_recvmsg()
1687 copied = sock_error(sk); in tcp_recvmsg()
1691 if (sk->sk_shutdown & RCV_SHUTDOWN) in tcp_recvmsg()
1694 if (sk->sk_state == TCP_CLOSE) { in tcp_recvmsg()
1695 if (!sock_flag(sk, SOCK_DONE)) { in tcp_recvmsg()
1716 tcp_cleanup_rbuf(sk, copied); in tcp_recvmsg()
1765 release_sock(sk); in tcp_recvmsg()
1766 lock_sock(sk); in tcp_recvmsg()
1768 sk_wait_data(sk, &timeo, last); in tcp_recvmsg()
1778 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); in tcp_recvmsg()
1786 tcp_prequeue_process(sk); in tcp_recvmsg()
1790 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); in tcp_recvmsg()
1816 if (!sock_flag(sk, SOCK_URGINLINE)) { in tcp_recvmsg()
1843 tcp_rcv_space_adjust(sk); in tcp_recvmsg()
1848 tcp_fast_path_check(sk); in tcp_recvmsg()
1856 sk_eat_skb(sk, skb); in tcp_recvmsg()
1863 sk_eat_skb(sk, skb); in tcp_recvmsg()
1873 tcp_prequeue_process(sk); in tcp_recvmsg()
1876 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); in tcp_recvmsg()
1891 tcp_cleanup_rbuf(sk, copied); in tcp_recvmsg()
1893 release_sock(sk); in tcp_recvmsg()
1897 release_sock(sk); in tcp_recvmsg()
1901 err = tcp_recv_urg(sk, msg, len, flags); in tcp_recvmsg()
1905 err = tcp_peek_sndq(sk, msg, len); in tcp_recvmsg()
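
In tcp_recvmsg() the target from sock_rcvlowat() is what MSG_WAITALL raises to the full request, while MSG_PEEK leaves the data on sk_receive_queue. A hedged sketch of both flags:

#include <sys/socket.h>

/* Peek without consuming, then block until exactly len bytes arrive
 * (tcp_recvmsg() keeps looping until the MSG_WAITALL target, EOF, a signal
 * or an error). */
static ssize_t peek_then_read_all(int fd, void *buf, size_t len)
{
        ssize_t n = recv(fd, buf, len, MSG_PEEK);
        if (n <= 0)
                return n;
        return recv(fd, buf, len, MSG_WAITALL);
}
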
1910 void tcp_set_state(struct sock *sk, int state) in tcp_set_state() argument
1912 int oldstate = sk->sk_state; in tcp_set_state()
1917 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); in tcp_set_state()
1922 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); in tcp_set_state()
1924 sk->sk_prot->unhash(sk); in tcp_set_state()
1925 if (inet_csk(sk)->icsk_bind_hash && in tcp_set_state()
1926 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) in tcp_set_state()
1927 inet_put_port(sk); in tcp_set_state()
1931 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); in tcp_set_state()
1937 sk_state_store(sk, state); in tcp_set_state()
1940 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); in tcp_set_state()
1969 static int tcp_close_state(struct sock *sk) in tcp_close_state() argument
1971 int next = (int)new_state[sk->sk_state]; in tcp_close_state()
1974 tcp_set_state(sk, ns); in tcp_close_state()
1984 void tcp_shutdown(struct sock *sk, int how) in tcp_shutdown() argument
1994 if ((1 << sk->sk_state) & in tcp_shutdown()
1998 if (tcp_close_state(sk)) in tcp_shutdown()
1999 tcp_send_fin(sk); in tcp_shutdown()
2004 bool tcp_check_oom(struct sock *sk, int shift) in tcp_check_oom() argument
2008 too_many_orphans = tcp_too_many_orphans(sk, shift); in tcp_check_oom()
2009 out_of_socket_memory = tcp_out_of_memory(sk); in tcp_check_oom()
2018 void tcp_close(struct sock *sk, long timeout) in tcp_close() argument
2024 lock_sock(sk); in tcp_close()
2025 sk->sk_shutdown = SHUTDOWN_MASK; in tcp_close()
2027 if (sk->sk_state == TCP_LISTEN) { in tcp_close()
2028 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2031 inet_csk_listen_stop(sk); in tcp_close()
2040 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { in tcp_close()
2049 sk_mem_reclaim(sk); in tcp_close()
2052 if (sk->sk_state == TCP_CLOSE) in tcp_close()
2062 if (unlikely(tcp_sk(sk)->repair)) { in tcp_close()
2063 sk->sk_prot->disconnect(sk, 0); in tcp_close()
2066 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); in tcp_close()
2067 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2068 tcp_send_active_reset(sk, sk->sk_allocation); in tcp_close()
2069 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { in tcp_close()
2071 sk->sk_prot->disconnect(sk, 0); in tcp_close()
2072 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); in tcp_close()
2073 } else if (tcp_close_state(sk)) { in tcp_close()
2103 tcp_send_fin(sk); in tcp_close()
2106 sk_stream_wait_close(sk, timeout); in tcp_close()
2109 state = sk->sk_state; in tcp_close()
2110 sock_hold(sk); in tcp_close()
2111 sock_orphan(sk); in tcp_close()
2114 release_sock(sk); in tcp_close()
2121 bh_lock_sock(sk); in tcp_close()
2122 WARN_ON(sock_owned_by_user(sk)); in tcp_close()
2124 percpu_counter_inc(sk->sk_prot->orphan_count); in tcp_close()
2127 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) in tcp_close()
2144 if (sk->sk_state == TCP_FIN_WAIT2) { in tcp_close()
2145 struct tcp_sock *tp = tcp_sk(sk); in tcp_close()
2147 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2148 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_close()
2149 NET_INC_STATS_BH(sock_net(sk), in tcp_close()
2152 const int tmo = tcp_fin_time(sk); in tcp_close()
2155 inet_csk_reset_keepalive_timer(sk, in tcp_close()
2158 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); in tcp_close()
2163 if (sk->sk_state != TCP_CLOSE) { in tcp_close()
2164 sk_mem_reclaim(sk); in tcp_close()
2165 if (tcp_check_oom(sk, 0)) { in tcp_close()
2166 tcp_set_state(sk, TCP_CLOSE); in tcp_close()
2167 tcp_send_active_reset(sk, GFP_ATOMIC); in tcp_close()
2168 NET_INC_STATS_BH(sock_net(sk), in tcp_close()
2173 if (sk->sk_state == TCP_CLOSE) { in tcp_close()
2174 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; in tcp_close()
2180 reqsk_fastopen_remove(sk, req, false); in tcp_close()
2181 inet_csk_destroy_sock(sk); in tcp_close()
2186 bh_unlock_sock(sk); in tcp_close()
2188 sock_put(sk); in tcp_close()
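
The sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime branch of tcp_close() above is the abortive-close path; user space selects it with SO_LINGER and a zero timeout. A minimal sketch:

#include <sys/socket.h>
#include <unistd.h>

/* With linger on and a zero timeout, close() does not wait for unsent data:
 * tcp_close() takes the disconnect/reset branch instead of the normal FIN
 * sequence. */
static void abortive_close(int fd)
{
        struct linger lg = { .l_onoff = 1, .l_linger = 0 };

        setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
        close(fd);
}
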
2201 int tcp_disconnect(struct sock *sk, int flags) in tcp_disconnect() argument
2203 struct inet_sock *inet = inet_sk(sk); in tcp_disconnect()
2204 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_disconnect()
2205 struct tcp_sock *tp = tcp_sk(sk); in tcp_disconnect()
2207 int old_state = sk->sk_state; in tcp_disconnect()
2210 tcp_set_state(sk, TCP_CLOSE); in tcp_disconnect()
2214 inet_csk_listen_stop(sk); in tcp_disconnect()
2216 sk->sk_err = ECONNABORTED; in tcp_disconnect()
2223 tcp_send_active_reset(sk, gfp_any()); in tcp_disconnect()
2224 sk->sk_err = ECONNRESET; in tcp_disconnect()
2226 sk->sk_err = ECONNRESET; in tcp_disconnect()
2228 tcp_clear_xmit_timers(sk); in tcp_disconnect()
2229 __skb_queue_purge(&sk->sk_receive_queue); in tcp_disconnect()
2230 tcp_write_queue_purge(sk); in tcp_disconnect()
2235 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) in tcp_disconnect()
2236 inet_reset_saddr(sk); in tcp_disconnect()
2238 sk->sk_shutdown = 0; in tcp_disconnect()
2239 sock_reset_flag(sk, SOCK_DONE); in tcp_disconnect()
2251 tcp_set_ca_state(sk, TCP_CA_Open); in tcp_disconnect()
2253 inet_csk_delack_init(sk); in tcp_disconnect()
2254 tcp_init_send_head(sk); in tcp_disconnect()
2256 __sk_dst_reset(sk); in tcp_disconnect()
2260 sk->sk_error_report(sk); in tcp_disconnect()
2265 static inline bool tcp_can_repair_sock(const struct sock *sk) in tcp_can_repair_sock() argument
2267 return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && in tcp_can_repair_sock()
2268 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); in tcp_can_repair_sock()
2323 static int do_tcp_setsockopt(struct sock *sk, int level, in do_tcp_setsockopt() argument
2326 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_setsockopt()
2327 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_setsockopt()
2345 lock_sock(sk); in do_tcp_setsockopt()
2346 err = tcp_set_congestion_control(sk, name); in do_tcp_setsockopt()
2347 release_sock(sk); in do_tcp_setsockopt()
2361 lock_sock(sk); in do_tcp_setsockopt()
2386 tcp_push_pending_frames(sk); in do_tcp_setsockopt()
2410 if (!tcp_can_repair_sock(sk)) in do_tcp_setsockopt()
2414 sk->sk_reuse = SK_FORCE_REUSE; in do_tcp_setsockopt()
2418 sk->sk_reuse = SK_NO_REUSE; in do_tcp_setsockopt()
2419 tcp_send_window_probe(sk); in do_tcp_setsockopt()
2435 if (sk->sk_state != TCP_CLOSE) in do_tcp_setsockopt()
2448 else if (sk->sk_state == TCP_ESTABLISHED) in do_tcp_setsockopt()
2474 tcp_push_pending_frames(sk); in do_tcp_setsockopt()
2483 if (sock_flag(sk, SOCK_KEEPOPEN) && in do_tcp_setsockopt()
2484 !((1 << sk->sk_state) & in do_tcp_setsockopt()
2491 inet_csk_reset_keepalive_timer(sk, elapsed); in do_tcp_setsockopt()
2539 if (sk->sk_state != TCP_CLOSE) { in do_tcp_setsockopt()
2554 if ((1 << sk->sk_state) & in do_tcp_setsockopt()
2556 inet_csk_ack_scheduled(sk)) { in do_tcp_setsockopt()
2558 tcp_cleanup_rbuf(sk, 1); in do_tcp_setsockopt()
2568 err = tp->af_specific->md5_parse(sk, optval, optlen); in do_tcp_setsockopt()
2582 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | in do_tcp_setsockopt()
2586 fastopen_queue_tune(sk, val); in do_tcp_setsockopt()
2599 sk->sk_write_space(sk); in do_tcp_setsockopt()
2606 release_sock(sk); in do_tcp_setsockopt()
2610 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, in tcp_setsockopt() argument
2613 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_setsockopt()
2616 return icsk->icsk_af_ops->setsockopt(sk, level, optname, in tcp_setsockopt()
2618 return do_tcp_setsockopt(sk, level, optname, optval, optlen); in tcp_setsockopt()
2623 int compat_tcp_setsockopt(struct sock *sk, int level, int optname, in compat_tcp_setsockopt() argument
2627 return inet_csk_compat_setsockopt(sk, level, optname, in compat_tcp_setsockopt()
2629 return do_tcp_setsockopt(sk, level, optname, optval, optlen); in compat_tcp_setsockopt()
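
do_tcp_setsockopt() above is where TCP-level options land, e.g. TCP_NODELAY (followed by tcp_push_pending_frames()) and TCP_CONGESTION (via tcp_set_congestion_control()). A hedged user-space sketch:

#include <netinet/tcp.h>
#include <sys/socket.h>
#include <string.h>

/* Disable Nagle and request a congestion control module by name; both calls
 * end up in do_tcp_setsockopt(). */
static void tune_tcp_socket(int fd, const char *cc_name)
{
        int one = 1;

        setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
        setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc_name, strlen(cc_name));
}
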
2635 void tcp_get_info(struct sock *sk, struct tcp_info *info) in tcp_get_info() argument
2637 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ in tcp_get_info()
2638 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_get_info()
2645 if (sk->sk_type != SOCK_STREAM) in tcp_get_info()
2648 info->tcpi_state = sk_state_load(sk); in tcp_get_info()
2678 info->tcpi_unacked = sk->sk_ack_backlog; in tcp_get_info()
2679 info->tcpi_sacked = sk->sk_max_ack_backlog; in tcp_get_info()
2706 rate = READ_ONCE(sk->sk_pacing_rate); in tcp_get_info()
2710 rate = READ_ONCE(sk->sk_max_pacing_rate); in tcp_get_info()
2724 static int do_tcp_getsockopt(struct sock *sk, int level, in do_tcp_getsockopt() argument
2727 struct inet_connection_sock *icsk = inet_csk(sk); in do_tcp_getsockopt()
2728 struct tcp_sock *tp = tcp_sk(sk); in do_tcp_getsockopt()
2742 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) in do_tcp_getsockopt()
2783 tcp_get_info(sk, &info); in do_tcp_getsockopt()
2803 sz = ca_ops->get_info(sk, ~0U, &attr, &info); in do_tcp_getsockopt()
2874 lock_sock(sk); in do_tcp_getsockopt()
2878 release_sock(sk); in do_tcp_getsockopt()
2881 release_sock(sk); in do_tcp_getsockopt()
2886 release_sock(sk); in do_tcp_getsockopt()
2890 release_sock(sk); in do_tcp_getsockopt()
2894 release_sock(sk); in do_tcp_getsockopt()
2896 release_sock(sk); in do_tcp_getsockopt()
2914 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, in tcp_getsockopt() argument
2917 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_getsockopt()
2920 return icsk->icsk_af_ops->getsockopt(sk, level, optname, in tcp_getsockopt()
2922 return do_tcp_getsockopt(sk, level, optname, optval, optlen); in tcp_getsockopt()
2927 int compat_tcp_getsockopt(struct sock *sk, int level, int optname, in compat_tcp_getsockopt() argument
2931 return inet_csk_compat_getsockopt(sk, level, optname, in compat_tcp_getsockopt()
2933 return do_tcp_getsockopt(sk, level, optname, optval, optlen); in compat_tcp_getsockopt()
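
tcp_get_info() above fills struct tcp_info for the TCP_INFO getsockopt. A minimal sketch of reading a couple of the exported fields:

#include <stdio.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Snapshot the connection statistics that tcp_get_info() exports. */
static void dump_tcp_info(int fd)
{
        struct tcp_info info;
        socklen_t len = sizeof(info);

        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
                printf("rtt %u us, cwnd %u segments\n",
                       info.tcpi_rtt, info.tcpi_snd_cwnd);
}
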
3066 void tcp_done(struct sock *sk) in tcp_done() argument
3068 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; in tcp_done()
3070 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) in tcp_done()
3071 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); in tcp_done()
3073 tcp_set_state(sk, TCP_CLOSE); in tcp_done()
3074 tcp_clear_xmit_timers(sk); in tcp_done()
3076 reqsk_fastopen_remove(sk, req, false); in tcp_done()
3078 sk->sk_shutdown = SHUTDOWN_MASK; in tcp_done()
3080 if (!sock_flag(sk, SOCK_DEAD)) in tcp_done()
3081 sk->sk_state_change(sk); in tcp_done()
3083 inet_csk_destroy_sock(sk); in tcp_done()