Lines matching refs:sk (references to the identifier sk in net/ipv4/af_inet.c)
133 void inet_sock_destruct(struct sock *sk) in inet_sock_destruct() argument
135 struct inet_sock *inet = inet_sk(sk); in inet_sock_destruct()
137 __skb_queue_purge(&sk->sk_receive_queue); in inet_sock_destruct()
138 __skb_queue_purge(&sk->sk_error_queue); in inet_sock_destruct()
140 sk_mem_reclaim(sk); in inet_sock_destruct()
142 if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { in inet_sock_destruct()
144 sk->sk_state, sk); in inet_sock_destruct()
147 if (!sock_flag(sk, SOCK_DEAD)) { in inet_sock_destruct()
148 pr_err("Attempt to release alive inet socket %p\n", sk); in inet_sock_destruct()
152 WARN_ON(atomic_read(&sk->sk_rmem_alloc)); in inet_sock_destruct()
153 WARN_ON(atomic_read(&sk->sk_wmem_alloc)); in inet_sock_destruct()
154 WARN_ON(sk->sk_wmem_queued); in inet_sock_destruct()
155 WARN_ON(sk->sk_forward_alloc); in inet_sock_destruct()
158 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); in inet_sock_destruct()
159 dst_release(sk->sk_rx_dst); in inet_sock_destruct()
160 sk_refcnt_debug_dec(sk); in inet_sock_destruct()
174 static int inet_autobind(struct sock *sk) in inet_autobind() argument
178 lock_sock(sk); in inet_autobind()
179 inet = inet_sk(sk); in inet_autobind()
181 if (sk->sk_prot->get_port(sk, 0)) { in inet_autobind()
182 release_sock(sk); in inet_autobind()
187 release_sock(sk); in inet_autobind()
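
The sk references above are inet_autobind(): under the socket lock it asks the protocol's get_port() for port 0, i.e. an ephemeral local port, when the socket was never explicitly bound. A minimal user-space sketch of the visible effect, assuming a loopback peer on a hypothetical port and omitting error handling:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in dst = { .sin_family = AF_INET,
                                   .sin_port = htons(9999) };  /* hypothetical peer port */
        inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

        /* No bind(): the first send triggers the kernel's autobind path. */
        sendto(fd, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst));

        struct sockaddr_in local;
        socklen_t len = sizeof(local);
        getsockname(fd, (struct sockaddr *)&local, &len);
        printf("autobound to port %u\n", (unsigned)ntohs(local.sin_port));

        close(fd);
        return 0;
    }
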
196 struct sock *sk = sock->sk; in inet_listen() local
200 lock_sock(sk); in inet_listen()
206 old_state = sk->sk_state; in inet_listen()
222 !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) { in inet_listen()
224 fastopen_queue_tune(sk, backlog); in inet_listen()
227 fastopen_queue_tune(sk, in inet_listen()
232 err = inet_csk_listen_start(sk, backlog); in inet_listen()
236 sk->sk_max_ack_backlog = backlog; in inet_listen()
240 release_sock(sk); in inet_listen()
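
inet_listen() checks the socket state, tunes the Fast Open queue when TCP_FASTOPEN is in play (fastopen_queue_tune), starts the listener with inet_csk_listen_start(), and records the backlog in sk->sk_max_ack_backlog. A sketch of the user-space sequence that reaches it, with an illustrative Fast Open queue length:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <string.h>
    #include <sys/socket.h>

    int make_listener(unsigned short port)
    {
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        struct sockaddr_in addr;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));

        /* Optional: size the Fast Open request queue before listen(). */
        int qlen = 16;                              /* illustrative value */
        setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));

        listen(fd, 128);                            /* backlog -> sk_max_ack_backlog */
        return fd;
    }
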
252 struct sock *sk; in inet_create() local
323 sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern); in inet_create()
324 if (!sk) in inet_create()
329 sk->sk_reuse = SK_CAN_REUSE; in inet_create()
331 inet = inet_sk(sk); in inet_create()
349 sock_init_data(sock, sk); in inet_create()
351 sk->sk_destruct = inet_sock_destruct; in inet_create()
352 sk->sk_protocol = protocol; in inet_create()
353 sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; in inet_create()
363 sk_refcnt_debug_inc(sk); in inet_create()
373 sk->sk_prot->hash(sk); in inet_create()
376 if (sk->sk_prot->init) { in inet_create()
377 err = sk->sk_prot->init(sk); in inet_create()
379 sk_common_release(sk); in inet_create()
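
inet_create() allocates the sock, marks it SK_CAN_REUSE, points sk->sk_destruct at inet_sock_destruct, and runs the per-protocol init hook; it is reached from the socket() system call. A small sketch, noting that protocol 0 lets the kernel pick the default entry for the requested type:

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        /* All three calls go through inet_create(); protocol 0 selects the
         * default for the type (TCP for SOCK_STREAM, UDP for SOCK_DGRAM). */
        int tcp_default  = socket(AF_INET, SOCK_STREAM, 0);
        int tcp_explicit = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        int udp_fd       = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);

        close(tcp_default);
        close(tcp_explicit);
        close(udp_fd);
        return 0;
    }
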
396 struct sock *sk = sock->sk; in inet_release() local
398 if (sk) { in inet_release()
402 ip_mc_drop_socket(sk); in inet_release()
412 if (sock_flag(sk, SOCK_LINGER) && in inet_release()
414 timeout = sk->sk_lingertime; in inet_release()
415 sock->sk = NULL; in inet_release()
416 sk->sk_prot->close(sk, timeout); in inet_release()
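
inet_release() drops multicast memberships and then closes the protocol with a timeout: zero by default, or sk->sk_lingertime when SO_LINGER is set, during which close() may block while unsent data is flushed. A sketch of setting that behavior, with an illustrative five-second linger:

    #include <sys/socket.h>
    #include <unistd.h>

    static void close_with_linger(int fd)
    {
        /* With l_onoff set, close() may block up to l_linger seconds; this is
         * the timeout handed to sk->sk_prot->close() in inet_release(). */
        struct linger lg = { .l_onoff = 1, .l_linger = 5 };
        setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
        close(fd);
    }
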
425 struct sock *sk = sock->sk; in inet_bind() local
426 struct inet_sock *inet = inet_sk(sk); in inet_bind()
427 struct net *net = sock_net(sk); in inet_bind()
434 if (sk->sk_prot->bind) { in inet_bind()
435 err = sk->sk_prot->bind(sk, uaddr, addr_len); in inet_bind()
452 tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id; in inet_bind()
484 lock_sock(sk); in inet_bind()
488 if (sk->sk_state != TCP_CLOSE || inet->inet_num) in inet_bind()
497 sk->sk_prot->get_port(sk, snum)) { in inet_bind()
504 sk->sk_userlocks |= SOCK_BINDADDR_LOCK; in inet_bind()
506 sk->sk_userlocks |= SOCK_BINDPORT_LOCK; in inet_bind()
510 sk_dst_reset(sk); in inet_bind()
513 release_sock(sk); in inet_bind()
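
inet_bind() either defers to a protocol-private bind handler when one exists, or validates the address, rejects a second bind (state not TCP_CLOSE or inet_num already set), asks get_port() for the requested port, and records SOCK_BINDADDR_LOCK/SOCK_BINDPORT_LOCK in sk->sk_userlocks. A user-space sketch; passing port 0 makes get_port() pick an ephemeral port:

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>

    static int bind_any(int fd, unsigned short port)    /* port 0 => ephemeral */
    {
        struct sockaddr_in addr;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);

        /* A second bind() on the same socket fails: inet_bind() sees
         * inet_num already set and returns an error. */
        return bind(fd, (struct sockaddr *)&addr, sizeof(addr));
    }
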
522 struct sock *sk = sock->sk; in inet_dgram_connect() local
527 return sk->sk_prot->disconnect(sk, flags); in inet_dgram_connect()
529 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) in inet_dgram_connect()
531 return sk->sk_prot->connect(sk, uaddr, addr_len); in inet_dgram_connect()
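
inet_dgram_connect() disconnects on AF_UNSPEC, autobinds an unbound socket, and otherwise hands the address to the protocol's connect handler, which sets the default destination for a datagram socket. A sketch of both directions, using an RFC 5737 documentation address and an illustrative port:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    static void udp_connect_then_clear(int fd)
    {
        struct sockaddr_in peer = { .sin_family = AF_INET,
                                    .sin_port = htons(53) };      /* illustrative */
        inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);           /* doc address */

        /* Sets the default destination; an unbound socket is autobound first. */
        connect(fd, (struct sockaddr *)&peer, sizeof(peer));

        /* AF_UNSPEC dissolves the association via sk->sk_prot->disconnect(). */
        struct sockaddr unspec = { .sa_family = AF_UNSPEC };
        connect(fd, &unspec, sizeof(unspec));
    }
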
535 static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) in inet_wait_for_connect() argument
539 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in inet_wait_for_connect()
540 sk->sk_write_pending += writebias; in inet_wait_for_connect()
547 while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in inet_wait_for_connect()
548 release_sock(sk); in inet_wait_for_connect()
550 lock_sock(sk); in inet_wait_for_connect()
553 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); in inet_wait_for_connect()
555 finish_wait(sk_sleep(sk), &wait); in inet_wait_for_connect()
556 sk->sk_write_pending -= writebias; in inet_wait_for_connect()
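
inet_wait_for_connect() sleeps on sk_sleep(sk) while the socket sits in SYN_SENT/SYN_RECV, releasing and retaking the socket lock around each wait, with sk->sk_write_pending biased so a pending Fast Open write stays accounted for. The timeout it receives comes from sock_sndtimeo() in the caller, so a blocking connect() can be bounded from user space with SO_SNDTIMEO; a sketch with an assumed timeout in seconds:

    #include <sys/socket.h>
    #include <sys/time.h>

    /* Bound how long a blocking connect() may wait for the handshake. */
    static void set_connect_timeout(int fd, long seconds)
    {
        struct timeval tv = { .tv_sec = seconds, .tv_usec = 0 };
        setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
    }
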
567 struct sock *sk = sock->sk; in __inet_stream_connect() local
575 err = sk->sk_prot->disconnect(sk, flags); in __inet_stream_connect()
593 if (sk->sk_state != TCP_CLOSE) in __inet_stream_connect()
596 err = sk->sk_prot->connect(sk, uaddr, addr_len); in __inet_stream_connect()
610 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); in __inet_stream_connect()
612 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { in __inet_stream_connect()
613 int writebias = (sk->sk_protocol == IPPROTO_TCP) && in __inet_stream_connect()
614 tcp_sk(sk)->fastopen_req && in __inet_stream_connect()
615 tcp_sk(sk)->fastopen_req->data ? 1 : 0; in __inet_stream_connect()
618 if (!timeo || !inet_wait_for_connect(sk, timeo, writebias)) in __inet_stream_connect()
629 if (sk->sk_state == TCP_CLOSE) in __inet_stream_connect()
643 err = sock_error(sk) ? : -ECONNABORTED; in __inet_stream_connect()
645 if (sk->sk_prot->disconnect(sk, flags)) in __inet_stream_connect()
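
__inet_stream_connect() issues the protocol connect, computes the wait budget with sock_sndtimeo(), and for a non-blocking socket (timeo == 0) returns without waiting, leaving the handshake to finish asynchronously; on failure it falls back to the disconnect path. The matching user-space idiom, sketched with a fixed illustrative poll timeout:

    #include <errno.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <sys/socket.h>

    /* Non-blocking connect: expect EINPROGRESS, wait for writability,
     * then read the final status out of SO_ERROR. */
    static int connect_nonblocking(int fd, const struct sockaddr *sa, socklen_t len)
    {
        fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

        if (connect(fd, sa, len) == 0)
            return 0;                          /* connected immediately */
        if (errno != EINPROGRESS)
            return -1;

        struct pollfd pfd = { .fd = fd, .events = POLLOUT };
        if (poll(&pfd, 1, 5000) <= 0)          /* illustrative 5 s wait */
            return -1;

        int err = 0;
        socklen_t elen = sizeof(err);
        getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen);
        return err ? -1 : 0;
    }
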
656 lock_sock(sock->sk); in inet_stream_connect()
658 release_sock(sock->sk); in inet_stream_connect()
669 struct sock *sk1 = sock->sk; in inet_accept()
700 struct sock *sk = sock->sk; in inet_getname() local
701 struct inet_sock *inet = inet_sk(sk); in inet_getname()
707 (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && in inet_getname()
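
inet_getname() backs both getsockname() and getpeername(); the peer variant is refused while the socket is closed or still in SYN_SENT without a destination, which is what the state mask above checks. A sketch that prints both endpoints of a socket assumed to be connected:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    static void print_endpoints(int fd)        /* fd assumed connected */
    {
        struct sockaddr_in local, peer;
        socklen_t llen = sizeof(local), plen = sizeof(peer);
        char lbuf[INET_ADDRSTRLEN], pbuf[INET_ADDRSTRLEN];

        getsockname(fd, (struct sockaddr *)&local, &llen);
        getpeername(fd, (struct sockaddr *)&peer, &plen);   /* fails if not connected */

        printf("%s:%u -> %s:%u\n",
               inet_ntop(AF_INET, &local.sin_addr, lbuf, sizeof(lbuf)),
               (unsigned)ntohs(local.sin_port),
               inet_ntop(AF_INET, &peer.sin_addr, pbuf, sizeof(pbuf)),
               (unsigned)ntohs(peer.sin_port));
    }
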
727 struct sock *sk = sock->sk; in inet_sendmsg() local
729 sock_rps_record_flow(sk); in inet_sendmsg()
732 if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind && in inet_sendmsg()
733 inet_autobind(sk)) in inet_sendmsg()
736 return sk->sk_prot->sendmsg(sk, msg, size); in inet_sendmsg()
743 struct sock *sk = sock->sk; in inet_sendpage() local
745 sock_rps_record_flow(sk); in inet_sendpage()
748 if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind && in inet_sendpage()
749 inet_autobind(sk)) in inet_sendpage()
752 if (sk->sk_prot->sendpage) in inet_sendpage()
753 return sk->sk_prot->sendpage(sk, page, offset, size, flags); in inet_sendpage()
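
inet_sendpage() autobinds if needed and forwards to the protocol's sendpage handler when one is provided (TCP has one), falling back to the generic sendmsg-based path otherwise. Zero-copy style transfers such as sendfile() are the usual way to reach it on kernels of this vintage; a sketch:

    #include <fcntl.h>
    #include <sys/sendfile.h>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Push a whole file over a connected TCP socket. */
    static ssize_t send_file(int sockfd, const char *path)
    {
        int in = open(path, O_RDONLY);
        if (in < 0)
            return -1;

        struct stat st;
        fstat(in, &st);

        off_t off = 0;
        ssize_t n = sendfile(sockfd, in, &off, st.st_size);
        close(in);
        return n;
    }
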
761 struct sock *sk = sock->sk; in inet_recvmsg() local
765 sock_rps_record_flow(sk); in inet_recvmsg()
767 err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT, in inet_recvmsg()
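
inet_recvmsg() records the RPS flow for the socket and passes MSG_DONTWAIT (and the remaining flags) straight to the protocol's recvmsg. The same flag can be supplied per call from user space instead of marking the whole socket non-blocking; a sketch:

    #include <errno.h>
    #include <stddef.h>
    #include <sys/socket.h>

    /* One-shot non-blocking read: EAGAIN means nothing is queued right now. */
    static long try_recv(int fd, void *buf, size_t len)
    {
        long n = recv(fd, buf, len, MSG_DONTWAIT);
        if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
            return 0;
        return n;
    }
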
777 struct sock *sk = sock->sk; in inet_shutdown() local
789 lock_sock(sk); in inet_shutdown()
791 if ((1 << sk->sk_state) & in inet_shutdown()
798 switch (sk->sk_state) { in inet_shutdown()
804 sk->sk_shutdown |= how; in inet_shutdown()
805 if (sk->sk_prot->shutdown) in inet_shutdown()
806 sk->sk_prot->shutdown(sk, how); in inet_shutdown()
818 err = sk->sk_prot->disconnect(sk, O_NONBLOCK); in inet_shutdown()
824 sk->sk_state_change(sk); in inet_shutdown()
825 release_sock(sk); in inet_shutdown()
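
inet_shutdown() folds the requested direction into sk->sk_shutdown, calls the protocol's shutdown hook for established sockets, disconnects a SYN_SENT socket instead, and finally wakes waiters through sk->sk_state_change(). The common half-close idiom that exercises it, sketched with minimal error handling:

    #include <sys/socket.h>
    #include <unistd.h>

    /* Half-close: send a FIN but keep reading until the peer closes too. */
    static void half_close_and_drain(int fd)
    {
        char buf[4096];
        ssize_t n;

        shutdown(fd, SHUT_WR);              /* becomes SEND_SHUTDOWN in sk_shutdown */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            ;                               /* discard whatever the peer still sends */
    }
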
842 struct sock *sk = sock->sk; in inet_ioctl() local
844 struct net *net = sock_net(sk); in inet_ioctl()
848 err = sock_get_timestamp(sk, (struct timeval __user *)arg); in inet_ioctl()
851 err = sock_get_timestampns(sk, (struct timespec __user *)arg); in inet_ioctl()
877 if (sk->sk_prot->ioctl) in inet_ioctl()
878 err = sk->sk_prot->ioctl(sk, cmd, arg); in inet_ioctl()
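
inet_ioctl() answers SIOCGSTAMP/SIOCGSTAMPNS from the socket's last-packet timestamp, handles a set of routing and interface requests, and defers anything unrecognized to the protocol's ioctl hook. A sketch of the SIOCGSTAMP branch from user space, assuming at least one packet has already been received on the socket:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <sys/time.h>
    #include <linux/sockios.h>

    /* Timestamp of the most recently received packet (sock_get_timestamp). */
    static void print_last_rx_stamp(int fd)
    {
        struct timeval tv;

        if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
            printf("last packet: %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
    }
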
890 struct sock *sk = sock->sk; in inet_compat_ioctl() local
893 if (sk->sk_prot->compat_ioctl) in inet_compat_ioctl()
894 err = sk->sk_prot->compat_ioctl(sk, cmd, arg); in inet_compat_ioctl()
1100 static int inet_sk_reselect_saddr(struct sock *sk) in inet_sk_reselect_saddr() argument
1102 struct inet_sock *inet = inet_sk(sk); in inet_sk_reselect_saddr()
1111 sock_owned_by_user(sk)); in inet_sk_reselect_saddr()
1117 rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk), in inet_sk_reselect_saddr()
1118 sk->sk_bound_dev_if, sk->sk_protocol, in inet_sk_reselect_saddr()
1119 inet->inet_sport, inet->inet_dport, sk); in inet_sk_reselect_saddr()
1123 sk_setup_caps(sk, &rt->dst); in inet_sk_reselect_saddr()
1145 __sk_prot_rehash(sk); in inet_sk_reselect_saddr()
1149 int inet_sk_rebuild_header(struct sock *sk) in inet_sk_rebuild_header() argument
1151 struct inet_sock *inet = inet_sk(sk); in inet_sk_rebuild_header()
1152 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); in inet_sk_rebuild_header()
1170 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr, in inet_sk_rebuild_header()
1172 sk->sk_protocol, RT_CONN_FLAGS(sk), in inet_sk_rebuild_header()
1173 sk->sk_bound_dev_if); in inet_sk_rebuild_header()
1176 sk_setup_caps(sk, &rt->dst); in inet_sk_rebuild_header()
1181 sk->sk_route_caps = 0; in inet_sk_rebuild_header()
1187 sk->sk_state != TCP_SYN_SENT || in inet_sk_rebuild_header()
1188 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) || in inet_sk_rebuild_header()
1189 (err = inet_sk_reselect_saddr(sk)) != 0) in inet_sk_rebuild_header()
1190 sk->sk_err_soft = -err; in inet_sk_rebuild_header()
1386 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) in inet_recv_error() argument
1388 if (sk->sk_family == AF_INET) in inet_recv_error()
1389 return ip_recv_error(sk, msg, len, addr_len); in inet_recv_error()
1391 if (sk->sk_family == AF_INET6) in inet_recv_error()
1392 return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len); in inet_recv_error()
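
inet_recv_error() picks ip_recv_error() or the pingv6_ops equivalent based on sk->sk_family; protocols call it when recvmsg() is invoked with MSG_ERRQUEUE. A sketch of draining one queued error (for example an ICMP error reported for a UDP send), with the control-message handling kept minimal:

    #include <errno.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <linux/errqueue.h>

    /* Returns the queued ee_errno, 0 if none was found, or -errno on failure. */
    static int read_one_sock_error(int fd)
    {
        int on = 1;
        setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));   /* enable error queue */

        char data[256], control[512];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                              .msg_control = control, .msg_controllen = sizeof(control) };

        /* This recvmsg() ends up in inet_recv_error() -> ip_recv_error(). */
        if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
            return -errno;

        for (struct cmsghdr *cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
            if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
                struct sock_extended_err *ee = (struct sock_extended_err *)CMSG_DATA(cm);
                return ee->ee_errno;
            }
        return 0;
    }
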
1428 int inet_ctl_sock_create(struct sock **sk, unsigned short family, in inet_ctl_sock_create() argument
1436 *sk = sock->sk; in inet_ctl_sock_create()
1437 (*sk)->sk_allocation = GFP_ATOMIC; in inet_ctl_sock_create()
1442 (*sk)->sk_prot->unhash(*sk); in inet_ctl_sock_create()