Lines matching refs:sk in net/ipv4/inet_connection_sock.c
46 int inet_csk_bind_conflict(const struct sock *sk, in inet_csk_bind_conflict() argument
50 int reuse = sk->sk_reuse; in inet_csk_bind_conflict()
51 int reuseport = sk->sk_reuseport; in inet_csk_bind_conflict()
52 kuid_t uid = sock_i_uid((struct sock *)sk); in inet_csk_bind_conflict()
62 if (sk != sk2 && in inet_csk_bind_conflict()
64 (!sk->sk_bound_dev_if || in inet_csk_bind_conflict()
66 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { in inet_csk_bind_conflict()
73 if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr || in inet_csk_bind_conflict()
74 sk2->sk_rcv_saddr == sk->sk_rcv_saddr) in inet_csk_bind_conflict()
80 if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr || in inet_csk_bind_conflict()
81 sk2->sk_rcv_saddr == sk->sk_rcv_saddr) in inet_csk_bind_conflict()
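
The fragments above implement the bind-conflict rules: two sockets may share a local port only when both set SO_REUSEADDR (sk_reuse) and the existing owner is not listening, or when both set SO_REUSEPORT (sk_reuseport) and belong to the same uid, with bound devices (sk_bound_dev_if) and receive addresses (sk_rcv_saddr) also compared. A minimal userspace sketch of the SO_REUSEPORT case this check permits; the port number and helper name are illustrative:

    /* Two listeners sharing one port via SO_REUSEPORT -- the case
     * inet_csk_bind_conflict() allows when both sockets set the option
     * and share the same owning uid. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    static int make_listener(unsigned short port)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int one = 1;
        struct sockaddr_in addr;

        if (fd < 0)
            return -1;
        /* Without this on *both* sockets the second bind() gets EADDRINUSE. */
        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);
        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(fd, 16) < 0) {
            close(fd);
            return -1;
        }
        return fd;
    }

    int main(void)
    {
        int a = make_listener(8080);
        int b = make_listener(8080);    /* succeeds only thanks to SO_REUSEPORT */

        printf("listeners: %d %d\n", a, b);
        return (a >= 0 && b >= 0) ? 0 : 1;
    }

Incoming connections are then distributed between the two listeners by the kernel.
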
93 int inet_csk_get_port(struct sock *sk, unsigned short snum) in inet_csk_get_port() argument
95 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo; in inet_csk_get_port()
99 struct net *net = sock_net(sk); in inet_csk_get_port()
101 kuid_t uid = sock_i_uid(sk); in inet_csk_get_port()
102 int attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0; in inet_csk_get_port()
131 sk->sk_reuse && in inet_csk_get_port()
132 sk->sk_state != TCP_LISTEN) || in inet_csk_get_port()
134 sk->sk_reuseport && in inet_csk_get_port()
140 if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) { in inet_csk_get_port()
190 if (sk->sk_reuse == SK_FORCE_REUSE) in inet_csk_get_port()
194 sk->sk_reuse && sk->sk_state != TCP_LISTEN) || in inet_csk_get_port()
196 sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && in inet_csk_get_port()
201 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) { in inet_csk_get_port()
202 if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) || in inet_csk_get_port()
204 sk->sk_reuseport && uid_eq(tb->fastuid, uid))) && in inet_csk_get_port()
220 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) in inet_csk_get_port()
224 if (sk->sk_reuseport) { in inet_csk_get_port()
231 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) in inet_csk_get_port()
234 (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))) in inet_csk_get_port()
238 if (!inet_csk(sk)->icsk_bind_hash) in inet_csk_get_port()
239 inet_bind_hash(sk, tb, snum); in inet_csk_get_port()
240 WARN_ON(inet_csk(sk)->icsk_bind_hash != tb); in inet_csk_get_port()
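
inet_csk_get_port() either validates an explicitly requested port against the conflict rules above or, for snum == 0, scans the ephemeral range for a usable bind bucket (attempt_half restricts the first pass to half the range when sk_reuse is SK_CAN_REUSE). Userspace hits the ephemeral path by binding to port 0 and can read back the kernel's choice; a short sketch:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr;
        socklen_t len = sizeof(addr);

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        addr.sin_port = 0;              /* 0: let inet_csk_get_port() search */
        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            getsockname(fd, (struct sockaddr *)&addr, &len) < 0) {
            perror("bind/getsockname");
            return 1;
        }
        printf("kernel-assigned port: %u\n", ntohs(addr.sin_port));
        close(fd);
        return 0;
    }
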
255 static int inet_csk_wait_for_connect(struct sock *sk, long timeo) in inet_csk_wait_for_connect() argument
257 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_wait_for_connect()
276 prepare_to_wait_exclusive(sk_sleep(sk), &wait, in inet_csk_wait_for_connect()
278 release_sock(sk); in inet_csk_wait_for_connect()
282 lock_sock(sk); in inet_csk_wait_for_connect()
287 if (sk->sk_state != TCP_LISTEN) in inet_csk_wait_for_connect()
296 finish_wait(sk_sleep(sk), &wait); in inet_csk_wait_for_connect()
303 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) in inet_csk_accept() argument
305 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_accept()
311 lock_sock(sk); in inet_csk_accept()
317 if (sk->sk_state != TCP_LISTEN) in inet_csk_accept()
322 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); in inet_csk_accept()
329 error = inet_csk_wait_for_connect(sk, timeo); in inet_csk_accept()
333 req = reqsk_queue_remove(queue, sk); in inet_csk_accept()
334 newsk = req->sk; in inet_csk_accept()
336 if (sk->sk_protocol == IPPROTO_TCP && in inet_csk_accept()
346 req->sk = NULL; in inet_csk_accept()
352 release_sock(sk); in inet_csk_accept()
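
inet_csk_accept() pulls an established child off the listener's accept queue, sleeping in inet_csk_wait_for_connect() for at most the interval sock_rcvtimeo() derives from SO_RCVTIMEO (zero under O_NONBLOCK); when the timeout runs out the wait returns -EAGAIN. A userspace sketch of that timeout path, assuming no client ever connects; the 2-second value is arbitrary:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <sys/time.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr;
        struct timeval tv = { .tv_sec = 2 };    /* becomes "timeo" above */

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
        listen(fd, 1);
        setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
        if (accept(fd, NULL, NULL) < 0 &&
            (errno == EAGAIN || errno == EWOULDBLOCK))
            printf("accept() timed out, as the -EAGAIN path above implies\n");
        close(fd);
        return 0;
    }
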
369 void inet_csk_init_xmit_timers(struct sock *sk, in inet_csk_init_xmit_timers() argument
374 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_init_xmit_timers()
377 (unsigned long)sk); in inet_csk_init_xmit_timers()
379 (unsigned long)sk); in inet_csk_init_xmit_timers()
380 setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk); in inet_csk_init_xmit_timers()
385 void inet_csk_clear_xmit_timers(struct sock *sk) in inet_csk_clear_xmit_timers() argument
387 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_clear_xmit_timers()
391 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); in inet_csk_clear_xmit_timers()
392 sk_stop_timer(sk, &icsk->icsk_delack_timer); in inet_csk_clear_xmit_timers()
393 sk_stop_timer(sk, &sk->sk_timer); in inet_csk_clear_xmit_timers()
397 void inet_csk_delete_keepalive_timer(struct sock *sk) in inet_csk_delete_keepalive_timer() argument
399 sk_stop_timer(sk, &sk->sk_timer); in inet_csk_delete_keepalive_timer()
403 void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) in inet_csk_reset_keepalive_timer() argument
405 sk_reset_timer(sk, &sk->sk_timer, jiffies + len); in inet_csk_reset_keepalive_timer()
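
sk->sk_timer doubles as the keepalive timer: inet_csk_reset_keepalive_timer() re-arms it and inet_csk_delete_keepalive_timer() stops it. From userspace the timer is driven by SO_KEEPALIVE plus the per-socket TCP_KEEP* options; a sketch with illustrative values:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int on = 1, idle = 60, intvl = 10, cnt = 5;     /* illustrative values */

        setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));     /* first probe after 60s idle */
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));  /* 10s between probes */
        setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));        /* give up after 5 misses */
        return 0;
    }
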
409 struct dst_entry *inet_csk_route_req(const struct sock *sk, in inet_csk_route_req() argument
419 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, in inet_csk_route_req()
420 sk->sk_protocol, inet_sk_flowi_flags(sk), in inet_csk_route_req()
425 rt = ip_route_output_flow(net, fl4, sk); in inet_csk_route_req()
440 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, in inet_csk_route_child_sock() argument
456 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, in inet_csk_route_child_sock()
457 sk->sk_protocol, inet_sk_flowi_flags(sk), in inet_csk_route_child_sock()
462 rt = ip_route_output_flow(net, fl4, sk); in inet_csk_route_child_sock()
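
Both routing helpers fill a flowi4 key (protocol, connection flags, scope) and resolve it with ip_route_output_flow(), for the SYN-ACK and for the accepted child respectively. The same output-route lookup is observable from userspace: connect() on a UDP socket performs it without sending a packet, and getsockname() then reveals the source address the route selected. The probe destination below is arbitrary, and the lookup fails with ENETUNREACH when no route exists:

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr_in dst, src;
        socklen_t len = sizeof(src);
        char buf[INET_ADDRSTRLEN];

        memset(&dst, 0, sizeof(dst));
        dst.sin_family = AF_INET;
        dst.sin_port = htons(53);
        inet_pton(AF_INET, "8.8.8.8", &dst.sin_addr);   /* arbitrary probe */
        if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
            perror("connect");              /* e.g. ENETUNREACH: no route */
            return 1;
        }
        getsockname(fd, (struct sockaddr *)&src, &len);
        printf("route-selected source: %s\n",
               inet_ntop(AF_INET, &src.sin_addr, buf, sizeof(buf)));
        return 0;
    }
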
540 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) in inet_csk_reqsk_queue_drop() argument
542 if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) { in inet_csk_reqsk_queue_drop()
543 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); in inet_csk_reqsk_queue_drop()
549 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req) in inet_csk_reqsk_queue_drop_and_put() argument
551 inet_csk_reqsk_queue_drop(sk, req); in inet_csk_reqsk_queue_drop_and_put()
626 req->sk = NULL; in reqsk_queue_hash_req()
639 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, in inet_csk_reqsk_queue_hash_add() argument
643 inet_csk_reqsk_queue_added(sk); in inet_csk_reqsk_queue_hash_add()
655 struct sock *inet_csk_clone_lock(const struct sock *sk, in inet_csk_clone_lock() argument
659 struct sock *newsk = sk_clone_lock(sk, priority); in inet_csk_clone_lock()
695 void inet_csk_destroy_sock(struct sock *sk) in inet_csk_destroy_sock() argument
697 WARN_ON(sk->sk_state != TCP_CLOSE); in inet_csk_destroy_sock()
698 WARN_ON(!sock_flag(sk, SOCK_DEAD)); in inet_csk_destroy_sock()
701 WARN_ON(!sk_unhashed(sk)); in inet_csk_destroy_sock()
704 WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash); in inet_csk_destroy_sock()
706 sk->sk_prot->destroy(sk); in inet_csk_destroy_sock()
708 sk_stream_kill_queues(sk); in inet_csk_destroy_sock()
710 xfrm_sk_free_policy(sk); in inet_csk_destroy_sock()
712 sk_refcnt_debug_release(sk); in inet_csk_destroy_sock()
714 percpu_counter_dec(sk->sk_prot->orphan_count); in inet_csk_destroy_sock()
715 sock_put(sk); in inet_csk_destroy_sock()
722 void inet_csk_prepare_forced_close(struct sock *sk) in inet_csk_prepare_forced_close() argument
723 __releases(&sk->sk_lock.slock) in inet_csk_prepare_forced_close()
726 bh_unlock_sock(sk); in inet_csk_prepare_forced_close()
727 sock_put(sk); in inet_csk_prepare_forced_close()
730 sock_set_flag(sk, SOCK_DEAD); in inet_csk_prepare_forced_close()
731 percpu_counter_inc(sk->sk_prot->orphan_count); in inet_csk_prepare_forced_close()
732 inet_sk(sk)->inet_num = 0; in inet_csk_prepare_forced_close()
736 int inet_csk_listen_start(struct sock *sk, int backlog) in inet_csk_listen_start() argument
738 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_listen_start()
739 struct inet_sock *inet = inet_sk(sk); in inet_csk_listen_start()
743 sk->sk_max_ack_backlog = backlog; in inet_csk_listen_start()
744 sk->sk_ack_backlog = 0; in inet_csk_listen_start()
745 inet_csk_delack_init(sk); in inet_csk_listen_start()
752 sk_state_store(sk, TCP_LISTEN); in inet_csk_listen_start()
753 if (!sk->sk_prot->get_port(sk, inet->inet_num)) { in inet_csk_listen_start()
756 sk_dst_reset(sk); in inet_csk_listen_start()
757 sk->sk_prot->hash(sk); in inet_csk_listen_start()
762 sk->sk_state = TCP_CLOSE; in inet_csk_listen_start()
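
Note the ordering above: the socket is moved to TCP_LISTEN before get_port() re-validates the binding, so a port shared under SO_REUSEADDR is re-checked at listen() time; if the check fails the state falls back to TCP_CLOSE and listen() returns the error. A sketch of what that means for two SO_REUSEADDR sockets bound to the same port (port number illustrative, error checking trimmed):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    static int bound_socket(unsigned short port)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int one = 1;
        struct sockaddr_in addr;

        setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        addr.sin_port = htons(port);
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));
        return fd;
    }

    int main(void)
    {
        int a = bound_socket(9090);
        int b = bound_socket(9090);     /* both bind: SO_REUSEADDR, nobody listens yet */
        int rc_a = listen(a, 8);        /* 0: first listener wins */
        int rc_b = listen(b, 8);        /* -1: get_port() now sees a listening owner */
        int err_b = errno;

        printf("listen: %d then %d (%s)\n", rc_a, rc_b, strerror(err_b));
        return 0;
    }
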
767 static void inet_child_forget(struct sock *sk, struct request_sock *req, in inet_child_forget() argument
770 sk->sk_prot->disconnect(child, O_NONBLOCK); in inet_child_forget()
774 percpu_counter_inc(sk->sk_prot->orphan_count); in inet_child_forget()
776 if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) { in inet_child_forget()
778 BUG_ON(sk != req->rsk_listener); in inet_child_forget()
792 struct sock *inet_csk_reqsk_queue_add(struct sock *sk, in inet_csk_reqsk_queue_add() argument
796 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in inet_csk_reqsk_queue_add()
799 if (unlikely(sk->sk_state != TCP_LISTEN)) { in inet_csk_reqsk_queue_add()
800 inet_child_forget(sk, req, child); in inet_csk_reqsk_queue_add()
803 req->sk = child; in inet_csk_reqsk_queue_add()
810 sk_acceptq_added(sk); in inet_csk_reqsk_queue_add()
817 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child, in inet_csk_complete_hashdance() argument
821 inet_csk_reqsk_queue_drop(sk, req); in inet_csk_complete_hashdance()
822 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); in inet_csk_complete_hashdance()
823 if (inet_csk_reqsk_queue_add(sk, req, child)) in inet_csk_complete_hashdance()
837 void inet_csk_listen_stop(struct sock *sk) in inet_csk_listen_stop() argument
839 struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_listen_stop()
851 while ((req = reqsk_queue_remove(queue, sk)) != NULL) { in inet_csk_listen_stop()
852 struct sock *child = req->sk; in inet_csk_listen_stop()
859 inet_child_forget(sk, req, child); in inet_csk_listen_stop()
878 WARN_ON_ONCE(sk->sk_ack_backlog); in inet_csk_listen_stop()
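
inet_csk_listen_stop() drains everything still queued on the listener, and inet_child_forget() disconnects each never-accepted child, which for an established TCP child sends a reset. A client whose handshake completed but whose connection was never accept()ed should therefore see ECONNRESET once the listener closes; a single-process sketch over loopback (error checking trimmed):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    int main(void)
    {
        int lfd = socket(AF_INET, SOCK_STREAM, 0);
        int cfd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr;
        socklen_t len = sizeof(addr);
        char c;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
        getsockname(lfd, (struct sockaddr *)&addr, &len);   /* learn the port */
        listen(lfd, 8);

        connect(cfd, (struct sockaddr *)&addr, sizeof(addr)); /* sits in accept queue */
        close(lfd);                     /* inet_csk_listen_stop() runs here */

        if (read(cfd, &c, 1) < 0 && errno == ECONNRESET)
            printf("queued child was reset, as inet_child_forget() implies\n");
        close(cfd);
        return 0;
    }
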
882 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) in inet_csk_addr2sockaddr() argument
885 const struct inet_sock *inet = inet_sk(sk); in inet_csk_addr2sockaddr()
894 int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, in inet_csk_compat_getsockopt() argument
897 const struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_compat_getsockopt()
900 return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname, in inet_csk_compat_getsockopt()
902 return icsk->icsk_af_ops->getsockopt(sk, level, optname, in inet_csk_compat_getsockopt()
907 int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, in inet_csk_compat_setsockopt() argument
910 const struct inet_connection_sock *icsk = inet_csk(sk); in inet_csk_compat_setsockopt()
913 return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname, in inet_csk_compat_setsockopt()
915 return icsk->icsk_af_ops->setsockopt(sk, level, optname, in inet_csk_compat_setsockopt()
921 static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl) in inet_csk_rebuild_route() argument
923 const struct inet_sock *inet = inet_sk(sk); in inet_csk_rebuild_route()
934 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, in inet_csk_rebuild_route()
936 inet->inet_sport, sk->sk_protocol, in inet_csk_rebuild_route()
937 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if); in inet_csk_rebuild_route()
941 sk_setup_caps(sk, &rt->dst); in inet_csk_rebuild_route()
947 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu) in inet_csk_update_pmtu() argument
949 struct dst_entry *dst = __sk_dst_check(sk, 0); in inet_csk_update_pmtu()
950 struct inet_sock *inet = inet_sk(sk); in inet_csk_update_pmtu()
953 dst = inet_csk_rebuild_route(sk, &inet->cork.fl); in inet_csk_update_pmtu()
957 dst->ops->update_pmtu(dst, sk, NULL, mtu); in inet_csk_update_pmtu()
959 dst = __sk_dst_check(sk, 0); in inet_csk_update_pmtu()
961 dst = inet_csk_rebuild_route(sk, &inet->cork.fl); in inet_csk_update_pmtu()
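
inet_csk_update_pmtu() revalidates the socket's cached route, rebuilding it through inet_csk_rebuild_route() when the check fails, and pushes the new path MTU into the route via dst->ops->update_pmtu(). With glibc the resulting value can be read back on a connected socket through the IP_MTU option; a sketch over loopback (error checking trimmed):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <netinet/in.h>     /* IP_MTU with glibc */
    #include <sys/socket.h>

    int main(void)
    {
        int lfd = socket(AF_INET, SOCK_STREAM, 0);
        int cfd = socket(AF_INET, SOCK_STREAM, 0);
        struct sockaddr_in addr;
        socklen_t alen = sizeof(addr);
        int mtu;
        socklen_t mlen = sizeof(mtu);

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
        getsockname(lfd, (struct sockaddr *)&addr, &alen);
        listen(lfd, 1);
        connect(cfd, (struct sockaddr *)&addr, sizeof(addr));

        if (getsockopt(cfd, IPPROTO_IP, IP_MTU, &mtu, &mlen) == 0)
            printf("cached path MTU: %d\n", mtu);   /* ~65536 on loopback */
        close(cfd);
        close(lfd);
        return 0;
    }
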