Lines matching references to sk (struct sock *) in net/dccp/ipv4.c, grouped by function:
42 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) in dccp_v4_connect() argument
45 struct inet_sock *inet = inet_sk(sk); in dccp_v4_connect()
46 struct dccp_sock *dp = dccp_sk(sk); in dccp_v4_connect()
65 sock_owned_by_user(sk)); in dccp_v4_connect()
76 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, in dccp_v4_connect()
78 orig_sport, orig_dport, sk); in dccp_v4_connect()
92 sk_rcv_saddr_set(sk, inet->inet_saddr); in dccp_v4_connect()
94 sk_daddr_set(sk, daddr); in dccp_v4_connect()
96 inet_csk(sk)->icsk_ext_hdr_len = 0; in dccp_v4_connect()
98 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; in dccp_v4_connect()
105 dccp_set_state(sk, DCCP_REQUESTING); in dccp_v4_connect()
106 err = inet_hash_connect(&dccp_death_row, sk); in dccp_v4_connect()
111 inet->inet_sport, inet->inet_dport, sk); in dccp_v4_connect()
118 sk_setup_caps(sk, &rt->dst); in dccp_v4_connect()
126 err = dccp_connect(sk); in dccp_v4_connect()
136 dccp_set_state(sk, DCCP_CLOSED); in dccp_v4_connect()
138 sk->sk_route_caps = 0; in dccp_v4_connect()
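
The matches above span the whole active-open path: route the socket, fix its 4-tuple, hash it in, move it to DCCP_REQUESTING, and roll back to DCCP_CLOSED on failure. A condensed paraphrase of that flow (address validation, source-routing and several error branches omitted, so treat this as a sketch rather than the verbatim source):

	/* Sketch of dccp_v4_connect(), condensed from the lines listed above. */
	int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
	{
		struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
		struct inet_sock *inet = inet_sk(sk);
		__be32 daddr;
		struct rtable *rt;
		int err;

		if (addr_len < sizeof(*usin))
			return -EINVAL;
		daddr = usin->sin_addr.s_addr;

		/* The route lookup takes its inputs from the socket itself:
		 * TOS bits via RT_CONN_FLAGS(sk) and any bound device. */
		rt = ip_route_connect(&inet->cork.fl.u.ip4, daddr, inet->inet_saddr,
				      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
				      IPPROTO_DCCP, inet->inet_sport,
				      usin->sin_port, sk);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		/* Fix the connection 4-tuple on the socket before hashing it. */
		sk_rcv_saddr_set(sk, inet->inet_saddr);
		sk_daddr_set(sk, daddr);

		/* Enter REQUESTING first; inet_hash_connect() picks a local
		 * port and inserts sk into the established hash. */
		dccp_set_state(sk, DCCP_REQUESTING);
		err = inet_hash_connect(&dccp_death_row, sk);
		if (err)
			goto failure;

		sk_setup_caps(sk, &rt->dst);	/* dst is now owned by sk */
		rt = NULL;
		err = dccp_connect(sk);		/* build and send DCCP-Request */
		if (err)
			goto failure;
		return 0;

	failure:
		/* Undo, so a later connect() can retry on the same socket. */
		dccp_set_state(sk, DCCP_CLOSED);
		ip_rt_put(rt);
		sk->sk_route_caps = 0;
		inet->inet_dport = 0;
		return err;
	}
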
147 static inline void dccp_do_pmtu_discovery(struct sock *sk, in dccp_do_pmtu_discovery() argument
152 const struct inet_sock *inet = inet_sk(sk); in dccp_do_pmtu_discovery()
153 const struct dccp_sock *dp = dccp_sk(sk); in dccp_do_pmtu_discovery()
159 if (sk->sk_state == DCCP_LISTEN) in dccp_do_pmtu_discovery()
162 dst = inet_csk_update_pmtu(sk, mtu); in dccp_do_pmtu_discovery()
169 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) in dccp_do_pmtu_discovery()
170 sk->sk_err_soft = EMSGSIZE; in dccp_do_pmtu_discovery()
175 ip_sk_accept_pmtu(sk) && in dccp_do_pmtu_discovery()
176 inet_csk(sk)->icsk_pmtu_cookie > mtu) { in dccp_do_pmtu_discovery()
177 dccp_sync_mss(sk, mtu); in dccp_do_pmtu_discovery()
186 dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC); in dccp_do_pmtu_discovery()
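
These matches implement the usual reaction to an ICMP "fragmentation needed" error: update the route's cached PMTU, remember a soft error in case the connection cannot recover, shrink the maximum packet size, and probe the new size with a DCCP-Sync (per RFC 4340, sec. 14.1, Sync packets put no application data at risk). Condensed:

	/* Sketch of dccp_do_pmtu_discovery(), condensed. */
	static void dccp_do_pmtu_discovery(struct sock *sk,
					   const struct iphdr *iph, u32 mtu)
	{
		struct dst_entry *dst;

		/* Listeners carry no route, and Responses are small enough
		 * to pass unfragmented anyway. */
		if (sk->sk_state == DCCP_LISTEN)
			return;

		dst = inet_csk_update_pmtu(sk, mtu); /* refresh cached PMTU */
		if (!dst)
			return;

		/* Remember a soft error for the unrecoverable case. */
		if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
			sk->sk_err_soft = EMSGSIZE;

		mtu = dst_mtu(dst);
		if (inet_sk(sk)->pmtudisc != IP_PMTUDISC_DONT &&
		    ip_sk_accept_pmtu(sk) &&
		    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
			dccp_sync_mss(sk, mtu);	/* shrink the packet size */
			/* Probe with a Sync: no application data at risk. */
			dccp_send_sync(sk, dccp_sk(sk)->dccps_gsr, DCCP_PKT_SYNC);
		}
	}
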
190 static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk) in dccp_do_redirect() argument
192 struct dst_entry *dst = __sk_dst_check(sk, 0); in dccp_do_redirect()
195 dst->ops->redirect(dst, sk, skb); in dccp_do_redirect()
198 void dccp_req_err(struct sock *sk, u64 seq) in dccp_req_err() argument
200 struct request_sock *req = inet_reqsk(sk); in dccp_req_err()
201 struct net *net = sock_net(sk); in dccp_req_err()
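
Two short helpers complete the ICMP picture. dccp_do_redirect() revalidates the cached route and lets the dst's redirect handler rewire it. In dccp_req_err() the sk that arrives is not a full socket but a request minisocket in DCCP_NEW_SYN_RECV state, which is why it is immediately cast back with inet_reqsk(). A paraphrase of both:

	/* Sketch: react to an ICMP redirect by updating the cached route. */
	static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
	{
		struct dst_entry *dst = __sk_dst_check(sk, 0);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
	}

	/* Sketch: ICMP error against a pending connection request. */
	void dccp_req_err(struct sock *sk, u64 seq)
	{
		struct request_sock *req = inet_reqsk(sk); /* sk is a minisock */
		struct net *net = sock_net(sk);

		if (!between48(seq, dccp_rsk(req)->dreq_iss,
			       dccp_rsk(req)->dreq_gss)) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		} else {
			/* Drop the request silently: POSIX gives no way to
			 * report network errors from accept(). */
			inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		}
		reqsk_put(req);
	}
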
243 struct sock *sk; in dccp_v4_err() local
254 sk = __inet_lookup_established(net, &dccp_hashinfo, in dccp_v4_err()
258 if (!sk) { in dccp_v4_err()
263 if (sk->sk_state == DCCP_TIME_WAIT) { in dccp_v4_err()
264 inet_twsk_put(inet_twsk(sk)); in dccp_v4_err()
268 if (sk->sk_state == DCCP_NEW_SYN_RECV) in dccp_v4_err()
269 return dccp_req_err(sk, seq); in dccp_v4_err()
271 bh_lock_sock(sk); in dccp_v4_err()
275 if (sock_owned_by_user(sk)) in dccp_v4_err()
278 if (sk->sk_state == DCCP_CLOSED) in dccp_v4_err()
281 dp = dccp_sk(sk); in dccp_v4_err()
282 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && in dccp_v4_err()
290 dccp_do_redirect(skb, sk); in dccp_v4_err()
303 if (!sock_owned_by_user(sk)) in dccp_v4_err()
304 dccp_do_pmtu_discovery(sk, iph, info); in dccp_v4_err()
317 switch (sk->sk_state) { in dccp_v4_err()
320 if (!sock_owned_by_user(sk)) { in dccp_v4_err()
322 sk->sk_err = err; in dccp_v4_err()
324 sk->sk_error_report(sk); in dccp_v4_err()
326 dccp_done(sk); in dccp_v4_err()
328 sk->sk_err_soft = err; in dccp_v4_err()
348 inet = inet_sk(sk); in dccp_v4_err()
349 if (!sock_owned_by_user(sk) && inet->recverr) { in dccp_v4_err()
350 sk->sk_err = err; in dccp_v4_err()
351 sk->sk_error_report(sk); in dccp_v4_err()
353 sk->sk_err_soft = err; in dccp_v4_err()
355 bh_unlock_sock(sk); in dccp_v4_err()
356 sock_put(sk); in dccp_v4_err()
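
The error handler itself follows the canonical ICMP pattern for connection-oriented protocols: look the socket up, short-circuit TIME_WAIT and NEW_SYN_RECV, then take bh_lock_sock() and check sock_owned_by_user(); if user context holds the socket, the handler must not touch protocol state and can only leave a soft error behind. A condensed view of the body after the lookup, wrapped in a hypothetical helper for readability (header parsing, sequence checks and error mapping omitted):

	/* Sketch of the core of dccp_v4_err(); the helper name exists
	 * only in this sketch. */
	static void dccp_v4_err_sketch(struct sock *sk, const struct iphdr *iph,
				       int type, int code, u32 info, int err,
				       struct sk_buff *skb)
	{
		struct net *net = sock_net(sk);

		bh_lock_sock(sk);
		/* If user context holds the socket, only soft handling. */
		if (sock_owned_by_user(sk))
			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

		if (sk->sk_state == DCCP_CLOSED)
			goto out;

		switch (type) {
		case ICMP_REDIRECT:
			if (!sock_owned_by_user(sk))
				dccp_do_redirect(skb, sk);
			goto out;
		case ICMP_DEST_UNREACH:
			if (code == ICMP_FRAG_NEEDED &&
			    !sock_owned_by_user(sk))
				dccp_do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		switch (sk->sk_state) {
		case DCCP_REQUESTING:
		case DCCP_RESPOND:
			if (!sock_owned_by_user(sk)) {
				sk->sk_err = err;	 /* hard error */
				sk->sk_error_report(sk); /* wake the process */
				dccp_done(sk);
			} else {
				sk->sk_err_soft = err;	 /* report later */
			}
			goto out;
		}

		/* Other states: hard error only if the app set IP_RECVERR. */
		if (!sock_owned_by_user(sk) && inet_sk(sk)->recverr) {
			sk->sk_err = err;
			sk->sk_error_report(sk);
		} else {
			sk->sk_err_soft = err;
		}
	out:
		bh_unlock_sock(sk);
		sock_put(sk);
	}
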
365 void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb) in dccp_v4_send_check() argument
367 const struct inet_sock *inet = inet_sk(sk); in dccp_v4_send_check()
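
The transmit checksum helper reads only the connection's cached addresses from the socket; paraphrased:

	/* Sketch: finish the DCCP checksum with the IPv4 pseudo-header. */
	void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
	{
		const struct inet_sock *inet = inet_sk(sk);
		struct dccp_hdr *dh = dccp_hdr(skb);

		dccp_csum_outgoing(skb);
		dh->dccph_checksum = dccp_v4_csum_finish(skb, inet->inet_saddr,
							 inet->inet_daddr);
	}
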
391 struct sock *dccp_v4_request_recv_sock(const struct sock *sk, in dccp_v4_request_recv_sock() argument
402 if (sk_acceptq_is_full(sk)) in dccp_v4_request_recv_sock()
405 newsk = dccp_create_openreq_child(sk, req, skb); in dccp_v4_request_recv_sock()
420 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) in dccp_v4_request_recv_sock()
427 if (__inet_inherit_port(sk, newsk) < 0) in dccp_v4_request_recv_sock()
434 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in dccp_v4_request_recv_sock()
438 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in dccp_v4_request_recv_sock()
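
Here sk is the const listener and every write goes to the newly minted child: the addresses come from the request sock, the route is bound to newsk, and the local port is inherited from the listener. Condensed (the real signature carries extra req_unhash/own_req parameters, trimmed here; option and MSS setup omitted):

	/* Sketch of dccp_v4_request_recv_sock(): create the child socket. */
	struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
					       struct sk_buff *skb,
					       struct request_sock *req,
					       struct dst_entry *dst)
	{
		struct inet_request_sock *ireq = inet_rsk(req);
		struct sock *newsk;

		if (sk_acceptq_is_full(sk))	/* listener backlog full */
			goto exit_overflow;

		newsk = dccp_create_openreq_child(sk, req, skb);
		if (newsk == NULL)
			goto exit_nonewsk;

		/* The child's 4-tuple comes from the request, not the
		 * listener. */
		sk_daddr_set(newsk, ireq->ir_rmt_addr);
		sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);

		if (dst == NULL &&
		    (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
			goto put_and_exit;
		sk_setup_caps(newsk, dst);

		if (__inet_inherit_port(sk, newsk) < 0) /* share local port */
			goto put_and_exit;
		return newsk;

	exit_overflow:
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
	exit_nonewsk:
		dst_release(dst);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		return NULL;
	put_and_exit:
		/* newsk owns any route by now; tear it down properly. */
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		return NULL;
	}
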
447 static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk, in dccp_v4_route_skb() argument
456 .flowi4_tos = RT_CONN_FLAGS(sk), in dccp_v4_route_skb()
457 .flowi4_proto = sk->sk_protocol, in dccp_v4_route_skb()
463 rt = ip_route_output_flow(net, &fl4, sk); in dccp_v4_route_skb()
472 static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req) in dccp_v4_send_response() argument
479 dst = inet_csk_route_req(sk, &fl4, req); in dccp_v4_send_response()
483 skb = dccp_make_response(sk, dst, req); in dccp_v4_send_response()
490 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, in dccp_v4_send_response()
501 static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb) in dccp_v4_ctl_send_reset() argument
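
These three functions form the reply path. dccp_v4_route_skb() builds a flowi4 from the received packet with source and destination swapped, for replies that have no established route. dccp_v4_send_response() routes via the listener, builds the Response, checksums it against the request's addresses, and hands it to IP; dccp_v4_ctl_send_reset() is the companion for packets with no usable socket and transmits through the per-net DCCP control socket instead of sk. A paraphrase of the response side (the real code passes the request's IP options where this sketch passes NULL):

	/* Sketch of dccp_v4_send_response(): answer a Request. */
	static int dccp_v4_send_response(const struct sock *sk,
					 struct request_sock *req)
	{
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct dst_entry *dst;
		struct sk_buff *skb;
		struct flowi4 fl4;
		int err = -1;

		dst = inet_csk_route_req(sk, &fl4, req); /* route the reply */
		if (dst == NULL)
			goto out;

		skb = dccp_make_response(sk, dst, req);
		if (skb != NULL) {
			struct dccp_hdr *dh = dccp_hdr(skb);

			/* Checksum over the request's addresses, not the
			 * listener's. */
			dh->dccph_checksum = dccp_v4_csum_finish(skb,
							ireq->ir_loc_addr,
							ireq->ir_rmt_addr);
			err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
						    ireq->ir_rmt_addr, NULL);
			err = net_xmit_eval(err);
		}
	out:
		dst_release(dst);
		return err;
	}
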
564 int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) in dccp_v4_conn_request() argument
576 if (dccp_bad_service_code(sk, service)) { in dccp_v4_conn_request()
586 if (inet_csk_reqsk_queue_is_full(sk)) in dccp_v4_conn_request()
595 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) in dccp_v4_conn_request()
598 req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true); in dccp_v4_conn_request()
602 if (dccp_reqsk_init(req, dccp_sk(sk), skb)) in dccp_v4_conn_request()
606 if (dccp_parse_options(sk, dreq, skb)) in dccp_v4_conn_request()
609 if (security_inet_conn_request(sk, skb, req)) in dccp_v4_conn_request()
616 ireq->ir_iif = sk->sk_bound_dev_if; in dccp_v4_conn_request()
631 if (dccp_v4_send_response(sk, req)) in dccp_v4_conn_request()
634 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); in dccp_v4_conn_request()
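
The passive-open entry point: validate the service code, apply the usual listen-queue pressure checks, allocate and initialize a request sock, and answer with a Response before parking the request in the listener's queue with the initial timeout. Condensed (broadcast/multicast filtering and reset-code bookkeeping omitted):

	/* Sketch of dccp_v4_conn_request(): handle an incoming Request. */
	int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
	{
		__be32 service = dccp_hdr_request(skb)->dccph_req_service;
		struct inet_request_sock *ireq;
		struct request_sock *req;

		if (dccp_bad_service_code(sk, service))
			goto drop;	/* answered with Bad Service Code */

		/* Standard listen-queue pressure checks. */
		if (inet_csk_reqsk_queue_is_full(sk))
			goto drop;
		if (sk_acceptq_is_full(sk) &&
		    inet_csk_reqsk_queue_young(sk) > 1)
			goto drop;

		req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
		if (req == NULL)
			goto drop;

		if (dccp_reqsk_init(req, dccp_sk(sk), skb))
			goto drop_and_free;
		if (dccp_parse_options(sk, dccp_rsk(req), skb))
			goto drop_and_free;
		if (security_inet_conn_request(sk, skb, req))
			goto drop_and_free;

		ireq = inet_rsk(req);
		sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
		sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
		ireq->ir_iif = sk->sk_bound_dev_if;

		if (dccp_v4_send_response(sk, req))
			goto drop_and_free;

		/* Park the request; retransmit after DCCP_TIMEOUT_INIT. */
		inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
		return 0;

	drop_and_free:
		reqsk_free(req);
	drop:
		DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
		return -1;
	}
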
645 int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) in dccp_v4_do_rcv() argument
649 if (sk->sk_state == DCCP_OPEN) { /* Fast path */ in dccp_v4_do_rcv()
650 if (dccp_rcv_established(sk, skb, dh, skb->len)) in dccp_v4_do_rcv()
679 if (dccp_rcv_state_process(sk, skb, dh, skb->len)) in dccp_v4_do_rcv()
684 dccp_v4_ctl_send_reset(sk, skb); in dccp_v4_do_rcv()
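
The per-socket receive hook is a thin dispatcher: established sockets take the fast path, everything else goes through the full state machine, and any failure is answered with a Reset. Paraphrased:

	/* Sketch of dccp_v4_do_rcv(): dispatch one packet for socket sk. */
	int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
	{
		struct dccp_hdr *dh = dccp_hdr(skb);

		if (sk->sk_state == DCCP_OPEN) {	/* fast path */
			if (dccp_rcv_established(sk, skb, dh, skb->len))
				goto reset;
			return 0;
		}

		/* Slow path: LISTEN, REQUESTING, RESPOND, closing states. */
		if (dccp_rcv_state_process(sk, skb, dh, skb->len))
			goto reset;
		return 0;

	reset:
		dccp_v4_ctl_send_reset(sk, skb);
		kfree_skb(skb);
		return 0;
	}
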
767 struct sock *sk; in dccp_v4_rcv() local
803 sk = __inet_lookup_skb(&dccp_hashinfo, skb, in dccp_v4_rcv()
805 if (!sk) { in dccp_v4_rcv()
817 if (sk->sk_state == DCCP_TIME_WAIT) { in dccp_v4_rcv()
819 inet_twsk_put(inet_twsk(sk)); in dccp_v4_rcv()
823 if (sk->sk_state == DCCP_NEW_SYN_RECV) { in dccp_v4_rcv()
824 struct request_sock *req = inet_reqsk(sk); in dccp_v4_rcv()
827 sk = req->rsk_listener; in dccp_v4_rcv()
828 if (unlikely(sk->sk_state != DCCP_LISTEN)) { in dccp_v4_rcv()
829 inet_csk_reqsk_queue_drop_and_put(sk, req); in dccp_v4_rcv()
832 sock_hold(sk); in dccp_v4_rcv()
833 nsk = dccp_check_req(sk, skb, req); in dccp_v4_rcv()
838 if (nsk == sk) { in dccp_v4_rcv()
840 } else if (dccp_child_process(sk, nsk, skb)) { in dccp_v4_rcv()
841 dccp_v4_ctl_send_reset(sk, skb); in dccp_v4_rcv()
844 sock_put(sk); in dccp_v4_rcv()
853 min_cov = dccp_sk(sk)->dccps_pcrlen; in dccp_v4_rcv()
863 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) in dccp_v4_rcv()
867 return sk_receive_skb(sk, skb, 1); in dccp_v4_rcv()
881 dccp_v4_ctl_send_reset(sk, skb); in dccp_v4_rcv()
889 sock_put(sk); in dccp_v4_rcv()
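
The protocol entry point ties the lookups together: find the socket by 4-tuple, unwind TIME_WAIT minisocks, promote NEW_SYN_RECV requests through dccp_check_req()/dccp_child_process(), enforce the socket's minimum checksum coverage, and finally hand the skb over under the socket lock. Condensed control flow (header validation, stats and refcounting subtleties omitted; the helper name exists only in this sketch):

	/* Sketch of the demultiplexing core of dccp_v4_rcv(). */
	static int dccp_v4_rcv_sketch(struct sk_buff *skb)
	{
		const struct dccp_hdr *dh = dccp_hdr(skb);
		struct sock *sk;

	lookup:
		sk = __inet_lookup_skb(&dccp_hashinfo, skb,
				       dh->dccph_sport, dh->dccph_dport);
		if (!sk)
			goto no_dccp_socket;

		if (sk->sk_state == DCCP_TIME_WAIT) {
			inet_twsk_put(inet_twsk(sk)); /* minisock, not full sk */
			goto no_dccp_socket;
		}

		if (sk->sk_state == DCCP_NEW_SYN_RECV) {
			struct request_sock *req = inet_reqsk(sk);
			struct sock *nsk;

			sk = req->rsk_listener;	/* continue on the listener */
			if (unlikely(sk->sk_state != DCCP_LISTEN)) {
				inet_csk_reqsk_queue_drop_and_put(sk, req);
				goto lookup;
			}
			sock_hold(sk);
			nsk = dccp_check_req(sk, skb, req);
			if (!nsk) {
				reqsk_put(req);
				goto discard_and_relse;
			}
			if (nsk == sk) {
				reqsk_put(req);	/* keep going on listener */
			} else if (dccp_child_process(sk, nsk, skb)) {
				dccp_v4_ctl_send_reset(sk, skb);
				goto discard_and_relse;
			} else {
				sock_put(sk);
				return 0;	/* child consumed the skb */
			}
		}

		/* RFC 4340, 9.2.1: if MinCsCov is 0, accept only full
		 * coverage; otherwise also CsCov >= MinCsCov. */
		if (dh->dccph_cscov &&
		    (dccp_sk(sk)->dccps_pcrlen == 0 ||
		     dh->dccph_cscov < dccp_sk(sk)->dccps_pcrlen))
			goto discard_and_relse;

		if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
			goto discard_and_relse;

		return sk_receive_skb(sk, skb, 1); /* locks sk, runs do_rcv */

	no_dccp_socket:
		if (dh->dccph_type != DCCP_PKT_RESET)
			dccp_v4_ctl_send_reset(NULL, skb); /* No Connection */
		kfree_skb(skb);
		return 0;

	discard_and_relse:
		sock_put(sk);
		kfree_skb(skb);
		return 0;
	}
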
911 static int dccp_v4_init_sock(struct sock *sk) in dccp_v4_init_sock() argument
914 int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized); in dccp_v4_init_sock()
919 inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops; in dccp_v4_init_sock()
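
Finally, per-socket initialization delegates to the generic dccp_init_sock() and then plugs in the IPv4 address-family operations; paraphrased:

	/* Sketch of dccp_v4_init_sock(): per-socket init for PF_INET DCCP. */
	static int dccp_v4_init_sock(struct sock *sk)
	{
		static __u8 dccp_v4_ctl_sock_initialized;
		int err = dccp_init_sock(sk, dccp_v4_ctl_sock_initialized);

		if (err == 0) {
			if (unlikely(!dccp_v4_ctl_sock_initialized))
				dccp_v4_ctl_sock_initialized = 1;
			/* IPv4-specific connection ops (routing, xmit, ...). */
			inet_csk(sk)->icsk_af_ops = &dccp_ipv4_af_ops;
		}
		return err;
	}
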