Lines matching refs: sk

All references to the socket pointer sk in the kernel's IPv6 TCP code (net/ipv6/tcp_ipv6.c). Each entry gives the source line number, the matching line, the enclosing function, and, on declaration lines, whether sk is an argument or a local variable there.

73 static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
85 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, in tcp_v6_md5_do_lookup() argument
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) in inet6_sk_rx_dst_set() argument
99 sk->sk_rx_dst = dst; in inet6_sk_rx_dst_set()
100 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; in inet6_sk_rx_dst_set()
101 inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); in inet6_sk_rx_dst_set()
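The three inet6_sk_rx_dst_set() entries above (and the matching checks in the tcp_v6_do_rcv() and tcp_v6_early_demux() entries further down) show the receive-route caching pattern: the socket remembers the dst together with the incoming interface index and a route cookie, and later packets reuse the cached dst only if the ifindex still matches and the cookie is still current. A stand-alone sketch of that pattern with hypothetical names, not the kernel's structures:

#include <stddef.h>

/* Hypothetical stand-ins for the kernel objects involved. */
struct route { unsigned int generation; };

struct rx_cache {
	struct route *rt;      /* cached route, like sk->sk_rx_dst        */
	int           ifindex; /* like inet_sk(sk)->rx_dst_ifindex        */
	unsigned int  cookie;  /* like inet6_sk(sk)->rx_dst_cookie        */
};

/* Store: remember the route plus enough context to validate it later. */
static void rx_cache_set(struct rx_cache *c, struct route *rt, int ifindex)
{
	c->rt = rt;
	c->ifindex = ifindex;
	c->cookie = rt->generation;   /* analogous to rt6_get_cookie(rt) */
}

/* Use: only trust the cache if the interface and cookie still match. */
static struct route *rx_cache_get(struct rx_cache *c, int ifindex)
{
	if (!c->rt || c->ifindex != ifindex || c->cookie != c->rt->generation)
		return NULL;          /* caller falls back to a fresh lookup */
	return c->rt;
}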
113 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, in tcp_v6_connect() argument
117 struct inet_sock *inet = inet_sk(sk); in tcp_v6_connect()
118 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v6_connect()
119 struct ipv6_pinfo *np = inet6_sk(sk); in tcp_v6_connect()
120 struct tcp_sock *tp = tcp_sk(sk); in tcp_v6_connect()
141 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); in tcp_v6_connect()
166 if (sk->sk_bound_dev_if && in tcp_v6_connect()
167 sk->sk_bound_dev_if != usin->sin6_scope_id) in tcp_v6_connect()
170 sk->sk_bound_dev_if = usin->sin6_scope_id; in tcp_v6_connect()
174 if (!sk->sk_bound_dev_if) in tcp_v6_connect()
179 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) { in tcp_v6_connect()
185 sk->sk_v6_daddr = usin->sin6_addr; in tcp_v6_connect()
196 SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); in tcp_v6_connect()
198 if (__ipv6_only_sock(sk)) in tcp_v6_connect()
206 sk->sk_backlog_rcv = tcp_v4_do_rcv; in tcp_v6_connect()
211 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); in tcp_v6_connect()
216 sk->sk_backlog_rcv = tcp_v6_do_rcv; in tcp_v6_connect()
222 np->saddr = sk->sk_v6_rcv_saddr; in tcp_v6_connect()
227 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) in tcp_v6_connect()
228 saddr = &sk->sk_v6_rcv_saddr; in tcp_v6_connect()
231 fl6.daddr = sk->sk_v6_daddr; in tcp_v6_connect()
233 fl6.flowi6_oif = sk->sk_bound_dev_if; in tcp_v6_connect()
234 fl6.flowi6_mark = sk->sk_mark; in tcp_v6_connect()
238 opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); in tcp_v6_connect()
241 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); in tcp_v6_connect()
243 dst = ip6_dst_lookup_flow(sk, &fl6, final_p); in tcp_v6_connect()
251 sk->sk_v6_rcv_saddr = *saddr; in tcp_v6_connect()
258 sk->sk_gso_type = SKB_GSO_TCPV6; in tcp_v6_connect()
259 ip6_dst_store(sk, dst, NULL, NULL); in tcp_v6_connect()
263 ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr)) in tcp_v6_connect()
264 tcp_fetch_timewait_stamp(sk, dst); in tcp_v6_connect()
275 tcp_set_state(sk, TCP_SYN_SENT); in tcp_v6_connect()
276 err = inet6_hash_connect(&tcp_death_row, sk); in tcp_v6_connect()
280 sk_set_txhash(sk); in tcp_v6_connect()
284 sk->sk_v6_daddr.s6_addr32, in tcp_v6_connect()
288 err = tcp_connect(sk); in tcp_v6_connect()
295 tcp_set_state(sk, TCP_CLOSE); in tcp_v6_connect()
296 __sk_dst_reset(sk); in tcp_v6_connect()
299 sk->sk_route_caps = 0; in tcp_v6_connect()
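tcp_v6_connect() above falls back to tcp_v4_connect() for IPv4-mapped destinations (the "connect: ipv4 mapped" branch), and the tcp_v6_parse_md5_keys() entries further down index sin6_addr.s6_addr32[3] for the same case: in a v4-mapped IPv6 address (::ffff:a.b.c.d) the embedded IPv4 address occupies the last 32 bits. A small userspace sketch of that check and extraction (hypothetical helper, not the kernel code):

#include <netinet/in.h>
#include <string.h>
#include <stdbool.h>

/* If *a6 is an IPv4-mapped IPv6 address (::ffff:a.b.c.d), copy the embedded
 * IPv4 address (network byte order) into *a4 and return true. */
static bool v4_from_mapped(const struct in6_addr *a6, struct in_addr *a4)
{
	if (!IN6_IS_ADDR_V4MAPPED(a6))
		return false;
	/* the IPv4 address is the final 4 bytes of the 16-byte IPv6 address */
	memcpy(&a4->s_addr, &a6->s6_addr[12], 4);
	return true;
}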
303 static void tcp_v6_mtu_reduced(struct sock *sk) in tcp_v6_mtu_reduced() argument
307 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) in tcp_v6_mtu_reduced()
310 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info); in tcp_v6_mtu_reduced()
314 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) { in tcp_v6_mtu_reduced()
315 tcp_sync_mss(sk, dst_mtu(dst)); in tcp_v6_mtu_reduced()
316 tcp_simple_retransmit(sk); in tcp_v6_mtu_reduced()
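The (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE) test in tcp_v6_mtu_reduced() above is the usual trick for asking "is the state one of this set?" in a single mask operation: each TCPF_* flag is 1 shifted by the corresponding TCP_* state number. A tiny illustration with a made-up state enum (the real TCP_* values are defined by the kernel headers):

#include <stdio.h>

/* Hypothetical state numbering; the kernel's TCP_* values differ. */
enum demo_state { ST_ESTABLISHED = 1, ST_LISTEN = 2, ST_CLOSE = 3 };

/* One flag bit per state, mirroring how TCPF_* relates to TCP_*. */
#define STF(s) (1u << (s))

static int in_state_set(enum demo_state st, unsigned int set)
{
	return (1u << st) & set;       /* same shape as the kernel test */
}

int main(void)
{
	enum demo_state st = ST_LISTEN;

	if (in_state_set(st, STF(ST_LISTEN) | STF(ST_CLOSE)))
		printf("state %d is LISTEN or CLOSE\n", st);
	return 0;
}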
330 struct sock *sk; in tcp_v6_err() local
334 sk = __inet6_lookup_established(net, &tcp_hashinfo, in tcp_v6_err()
339 if (!sk) { in tcp_v6_err()
345 if (sk->sk_state == TCP_TIME_WAIT) { in tcp_v6_err()
346 inet_twsk_put(inet_twsk(sk)); in tcp_v6_err()
351 if (sk->sk_state == TCP_NEW_SYN_RECV) in tcp_v6_err()
352 return tcp_req_err(sk, seq, fatal); in tcp_v6_err()
354 bh_lock_sock(sk); in tcp_v6_err()
355 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) in tcp_v6_err()
358 if (sk->sk_state == TCP_CLOSE) in tcp_v6_err()
361 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { in tcp_v6_err()
366 tp = tcp_sk(sk); in tcp_v6_err()
370 if (sk->sk_state != TCP_LISTEN && in tcp_v6_err()
376 np = inet6_sk(sk); in tcp_v6_err()
379 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); in tcp_v6_err()
382 dst->ops->redirect(dst, sk, skb); in tcp_v6_err()
391 if (sk->sk_state == TCP_LISTEN) in tcp_v6_err()
394 if (!ip6_sk_accept_pmtu(sk)) in tcp_v6_err()
398 if (!sock_owned_by_user(sk)) in tcp_v6_err()
399 tcp_v6_mtu_reduced(sk); in tcp_v6_err()
402 sock_hold(sk); in tcp_v6_err()
408 switch (sk->sk_state) { in tcp_v6_err()
414 if (fastopen && !fastopen->sk) in tcp_v6_err()
417 if (!sock_owned_by_user(sk)) { in tcp_v6_err()
418 sk->sk_err = err; in tcp_v6_err()
419 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ in tcp_v6_err()
421 tcp_done(sk); in tcp_v6_err()
423 sk->sk_err_soft = err; in tcp_v6_err()
427 if (!sock_owned_by_user(sk) && np->recverr) { in tcp_v6_err()
428 sk->sk_err = err; in tcp_v6_err()
429 sk->sk_error_report(sk); in tcp_v6_err()
431 sk->sk_err_soft = err; in tcp_v6_err()
434 bh_unlock_sock(sk); in tcp_v6_err()
435 sock_put(sk); in tcp_v6_err()
439 static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, in tcp_v6_send_synack() argument
446 struct ipv6_pinfo *np = inet6_sk(sk); in tcp_v6_send_synack()
452 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req, in tcp_v6_send_synack()
456 skb = tcp_make_synack(sk, dst, req, foc, attach_req); in tcp_v6_send_synack()
467 err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), in tcp_v6_send_synack()
484 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk, in tcp_v6_md5_do_lookup() argument
487 return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6); in tcp_v6_md5_do_lookup()
490 static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk, in tcp_v6_md5_lookup() argument
493 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr); in tcp_v6_md5_lookup()
496 static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval, in tcp_v6_parse_md5_keys() argument
513 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], in tcp_v6_parse_md5_keys()
515 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr, in tcp_v6_parse_md5_keys()
523 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3], in tcp_v6_parse_md5_keys()
526 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr, in tcp_v6_parse_md5_keys()
583 const struct sock *sk, in tcp_v6_md5_hash_skb() argument
591 if (sk) { /* valid for establish/request sockets */ in tcp_v6_md5_hash_skb()
592 saddr = &sk->sk_v6_rcv_saddr; in tcp_v6_md5_hash_skb()
593 daddr = &sk->sk_v6_daddr; in tcp_v6_md5_hash_skb()
631 static bool tcp_v6_inbound_md5_hash(const struct sock *sk, in tcp_v6_inbound_md5_hash() argument
642 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr); in tcp_v6_inbound_md5_hash()
650 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); in tcp_v6_inbound_md5_hash()
655 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); in tcp_v6_inbound_md5_hash()
700 static struct dst_entry *tcp_v6_route_req(const struct sock *sk, in tcp_v6_route_req() argument
707 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP); in tcp_v6_route_req()
736 static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, in tcp_v6_send_response() argument
745 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); in tcp_v6_send_response()
835 static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) in tcp_v6_send_reset() argument
855 if (!sk && !ipv6_unicast_destination(skb)) in tcp_v6_send_reset()
860 if (!sk && hash_location) { in tcp_v6_send_reset()
884 key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL; in tcp_v6_send_reset()
894 oif = sk ? sk->sk_bound_dev_if : 0; in tcp_v6_send_reset()
895 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); in tcp_v6_send_reset()
906 static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, in tcp_v6_send_ack() argument
911 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, in tcp_v6_send_ack()
915 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) in tcp_v6_timewait_ack() argument
917 struct inet_timewait_sock *tw = inet_twsk(sk); in tcp_v6_timewait_ack()
918 struct tcp_timewait_sock *tcptw = tcp_twsk(sk); in tcp_v6_timewait_ack()
920 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, in tcp_v6_timewait_ack()
929 static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, in tcp_v6_reqsk_send_ack() argument
935 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? in tcp_v6_reqsk_send_ack()
936 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, in tcp_v6_reqsk_send_ack()
938 tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, in tcp_v6_reqsk_send_ack()
939 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), in tcp_v6_reqsk_send_ack()
944 static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb) in tcp_v6_cookie_check() argument
950 sk = cookie_v6_check(sk, skb); in tcp_v6_cookie_check()
952 return sk; in tcp_v6_cookie_check()
955 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) in tcp_v6_conn_request() argument
958 return tcp_v4_conn_request(sk, skb); in tcp_v6_conn_request()
964 &tcp_request_sock_ipv6_ops, sk, skb); in tcp_v6_conn_request()
967 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_v6_conn_request()
971 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, in tcp_v6_syn_recv_sock() argument
979 const struct ipv6_pinfo *np = inet6_sk(sk); in tcp_v6_syn_recv_sock()
995 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst, in tcp_v6_syn_recv_sock()
1045 if (sk_acceptq_is_full(sk)) in tcp_v6_syn_recv_sock()
1049 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP); in tcp_v6_syn_recv_sock()
1054 newsk = tcp_create_openreq_child(sk, req, skb); in tcp_v6_syn_recv_sock()
1121 if (tcp_sk(sk)->rx_opt.user_mss && in tcp_v6_syn_recv_sock()
1122 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) in tcp_v6_syn_recv_sock()
1123 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; in tcp_v6_syn_recv_sock()
1132 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr); in tcp_v6_syn_recv_sock()
1141 sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_v6_syn_recv_sock()
1145 if (__inet_inherit_port(sk, newsk) < 0) { in tcp_v6_syn_recv_sock()
1157 sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_v6_syn_recv_sock()
1168 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); in tcp_v6_syn_recv_sock()
1172 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); in tcp_v6_syn_recv_sock()
1184 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) in tcp_v6_do_rcv() argument
1186 struct ipv6_pinfo *np = inet6_sk(sk); in tcp_v6_do_rcv()
1199 return tcp_v4_do_rcv(sk, skb); in tcp_v6_do_rcv()
1201 if (sk_filter(sk, skb)) in tcp_v6_do_rcv()
1223 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_v6_do_rcv()
1225 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ in tcp_v6_do_rcv()
1226 struct dst_entry *dst = sk->sk_rx_dst; in tcp_v6_do_rcv()
1228 sock_rps_save_rxhash(sk, skb); in tcp_v6_do_rcv()
1229 sk_mark_napi_id(sk, skb); in tcp_v6_do_rcv()
1231 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || in tcp_v6_do_rcv()
1234 sk->sk_rx_dst = NULL; in tcp_v6_do_rcv()
1238 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); in tcp_v6_do_rcv()
1247 if (sk->sk_state == TCP_LISTEN) { in tcp_v6_do_rcv()
1248 struct sock *nsk = tcp_v6_cookie_check(sk, skb); in tcp_v6_do_rcv()
1253 if (nsk != sk) { in tcp_v6_do_rcv()
1256 if (tcp_child_process(sk, nsk, skb)) in tcp_v6_do_rcv()
1263 sock_rps_save_rxhash(sk, skb); in tcp_v6_do_rcv()
1265 if (tcp_rcv_state_process(sk, skb)) in tcp_v6_do_rcv()
1272 tcp_v6_send_reset(sk, skb); in tcp_v6_do_rcv()
1279 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS); in tcp_v6_do_rcv()
1280 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); in tcp_v6_do_rcv()
1292 tp = tcp_sk(sk); in tcp_v6_do_rcv()
1294 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { in tcp_v6_do_rcv()
1303 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { in tcp_v6_do_rcv()
1304 skb_set_owner_r(opt_skb, sk); in tcp_v6_do_rcv()
1351 struct sock *sk; in tcp_v6_rcv() local
1380 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest, in tcp_v6_rcv()
1382 if (!sk) in tcp_v6_rcv()
1386 if (sk->sk_state == TCP_TIME_WAIT) in tcp_v6_rcv()
1389 if (sk->sk_state == TCP_NEW_SYN_RECV) { in tcp_v6_rcv()
1390 struct request_sock *req = inet_reqsk(sk); in tcp_v6_rcv()
1393 sk = req->rsk_listener; in tcp_v6_rcv()
1395 if (tcp_v6_inbound_md5_hash(sk, skb)) { in tcp_v6_rcv()
1399 if (unlikely(sk->sk_state != TCP_LISTEN)) { in tcp_v6_rcv()
1400 inet_csk_reqsk_queue_drop_and_put(sk, req); in tcp_v6_rcv()
1403 sock_hold(sk); in tcp_v6_rcv()
1404 nsk = tcp_check_req(sk, skb, req, false); in tcp_v6_rcv()
1409 if (nsk == sk) { in tcp_v6_rcv()
1412 } else if (tcp_child_process(sk, nsk, skb)) { in tcp_v6_rcv()
1416 sock_put(sk); in tcp_v6_rcv()
1420 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { in tcp_v6_rcv()
1425 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) in tcp_v6_rcv()
1430 if (tcp_v6_inbound_md5_hash(sk, skb)) in tcp_v6_rcv()
1433 if (sk_filter(sk, skb)) in tcp_v6_rcv()
1438 if (sk->sk_state == TCP_LISTEN) { in tcp_v6_rcv()
1439 ret = tcp_v6_do_rcv(sk, skb); in tcp_v6_rcv()
1443 sk_incoming_cpu_update(sk); in tcp_v6_rcv()
1445 bh_lock_sock_nested(sk); in tcp_v6_rcv()
1446 tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs); in tcp_v6_rcv()
1448 if (!sock_owned_by_user(sk)) { in tcp_v6_rcv()
1449 if (!tcp_prequeue(sk, skb)) in tcp_v6_rcv()
1450 ret = tcp_v6_do_rcv(sk, skb); in tcp_v6_rcv()
1451 } else if (unlikely(sk_add_backlog(sk, skb, in tcp_v6_rcv()
1452 sk->sk_rcvbuf + sk->sk_sndbuf))) { in tcp_v6_rcv()
1453 bh_unlock_sock(sk); in tcp_v6_rcv()
1457 bh_unlock_sock(sk); in tcp_v6_rcv()
1460 sock_put(sk); in tcp_v6_rcv()
1483 sock_put(sk); in tcp_v6_rcv()
1488 inet_twsk_put(inet_twsk(sk)); in tcp_v6_rcv()
1495 inet_twsk_put(inet_twsk(sk)); in tcp_v6_rcv()
1499 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { in tcp_v6_rcv()
1509 struct inet_timewait_sock *tw = inet_twsk(sk); in tcp_v6_rcv()
1511 sk = sk2; in tcp_v6_rcv()
1518 tcp_v6_timewait_ack(sk, skb); in tcp_v6_rcv()
1533 struct sock *sk; in tcp_v6_early_demux() local
1548 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo, in tcp_v6_early_demux()
1552 if (sk) { in tcp_v6_early_demux()
1553 skb->sk = sk; in tcp_v6_early_demux()
1555 if (sk_fullsock(sk)) { in tcp_v6_early_demux()
1556 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); in tcp_v6_early_demux()
1559 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); in tcp_v6_early_demux()
1561 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) in tcp_v6_early_demux()
1636 static int tcp_v6_init_sock(struct sock *sk) in tcp_v6_init_sock() argument
1638 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_v6_init_sock()
1640 tcp_init_sock(sk); in tcp_v6_init_sock()
1645 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; in tcp_v6_init_sock()
1651 static void tcp_v6_destroy_sock(struct sock *sk) in tcp_v6_destroy_sock() argument
1653 tcp_v4_destroy_sock(sk); in tcp_v6_destroy_sock()
1654 inet6_destroy_sock(sk); in tcp_v6_destroy_sock()
1790 struct sock *sk = v; in tcp6_seq_show() local
1803 if (sk->sk_state == TCP_TIME_WAIT) in tcp6_seq_show()
1805 else if (sk->sk_state == TCP_NEW_SYN_RECV) in tcp6_seq_show()
1841 static void tcp_v6_clear_sk(struct sock *sk, int size) in tcp_v6_clear_sk() argument
1843 struct inet_sock *inet = inet_sk(sk); in tcp_v6_clear_sk()
1846 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6)); in tcp_v6_clear_sk()
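The final entries show tcp_v6_clear_sk() clearing the socket with sk_prot_clear_nulls() bounded by offsetof(struct inet_sock, pinet6), so the members laid out before the pinet6 pointer are zeroed while the pointer itself survives (it has to stay valid for concurrent RCU lookups). A minimal stand-alone sketch of that offsetof-bounded clear, using a made-up struct rather than the kernel's:

#include <stddef.h>
#include <string.h>

/* Made-up layout: zero every member that precedes 'pinet6' and leave the
 * pointer itself (and anything after it) untouched. */
struct demo_sock {
	int   state;
	int   err;
	void *pinet6;    /* must survive the clear */
};

static void demo_clear_before_pinet6(struct demo_sock *s)
{
	memset(s, 0, offsetof(struct demo_sock, pinet6));
}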