Lines matching refs: skb (net/ipv6/tcp_ipv6.c)

Each entry below gives the source line number, the matched line, and the enclosing function; "argument" and "local" note how skb is bound at that point. Hedged reconstructions and sketches of the surrounding code follow each group of matches.

73 static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
74 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
77 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
92 static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) in inet6_sk_rx_dst_set() argument
94 struct dst_entry *dst = skb_dst(skb); in inet6_sk_rx_dst_set()
100 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; in inet6_sk_rx_dst_set()
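Only the skb-referencing lines of inet6_sk_rx_dst_set() match above; the elided lines in between cache the route on the socket. A hedged reconstruction of the whole helper, assuming rt6_get_cookie() for the route cookie (older trees read rt6i_node->fn_sernum directly):

	static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
	{
		struct dst_entry *dst = skb_dst(skb);

		if (dst) {
			/* Pin the inbound route on the socket, together with
			 * the ifindex it arrived on, so tcp_v6_do_rcv() (see
			 * line 1248) can revalidate and reuse it without a
			 * fresh route lookup. */
			dst_hold(dst);
			sk->sk_rx_dst = dst;
			inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
			inet6_sk(sk)->rx_dst_cookie =
				rt6_get_cookie((struct rt6_info *)dst);
		}
	}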
106 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) in tcp_v6_init_sequence() argument
108 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, in tcp_v6_init_sequence()
109 ipv6_hdr(skb)->saddr.s6_addr32, in tcp_v6_init_sequence()
110 tcp_hdr(skb)->dest, in tcp_v6_init_sequence()
111 tcp_hdr(skb)->source); in tcp_v6_init_sequence()
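The matches here cover the entire body of tcp_v6_init_sequence(); re-assembled, with only the braces added:

	static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
	{
		/* Derive the initial sequence number from the 4-tuple of the
		 * incoming SYN: destination/source address and ports. */
		return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
						    ipv6_hdr(skb)->saddr.s6_addr32,
						    tcp_hdr(skb)->dest,
						    tcp_hdr(skb)->source);
	}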
323 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, in tcp_v6_err() argument
326 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; in tcp_v6_err()
327 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); in tcp_v6_err()
328 struct net *net = dev_net(skb->dev); in tcp_v6_err()
339 skb->dev->ifindex); in tcp_v6_err()
342 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), in tcp_v6_err()
362 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) { in tcp_v6_err()
383 dst->ops->redirect(dst, sk, skb); in tcp_v6_err()
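tcp_v6_err() is long and only its skb lines match; a hedged sketch of the two visible fragments (the min-hopcount drop at line 362 and the redirect dispatch at line 383), with the MIB counter and the NDISC_REDIRECT test filled in from same-era trees:

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		/* Outer hop limit below IPV6_MINHOPCOUNT: treat the ICMPv6
		 * error as likely spoofed and ignore it. */
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			/* Let the routing layer rewire the cached route. */
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}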
450 struct sk_buff *skb; in tcp_v6_send_synack() local
457 skb = tcp_make_synack(sk, dst, req, foc); in tcp_v6_send_synack()
459 if (skb) { in tcp_v6_send_synack()
460 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, in tcp_v6_send_synack()
467 skb_set_queue_mapping(skb, queue_mapping); in tcp_v6_send_synack()
469 err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), in tcp_v6_send_synack()
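The SYN-ACK transmit path, lightly filled in around the matched lines; the fl6->daddr assignment, np->tclass, and net_xmit_eval() are assumptions from same-era trees:

	skb = tcp_make_synack(sk, dst, req, foc);

	if (skb) {
		/* Compute the TCP checksum over the IPv6 pseudo-header
		 * before handing the segment to the IPv6 output path. */
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
			       np->tclass);
		err = net_xmit_eval(err);
	}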
586 const struct sk_buff *skb) in tcp_v6_md5_hash_skb() argument
591 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_md5_hash_skb()
597 const struct ipv6hdr *ip6h = ipv6_hdr(skb); in tcp_v6_md5_hash_skb()
610 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len)) in tcp_v6_md5_hash_skb()
614 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) in tcp_v6_md5_hash_skb()
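The two matched calls sit inside the usual MD5 signature pool sequence; a hedged sketch of the ordering, using the pre-4.6 crypto_hash_*() and tcp_get_md5sig_pool() helper names as assumptions:

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	/* Hash order: pseudo-header, TCP header (checksum and MD5 option
	 * zeroed out), payload past the header, then the key itself. */
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;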
631 static bool tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) in tcp_v6_inbound_md5_hash() argument
635 const struct ipv6hdr *ip6h = ipv6_hdr(skb); in tcp_v6_inbound_md5_hash()
636 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_inbound_md5_hash()
660 NULL, skb); in tcp_v6_inbound_md5_hash()
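The verification side, sketched around the matched lines; tcp_parse_md5sig_option() and the 16-byte compare are assumptions from same-era trees:

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	if (!hash_expected && !hash_location)
		return false;		/* neither side uses MD5: accept */
	if (!hash_expected || !hash_location)
		return true;		/* one-sided signature: drop */

	/* Recompute the signature over the segment and compare it with the
	 * payload of the received MD5 option. */
	genhash = tcp_v6_md5_hash_skb(newhash, hash_expected,
				      NULL, skb);
	if (genhash || memcmp(hash_location, newhash, 16) != 0)
		return true;		/* mismatch: drop */
	return false;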
674 struct sk_buff *skb) in tcp_v6_init_req() argument
679 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; in tcp_v6_init_req()
680 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; in tcp_v6_init_req()
685 ireq->ir_iif = tcp_v6_iif(skb); in tcp_v6_init_req()
687 if (!TCP_SKB_CB(skb)->tcp_tw_isn && in tcp_v6_init_req()
688 (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) || in tcp_v6_init_req()
692 atomic_inc(&skb->users); in tcp_v6_init_req()
693 ireq->pktopts = skb; in tcp_v6_init_req()
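tcp_v6_init_req() is nearly complete in the matched lines; a hedged reconstruction, where the np->rxopt bits on the elided lines 689-691 are recalled from same-era trees, not quoted:

	static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
				    struct sk_buff *skb)
	{
		struct inet_request_sock *ireq = inet_rsk(req);
		struct ipv6_pinfo *np = inet6_sk(sk);

		ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
		ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

		/* Record the inbound interface so link-local peers stay
		 * meaningful on unbound sockets. */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
			ireq->ir_iif = tcp_v6_iif(skb);

		if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
		    (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
		     np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		     np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)) {
			/* Hold the SYN so its IPv6 options can be replayed
			 * to the child socket once it is created. */
			atomic_inc(&skb->users);
			ireq->pktopts = skb;
		}
	}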
733 static void tcp_v6_send_response(struct sock *sk, struct sk_buff *skb, u32 seq, in tcp_v6_send_response() argument
738 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_send_response()
742 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); in tcp_v6_send_response()
790 &ipv6_hdr(skb)->saddr, in tcp_v6_send_response()
791 &ipv6_hdr(skb)->daddr, t1); in tcp_v6_send_response()
796 fl6.daddr = ipv6_hdr(skb)->saddr; in tcp_v6_send_response()
797 fl6.saddr = ipv6_hdr(skb)->daddr; in tcp_v6_send_response()
807 fl6.flowi6_oif = tcp_v6_iif(skb); in tcp_v6_send_response()
810 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); in tcp_v6_send_response()
813 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); in tcp_v6_send_response()
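The reply-flow setup inside tcp_v6_send_response(), sketched: the key point is that source and destination are swapped relative to the packet being answered. The strict-routing test and the port fields are assumptions from same-era trees:

	/* The answer goes back to the packet's source, from its destination. */
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else
		fl6.flowi6_oif = oif;
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));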
832 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) in tcp_v6_send_reset() argument
834 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_send_reset()
839 struct ipv6hdr *ipv6h = ipv6_hdr(skb); in tcp_v6_send_reset()
852 if (!sk && !ipv6_unicast_destination(skb)) in tcp_v6_send_reset()
865 sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), in tcp_v6_send_reset()
868 ntohs(th->source), tcp_v6_iif(skb)); in tcp_v6_send_reset()
877 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb); in tcp_v6_send_reset()
888 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - in tcp_v6_send_reset()
892 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); in tcp_v6_send_reset()
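How the RST's sequence numbers fall out of the offending segment: line 888 shows the ack_seq arithmetic, and the th->ack branch is filled in from the standard TCP reset rule:

	if (th->ack) {
		/* Answering an ACK: our seq is whatever the peer acked. */
		seq = ntohl(th->ack_seq);
	} else {
		/* Otherwise ACK exactly the sequence space the segment
		 * consumed: SYN and FIN each count for one. */
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);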
903 static void tcp_v6_send_ack(struct sock *sk, struct sk_buff *skb, u32 seq, in tcp_v6_send_ack() argument
908 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, in tcp_v6_send_ack()
912 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) in tcp_v6_timewait_ack() argument
917 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, in tcp_v6_timewait_ack()
926 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, in tcp_v6_reqsk_send_ack() argument
932 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? in tcp_v6_reqsk_send_ack()
936 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), in tcp_v6_reqsk_send_ack()
941 static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) in tcp_v6_hnd_req() argument
943 const struct tcphdr *th = tcp_hdr(skb); in tcp_v6_hnd_req()
949 &ipv6_hdr(skb)->saddr, in tcp_v6_hnd_req()
950 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); in tcp_v6_hnd_req()
952 nsk = tcp_check_req(sk, skb, req, false); in tcp_v6_hnd_req()
958 &ipv6_hdr(skb)->saddr, th->source, in tcp_v6_hnd_req()
959 &ipv6_hdr(skb)->daddr, ntohs(th->dest), in tcp_v6_hnd_req()
960 tcp_v6_iif(skb)); in tcp_v6_hnd_req()
973 sk = cookie_v6_check(sk, skb); in tcp_v6_hnd_req()
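A hedged sketch of tcp_v6_hnd_req()'s three-step dispatch; the prev-less inet6_csk_search_req() form is assumed to match the four-argument tcp_check_req() visible at line 952:

	/* 1: a pending connection request for this 4-tuple? */
	req = inet6_csk_search_req(sk, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, false);

	/* 2: an already-established socket, e.g. a just-created child? */
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
					 tcp_v6_iif(skb));
	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	/* 3: fall back to SYN cookies for bare ACKs. */
#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;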
978 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) in tcp_v6_conn_request() argument
980 if (skb->protocol == htons(ETH_P_IP)) in tcp_v6_conn_request()
981 return tcp_v4_conn_request(sk, skb); in tcp_v6_conn_request()
983 if (!ipv6_unicast_destination(skb)) in tcp_v6_conn_request()
987 &tcp_request_sock_ipv6_ops, sk, skb); in tcp_v6_conn_request()
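tcp_v6_conn_request() is short enough to reconstruct almost entirely from the matched lines; the drop label, tcp6_request_sock_ops, and the MIB bump are assumptions:

	static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
	{
		/* v4-mapped traffic on an AF_INET6 listener takes the IPv4
		 * path instead. */
		if (skb->protocol == htons(ETH_P_IP))
			return tcp_v4_conn_request(sk, skb);

		if (!ipv6_unicast_destination(skb))
			goto drop;

		return tcp_conn_request(&tcp6_request_sock_ops,
					&tcp_request_sock_ipv6_ops, sk, skb);

	drop:
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
		return 0; /* drop quietly; don't send a reset */
	}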
994 static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, in tcp_v6_syn_recv_sock() argument
1010 if (skb->protocol == htons(ETH_P_IP)) { in tcp_v6_syn_recv_sock()
1015 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst); in tcp_v6_syn_recv_sock()
1041 newnp->mcast_oif = tcp_v6_iif(skb); in tcp_v6_syn_recv_sock()
1042 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; in tcp_v6_syn_recv_sock()
1043 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); in tcp_v6_syn_recv_sock()
1045 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); in tcp_v6_syn_recv_sock()
1073 newsk = tcp_create_openreq_child(sk, req, skb); in tcp_v6_syn_recv_sock()
1085 inet6_sk_rx_dst_set(newsk, skb); in tcp_v6_syn_recv_sock()
1125 newnp->mcast_oif = tcp_v6_iif(skb); in tcp_v6_syn_recv_sock()
1126 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; in tcp_v6_syn_recv_sock()
1127 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); in tcp_v6_syn_recv_sock()
1129 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); in tcp_v6_syn_recv_sock()
1201 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) in tcp_v6_do_rcv() argument
1215 if (skb->protocol == htons(ETH_P_IP)) in tcp_v6_do_rcv()
1216 return tcp_v4_do_rcv(sk, skb); in tcp_v6_do_rcv()
1218 if (sk_filter(sk, skb)) in tcp_v6_do_rcv()
1240 opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_v6_do_rcv()
1245 sock_rps_save_rxhash(sk, skb); in tcp_v6_do_rcv()
1246 sk_mark_napi_id(sk, skb); in tcp_v6_do_rcv()
1248 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || in tcp_v6_do_rcv()
1255 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); in tcp_v6_do_rcv()
1261 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb)) in tcp_v6_do_rcv()
1265 struct sock *nsk = tcp_v6_hnd_req(sk, skb); in tcp_v6_do_rcv()
1275 sock_rps_save_rxhash(nsk, skb); in tcp_v6_do_rcv()
1276 sk_mark_napi_id(sk, skb); in tcp_v6_do_rcv()
1277 if (tcp_child_process(sk, nsk, skb)) in tcp_v6_do_rcv()
1284 sock_rps_save_rxhash(sk, skb); in tcp_v6_do_rcv()
1286 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) in tcp_v6_do_rcv()
1293 tcp_v6_send_reset(sk, skb); in tcp_v6_do_rcv()
1297 kfree_skb(skb); in tcp_v6_do_rcv()
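The established-state fast path inside tcp_v6_do_rcv(), sketched around lines 1245-1255: the rx_dst cached by inet6_sk_rx_dst_set() is revalidated before tcp_rcv_established() runs. The dst->ops->check() call against np->rx_dst_cookie is an assumption from same-era trees:

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* A different arrival ifindex or a stale routing
			 * cookie invalidates the cached inbound route. */
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, np->rx_dst_cookie)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}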
1337 static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, in tcp_v6_fill_cb() argument
1345 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb), in tcp_v6_fill_cb()
1349 TCP_SKB_CB(skb)->seq = ntohl(th->seq); in tcp_v6_fill_cb()
1350 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + in tcp_v6_fill_cb()
1351 skb->len - th->doff*4); in tcp_v6_fill_cb()
1352 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); in tcp_v6_fill_cb()
1353 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); in tcp_v6_fill_cb()
1354 TCP_SKB_CB(skb)->tcp_tw_isn = 0; in tcp_v6_fill_cb()
1355 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); in tcp_v6_fill_cb()
1356 TCP_SKB_CB(skb)->sacked = 0; in tcp_v6_fill_cb()
1359 static void tcp_v6_restore_cb(struct sk_buff *skb) in tcp_v6_restore_cb() argument
1364 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, in tcp_v6_restore_cb()
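These two helpers are almost fully visible above; reconstructed, with the aliasing note that motivates the compiler barrier between the memmove and the field stores (the barrier() itself is an assumption from same-era trees):

	static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
				   const struct tcphdr *th)
	{
		/* Move IP6CB() into its slot inside TCP_SKB_CB(). This must
		 * happen after xfrm6_policy_check(), which still reads
		 * IP6CB(); the barrier keeps the compiler from reordering
		 * the aliasing stores. */
		memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
			sizeof(struct inet6_skb_parm));
		barrier();

		TCP_SKB_CB(skb)->seq = ntohl(th->seq);
		TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
					    skb->len - th->doff * 4);
		TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
		TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
		TCP_SKB_CB(skb)->tcp_tw_isn = 0;
		TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
		TCP_SKB_CB(skb)->sacked = 0;
	}

	static void tcp_v6_restore_cb(struct sk_buff *skb)
	{
		/* The inverse move, needed before the skb re-enters generic
		 * IPv6 code paths, e.g. when a TIME_WAIT lookup redirects the
		 * segment to a listener. */
		memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
			sizeof(struct inet6_skb_parm));
	}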
1368 static int tcp_v6_rcv(struct sk_buff *skb) in tcp_v6_rcv() argument
1374 struct net *net = dev_net(skb->dev); in tcp_v6_rcv()
1376 if (skb->pkt_type != PACKET_HOST) in tcp_v6_rcv()
1384 if (!pskb_may_pull(skb, sizeof(struct tcphdr))) in tcp_v6_rcv()
1387 th = tcp_hdr(skb); in tcp_v6_rcv()
1391 if (!pskb_may_pull(skb, th->doff*4)) in tcp_v6_rcv()
1394 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo)) in tcp_v6_rcv()
1397 th = tcp_hdr(skb); in tcp_v6_rcv()
1398 hdr = ipv6_hdr(skb); in tcp_v6_rcv()
1400 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest, in tcp_v6_rcv()
1401 inet6_iif(skb)); in tcp_v6_rcv()
1414 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) in tcp_v6_rcv()
1417 tcp_v6_fill_cb(skb, hdr, th); in tcp_v6_rcv()
1420 if (tcp_v6_inbound_md5_hash(sk, skb)) in tcp_v6_rcv()
1424 if (sk_filter(sk, skb)) in tcp_v6_rcv()
1428 skb->dev = NULL; in tcp_v6_rcv()
1433 if (!tcp_prequeue(sk, skb)) in tcp_v6_rcv()
1434 ret = tcp_v6_do_rcv(sk, skb); in tcp_v6_rcv()
1435 } else if (unlikely(sk_add_backlog(sk, skb, in tcp_v6_rcv()
1447 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) in tcp_v6_rcv()
1450 tcp_v6_fill_cb(skb, hdr, th); in tcp_v6_rcv()
1452 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) { in tcp_v6_rcv()
1458 tcp_v6_send_reset(NULL, skb); in tcp_v6_rcv()
1462 kfree_skb(skb); in tcp_v6_rcv()
1470 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { in tcp_v6_rcv()
1475 tcp_v6_fill_cb(skb, hdr, th); in tcp_v6_rcv()
1477 if (skb->len < (th->doff<<2)) { in tcp_v6_rcv()
1481 if (tcp_checksum_complete(skb)) { in tcp_v6_rcv()
1486 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { in tcp_v6_rcv()
1491 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, in tcp_v6_rcv()
1492 &ipv6_hdr(skb)->saddr, th->source, in tcp_v6_rcv()
1493 &ipv6_hdr(skb)->daddr, in tcp_v6_rcv()
1494 ntohs(th->dest), tcp_v6_iif(skb)); in tcp_v6_rcv()
1500 tcp_v6_restore_cb(skb); in tcp_v6_rcv()
1506 tcp_v6_timewait_ack(sk, skb); in tcp_v6_rcv()
1509 tcp_v6_restore_cb(skb); in tcp_v6_rcv()
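The header-validation head of tcp_v6_rcv(), sketched from lines 1376-1398; the th->doff sanity check on the elided line is recalled from same-era trees, not quoted:

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Linearize the fixed header first, then the full header including
	 * options, before dereferencing anything. */
	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);
	if (th->doff < sizeof(struct tcphdr) / 4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* pskb_may_pull() can reallocate; refetch the header pointers. */
	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);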
1517 static void tcp_v6_early_demux(struct sk_buff *skb) in tcp_v6_early_demux() argument
1523 if (skb->pkt_type != PACKET_HOST) in tcp_v6_early_demux()
1526 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) in tcp_v6_early_demux()
1529 hdr = ipv6_hdr(skb); in tcp_v6_early_demux()
1530 th = tcp_hdr(skb); in tcp_v6_early_demux()
1536 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo, in tcp_v6_early_demux()
1539 inet6_iif(skb)); in tcp_v6_early_demux()
1541 skb->sk = sk; in tcp_v6_early_demux()
1542 skb->destructor = sock_edemux; in tcp_v6_early_demux()
1549 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) in tcp_v6_early_demux()
1550 skb_dst_set_noref(skb, dst); in tcp_v6_early_demux()
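tcp_v6_early_demux() reconstructed around the matched lines: an established-socket lookup lets the stack attach the socket and its cached route to the skb before the normal receive path runs. The TIME_WAIT guard and the dst_check() cookie argument are assumptions from same-era trees:

	static void tcp_v6_early_demux(struct sk_buff *skb)
	{
		const struct ipv6hdr *hdr;
		const struct tcphdr *th;
		struct sock *sk;

		if (skb->pkt_type != PACKET_HOST)
			return;

		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
				   sizeof(struct tcphdr)))
			return;

		hdr = ipv6_hdr(skb);
		th = tcp_hdr(skb);
		if (th->doff < sizeof(struct tcphdr) / 4)
			return;

		/* Only established connections qualify for early demux. */
		sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
						&hdr->saddr, th->source,
						&hdr->daddr, ntohs(th->dest),
						inet6_iif(skb));
		if (sk) {
			skb->sk = sk;
			skb->destructor = sock_edemux;
			if (sk->sk_state != TCP_TIME_WAIT) {
				struct dst_entry *dst = sk->sk_rx_dst;

				if (dst)
					dst = dst_check(dst,
							inet6_sk(sk)->rx_dst_cookie);
				if (dst &&
				    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
					skb_dst_set_noref(skb, dst);
			}
		}
	}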