This source file includes the following definitions:
- tcp_in_window
- tcp_timewait_check_oow_rate_limit
- tcp_timewait_state_process
- tcp_time_wait
- tcp_twsk_destructor
- tcp_openreq_init_rwin
- tcp_ecn_openreq_child
- tcp_ca_openreq_child
- smc_check_reset_syn_req
- tcp_create_openreq_child
- tcp_check_req
- tcp_child_process
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP):
 *		TIME-WAIT sockets and SYN_RECV request-socket handling
 *		("minisocks").
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

/* Accept a segment if at least one byte overlaps the receive window
 * [s_win, e_win), or if it is a zero-length segment sitting exactly at
 * either window edge.
 */
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
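
/*
 * Example (illustrative only, not part of the kernel source): with a
 * receive window of [1000, 2000), a data segment covering [900, 1100)
 * overlaps the window and is accepted, a bare segment entirely to the
 * left is rejected, and an empty segment sitting exactly on an edge is
 * accepted:
 *
 *	tcp_in_window(900, 1100, 1000, 2000);	// true: overlaps window
 *	tcp_in_window(500, 500, 1000, 2000);	// false: left of window
 *	tcp_in_window(1000, 1000, 1000, 2000);	// true: empty seg at left edge
 *	tcp_in_window(2000, 2000, 1000, 2000);	// true: empty seg at right edge
 */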

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
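
/*
 * The core of the rate limiter, roughly (a simplified sketch of
 * tcp_oow_rate_limited(); the interval is the net.ipv4.tcp_invalid_ratelimit
 * sysctl, 500 ms by default):
 *
 *	if (*last_oow_ack_time) {
 *		s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
 *
 *		if (0 <= elapsed && elapsed < ratelimit_jiffies)
 *			return true;	// too soon: suppress the reply ACK
 *	}
 *	*last_oow_ack_time = tcp_jiffies32;	// remember, allow this ACK
 *	return false;
 */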

/*
 * The main purpose of TIME-WAIT is to close the connection gracefully when
 * the peer sits in LAST-ACK or CLOSING retransmitting its FIN while one or
 * more of our ACKs are lost; its second purpose is to catch old duplicate
 * segments from earlier incarnations of the connection.  The timeout is
 * therefore sized to exceed the maximal retransmission timeout, and a
 * socket loaded with the duplicate-catching semantics must not be killed
 * by an incoming RST (see the tcp_rfc1337 sysctl below).
 *
 * The state machine below is a formal interpretation of the SEGMENT
 * ARRIVES processing in the RFCs.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122 allows reopening the connection directly on a new SYN,
	 * provided its initial sequence number is larger than the largest
	 * sequence number used on the previous incarnation, and requires
	 * returning to TIME-WAIT if the SYN turns out to be an old
	 * duplicate.
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.  The only exception is a
	 * new SYN: we accept it if it is demonstrably not an old duplicate,
	 * i.e. its sequence number lies above rcv_nxt, or PAWS proves it
	 * newer via a larger timestamp.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is ACKless SYN it may be both old duplicate
		 * and new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
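
/*
 * Sketch of how the return status is consumed, loosely following the
 * do_time_wait handling in tcp_v4_rcv() (simplified, not a verbatim copy):
 *
 *	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
 *	case TCP_TW_SYN:	// acceptable new SYN: look up the listener
 *		...		// and reopen the connection
 *	case TCP_TW_ACK:	// re-ACK the segment
 *		tcp_v4_timewait_ack(sk, skb);
 *		break;
 *	case TCP_TW_RST:	// send RST and kill the tw socket
 *		tcp_v4_send_reset(sk, skb);
 *		inet_twsk_deschedule_put(inet_twsk(sk));
 *		goto discard_it;
 *	case TCP_TW_SUCCESS:	// tw reference already released; drop skb
 *		;
 *	}
 *	goto discard_it;
 */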

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent = inet->transparent;
		tw->tw_mark = sk->sk_mark;
		tw->tw_priority = sk->sk_priority;
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset = tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay = tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_txhash = sk->sk_txhash;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		tcptw->tw_md5_key = NULL;
		if (static_branch_unlikely(&tcp_md5_needed)) {
			struct tcp_md5sig_key *key;

			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
			}
		}
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in the following section, otherwise the timer is not fired
		 * as expected.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
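
/*
 * Typical call sites (sketch): the receive state machine hands a closing
 * connection over to a timewait minisock, e.g.
 *
 *	tcp_time_wait(sk, TCP_TIME_WAIT, 0);	// from tcp_fin()
 *	tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);	// from tcp_rcv_state_process()
 *
 * After the call, sk has gone through tcp_done() and the caller must not
 * assume it still represents the connection.
 */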

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* Check if user specified a specific rcv buffer size */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
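
/*
 * Worked example (illustrative numbers): with mss = 1460, timestamps
 * negotiated and full_space = 65535 bytes, tcp_select_initial_window() is
 * asked to size a window over 65535 bytes with an effective mss of
 * 1460 - 12 = 1448 (TCPOLEN_TSTAMP_ALIGNED reserved per segment).
 * rsk_rcv_wnd then holds the window to advertise in the SYN-ACK, which
 * cannot exceed 65535 since window scaling never applies to the SYN-ACK
 * itself, and rcv_wscale holds the scale factor to offer.
 */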

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
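
/*
 * The per-destination override consulted above can be installed from
 * userspace as a route metric, e.g. (illustrative):
 *
 *	ip route add 10.0.0.0/8 via 192.168.1.1 congctl bbr
 *
 * A passively opened child for such a destination then starts with "bbr"
 * regardless of the listener's congestion control; "congctl lock bbr"
 * additionally makes tcp_ca_dst_locked() true, so a later setsockopt()
 * on the child cannot override the choice.
 */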

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing.
 *
 * The child is cloned from the listener by inet_csk_clone_lock(), so it
 * already carries all the necessary default parameters; only the fields
 * that differ are set up below.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;
	if (newtp->af_specific->md5_lookup(sk, newsk))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
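
/*
 * Worked example for the window setup above (illustrative numbers): if
 * the handshake-completing ACK advertises window 512 and the negotiated
 * snd_wscale is 7, the child starts with snd_wnd = 512 << 7 = 65536
 * bytes.  Without wscale_ok both scale factors are forced to 0 and
 * window_clamp is capped at 65535, the largest window representable in
 * the 16-bit header field.
 */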

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock.  Normally sk is the listener socket, but for TCP Fast Open
 * (TFO) it can also be the child socket created by the SYN that carried
 * data; the fastopen flag distinguishes the two cases.
 *
 * Returns the child socket on success, NULL when the segment has been
 * consumed or dropped, or sk itself to let the caller keep processing on
 * the original socket.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store true stamp, but it is not required,
			 * it can be estimated (approximately)
			 * from another data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 says to send an ACK here, since the segment is out
		 * of window whether or not it carries data.  ACKing a
		 * retransmitted SYN is pointless, though: the peer is still
		 * waiting for our SYN-ACK.  So retransmit the SYN-ACK
		 * instead, rate-limited against out-of-window abuse, and
		 * push back the SYN-ACK retransmission timer.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&

		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES" for state SYN-RECEIVED
	 * of RFC793.  A bare ACK whose ack_seq does not match snt_isn + 1 is
	 * returned to the caller unchanged (sk), which answers with a RST;
	 * this also covers the simultaneous-connect corner cases that RFC793
	 * leaves underspecified.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would be not so bad idea to check rcv_tsecr, which
	 * is essentially ACK extension and too early or too late values
	 * should cause reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
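
/*
 * Sketch of the caller side (simplified from tcp_v4_rcv(), not a verbatim
 * copy): a segment matching a request socket is funneled through
 * tcp_check_req() under the listener, and req_stolen reports that another
 * CPU won the race to create the child:
 *
 *	struct sock *nsk;
 *	bool req_stolen = false;
 *
 *	nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
 *	if (!nsk) {
 *		reqsk_put(req);			// consumed or dropped
 *		goto discard;
 *	}
 *	if (nsk == sk)
 *		...				// keep processing / send RST
 *	else
 *		tcp_child_process(sk, nsk, skb);
 */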

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering.  But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	/* record NAPI ID of child */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
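
/*
 * Calling contract (sketch): the child arrives bh-locked, as left by
 * inet_csk_clone_lock(), with a reference held; both are released here.
 * A nonzero return propagates tcp_rcv_state_process()'s verdict that the
 * segment must be answered with a RST, e.g. (simplified):
 *
 *	if (tcp_child_process(sk, nsk, skb))	// unlocks + puts nsk
 *		tcp_v4_send_reset(nsk, skb);
 */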