This source file includes the following definitions:
- ipv6_rcv_saddr_equal
- ipv4_rcv_saddr_equal
- inet_rcv_saddr_equal
- inet_rcv_saddr_any
- inet_get_local_port_range
- inet_csk_bind_conflict
- inet_csk_find_open_port
- sk_reuseport_match
- inet_csk_get_port
- inet_csk_wait_for_connect
- inet_csk_accept
- inet_csk_init_xmit_timers
- inet_csk_clear_xmit_timers
- inet_csk_delete_keepalive_timer
- inet_csk_reset_keepalive_timer
- inet_csk_route_req
- inet_csk_route_child_sock
- syn_ack_recalc
- inet_rtx_syn_ack
- reqsk_queue_unlink
- inet_csk_reqsk_queue_drop
- inet_csk_reqsk_queue_drop_and_put
- reqsk_timer_handler
- reqsk_queue_hash_req
- inet_csk_reqsk_queue_hash_add
- inet_csk_clone_lock
- inet_csk_destroy_sock
- inet_csk_prepare_forced_close
- inet_csk_listen_start
- inet_child_forget
- inet_csk_reqsk_queue_add
- inet_csk_complete_hashdance
- inet_csk_listen_stop
- inet_csk_addr2sockaddr
- inet_csk_compat_getsockopt
- inet_csk_compat_setsockopt
- inet_csk_rebuild_route
- inet_csk_update_pmtu
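
// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */
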
#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
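/* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses
 *				if IPv6 only, and any IPv4 addresses
 *				if not IPv6 only
 * match_sk*_wildcard == false: addresses are compared literally
 */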
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

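	/* if both are mapped, treat as IPv4 */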
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
				(match_sk2_wildcard && !sk2_rcv_saddr);
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif

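/* match_sk*_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_sk*_wildcard == false: addresses are compared literally
 */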
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
			(match_sk2_wildcard && !sk2_rcv_saddr);
	}
	return false;
}

bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard,
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard,
				    match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}

void inet_get_local_port_range(struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  bool relax, bool reuseport_ok)
{
	struct sock *sk2;
	bool reuse = sk->sk_reuse;
	bool reuseport = !!sk->sk_reuseport && reuseport_ok;
	kuid_t uid = sock_i_uid((struct sock *)sk);

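	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */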
	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			    rcu_access_pointer(sk->sk_reuseport_cb) ||
			    (sk2->sk_state != TCP_TIME_WAIT &&
			     !uid_eq(uid, sock_i_uid(sk2))))) {
				if (inet_rcv_saddr_equal(sk, sk2, true))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				if (inet_rcv_saddr_equal(sk, sk2, true))
					break;
			}
		}
	}
	return sk2 != NULL;
}

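/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket lock held on success.
 */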
static struct inet_bind_hashbucket *
inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
{
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int port = 0;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	int i, low, high, attempt_half;
	struct inet_bind_bucket *tb;
	u32 remaining, offset;
	int l3mdev;

	l3mdev = inet_sk_bound_l3mdev(sk);
	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
	inet_get_local_port_range(net, &low, &high);
	high++;
	if (high - low < 4)
		attempt_half = 0;
	if (attempt_half) {
		int half = low + (((high - low) >> 2) << 1);

		if (attempt_half == 1)
			high = half;
		else
			low = half;
	}
	remaining = high - low;
	if (likely(remaining > 1))
		remaining &= ~1U;

	offset = prandom_u32() % remaining;
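	/* __inet_hash_connect() favors ports having @low parity
	 * We do the opposite to not pollute connect() users.
	 */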
	offset |= 1U;

other_parity_scan:
	port = low + offset;
	for (i = 0; i < remaining; i += 2, port += 2) {
		if (unlikely(port >= high))
			port -= remaining;
		if (inet_is_local_reserved_port(net, port))
			continue;
		head = &hinfo->bhash[inet_bhashfn(net, port,
						  hinfo->bhash_size)];
		spin_lock_bh(&head->lock);
		inet_bind_bucket_for_each(tb, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
			    tb->port == port) {
				if (!inet_csk_bind_conflict(sk, tb, false, false))
					goto success;
				goto next_port;
			}
		tb = NULL;
		goto success;
next_port:
		spin_unlock_bh(&head->lock);
		cond_resched();
	}

	offset--;
	if (!(offset & 1))
		goto other_parity_scan;

	if (attempt_half == 1) {
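		/* OK we now try the upper half of the range */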
		attempt_half = 2;
		goto other_half_scan;
	}
	return NULL;
success:
	*port_ret = port;
	*tb_ret = tb;
	return head;
}

static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
				     struct sock *sk)
{
	kuid_t uid = sock_i_uid(sk);

	if (tb->fastreuseport <= 0)
		return 0;
	if (!sk->sk_reuseport)
		return 0;
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		return 0;
	if (!uid_eq(tb->fastuid, uid))
		return 0;
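	/* We only need to check the rcv_saddr if this tb was once marked
	 * without fastreuseport and then was reset, as we can only know that
	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
	 * owners list.
	 */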
	if (tb->fastreuseport == FASTREUSEPORT_ANY)
		return 1;
#if IS_ENABLED(CONFIG_IPV6)
	if (tb->fast_sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
					    inet6_rcv_saddr(sk),
					    tb->fast_rcv_saddr,
					    sk->sk_rcv_saddr,
					    tb->fast_ipv6_only,
					    ipv6_only_sock(sk), true, false);
#endif
	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
				    ipv6_only_sock(sk), true, false);
}

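/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */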
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
	struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
	int ret = 1, port = snum;
	struct inet_bind_hashbucket *head;
	struct net *net = sock_net(sk);
	struct inet_bind_bucket *tb = NULL;
	kuid_t uid = sock_i_uid(sk);
	int l3mdev;

	l3mdev = inet_sk_bound_l3mdev(sk);

	if (!port) {
		head = inet_csk_find_open_port(sk, &tb, &port);
		if (!head)
			return ret;
		if (!tb)
			goto tb_not_found;
		goto success;
	}
	head = &hinfo->bhash[inet_bhashfn(net, port,
					  hinfo->bhash_size)];
	spin_lock_bh(&head->lock);
	inet_bind_bucket_for_each(tb, &head->chain)
		if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
		    tb->port == port)
			goto tb_found;
tb_not_found:
	tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
				     net, head, port, l3mdev);
	if (!tb)
		goto fail_unlock;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if ((tb->fastreuse > 0 && reuse) ||
		    sk_reuseport_match(tb, sk))
			goto success;
		if (inet_csk_bind_conflict(sk, tb, true, true))
			goto fail_unlock;
	}
success:
	if (hlist_empty(&tb->owners)) {
		tb->fastreuse = reuse;
		if (sk->sk_reuseport) {
			tb->fastreuseport = FASTREUSEPORT_ANY;
			tb->fastuid = uid;
			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
			tb->fast_ipv6_only = ipv6_only_sock(sk);
			tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
		} else {
			tb->fastreuseport = 0;
		}
	} else {
		if (!reuse)
			tb->fastreuse = 0;
		if (sk->sk_reuseport) {
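			/* We didn't match or we don't have fastreuseport set on
			 * the tb, but we have sk_reuseport set on this socket
			 * and we know that there are no bind conflicts with
			 * this socket in this tb, so reset our tb's reuseport
			 * settings so that any subsequent sockets that match
			 * our current socket will be put on the fast path.
			 *
			 * If we reset we need to set FASTREUSEPORT_STRICT so we
			 * do individual matching in hash find in close cases
			 */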
			if (!sk_reuseport_match(tb, sk)) {
				tb->fastreuseport = FASTREUSEPORT_STRICT;
				tb->fastuid = uid;
				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
				tb->fast_ipv6_only = ipv6_only_sock(sk);
				tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
			}
		} else {
			tb->fastreuseport = 0;
		}
	}
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, port);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock_bh(&head->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

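/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */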
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

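	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */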
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

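/*
 * This will accept the next outstanding connection.
 */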
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

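	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */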
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

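	/* Find already established connection */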
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

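		/* If this is a non blocking socket don't sleep */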
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
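			/* We are still waiting for the final ACK from 3WHS
			 * so we can't free req now; set req->sk to NULL to
			 * signify that the child socket is not yet created
			 * and keep the request_sock reserved.
			 */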
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}

out:
	release_sock(sk);
	if (newsk && mem_cgroup_sockets_enabled) {
		int amt;

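		/* atomically get the memory usage, set and charge the
		 * newsk->sk_memcg.
		 */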
		lock_sock(newsk);

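		/* The socket has not been accepted yet, no need to look at
		 * newsk->sk_wmem_queued.
		 */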
		amt = sk_mem_pages(newsk->sk_forward_alloc +
				   atomic_read(&newsk->sk_rmem_alloc));
		mem_cgroup_sk_alloc(newsk);
		if (newsk->sk_memcg && amt)
			mem_cgroup_charge_skmem(newsk->sk_memcg, amt);

		release_sock(newsk);
	}
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

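/*
 * Using different timers for retransmit, delayed acks and probes.
 */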
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&sk->sk_timer, keepalive_handler, 0);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif

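/* Decide when to expire the request and when to resend SYN-ACK */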
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= thresh &&
		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
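	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */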
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

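/* return true if req was found in the ehash table */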
static bool reqsk_queue_unlink(struct request_sock *req)
{
	struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
	bool found = false;

	if (sk_hashed(req_to_sk(req))) {
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
		spin_unlock(lock);
	}
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	if (reqsk_queue_unlink(req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{
	struct request_sock *req = from_timer(req, t, rsk_timer);
	struct sock *sk_listener = req->rsk_listener;
	struct net *net = sock_net(sk_listener);
	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	int qlen, expire = 0, resend = 0;
	int max_retries, thresh;
	u8 defer_accept;

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
		goto drop;

	max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
	thresh = max_retries;
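	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */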
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (thresh > 2) {
			if (qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
	defer_accept = READ_ONCE(queue->rskq_defer_accept);
	if (defer_accept)
		max_retries = defer_accept;
	syn_ack_recalc(req, thresh, max_retries, defer_accept,
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		unsigned long timeo;

		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
		mod_timer(&req->rsk_timer, jiffies + timeo);
		return;
	}
drop:
	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
}

static void reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
	mod_timer(&req->rsk_timer, jiffies + timeout);

	inet_ehash_insert(req_to_sk(req), NULL);
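	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */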
	smp_wmb();
	refcount_set(&req->rsk_refcnt, 2 + 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	reqsk_queue_hash_req(req, timeout);
	inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

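/**
 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
 *	@sk: the socket to clone
 *	@req: request_sock
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */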
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		inet_sk_set_state(newsk, TCP_SYN_RECV);
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

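		/* listeners have SOCK_RCU_FREE, not the children */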
		sock_reset_flag(newsk, SOCK_RCU_FREE);

		inet_sk(newsk)->mc_list = NULL;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff = 0;
		newicsk->icsk_probes_out = 0;

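		/* Deinitialize accept_queue to trap illegal accesses. */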
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

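/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all.  Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */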
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

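	/* It cannot be in hash table! */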
	WARN_ON(!sk_unhashed(sk));

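	/* If it has not 0 inet_sk(sk)->inet_num, it must be bound */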
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);

	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

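/* This function allows to force a closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */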
void inet_csk_prepare_forced_close(struct sock *sk)
	__releases(&sk->sk_lock.slock)
{
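	/* sk_clone_lock locked the socket and set refcnt to 2 */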
	bh_unlock_sock(sk);
	sock_put(sk);

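	/* The below has to be done to allow calling inet_csk_destroy_sock */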
	sock_set_flag(sk, SOCK_DEAD);
	percpu_counter_inc(sk->sk_prot->orphan_count);
	inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

int inet_csk_listen_start(struct sock *sk, int backlog)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = -EADDRINUSE;

	reqsk_queue_alloc(&icsk->icsk_accept_queue);

	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

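	/* There is race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters to hash table only
	 * after validation is complete.
	 */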
	inet_sk_state_store(sk, TCP_LISTEN);
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		err = sk->sk_prot->hash(sk);

		if (likely(!err))
			return 0;
	}

	inet_sk_set_state(sk, TCP_CLOSE);
	return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

static void inet_child_forget(struct sock *sk, struct request_sock *req,
			      struct sock *child)
{
	sk->sk_prot->disconnect(child, O_NONBLOCK);

	sock_orphan(child);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
		BUG_ON(sk != req->rsk_listener);

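		/* The child still points at the TFO request_sock via
		 * fastopen_rsk; break that association here, before the
		 * req is freed by our caller and the child is destroyed.
		 */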
		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
	}
	inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
				      struct request_sock *req,
				      struct sock *child)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

	spin_lock(&queue->rskq_lock);
	if (unlikely(sk->sk_state != TCP_LISTEN)) {
		inet_child_forget(sk, req, child);
		child = NULL;
	} else {
		req->sk = child;
		req->dl_next = NULL;
		if (queue->rskq_accept_head == NULL)
			WRITE_ONCE(queue->rskq_accept_head, req);
		else
			queue->rskq_accept_tail->dl_next = req;
		queue->rskq_accept_tail = req;
		sk_acceptq_added(sk);
	}
	spin_unlock(&queue->rskq_lock);
	return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
					 struct request_sock *req, bool own_req)
{
	if (own_req) {
		inet_csk_reqsk_queue_drop(sk, req);
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		if (inet_csk_reqsk_queue_add(sk, req, child))
			return child;
	}
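	/* Too bad, another child took ownership of the request, undo. */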
	bh_unlock_sock(child);
	sock_put(child);
	return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);

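/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */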
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *next, *req;

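	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */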
	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
		struct sock *child = req->sk;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		inet_child_forget(sk, req, child);
		reqsk_put(req);
		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		cond_resched();
	}
	if (queue->fastopenq.rskq_rst_head) {
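		/* Free all the reqs queued in rskq_rst_head. */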
		spin_lock_bh(&queue->fastopenq.lock);
		req = queue->fastopenq.rskq_rst_head;
		queue->fastopenq.rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq.lock);
		while (req != NULL) {
			next = req->dl_next;
			reqsk_put(req);
			req = next;
		}
	}
	WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
	if (IS_ERR(rt))
		rt = NULL;
	if (rt)
		sk_setup_caps(sk, &rt->dst);
	rcu_read_unlock();

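	/* If rt is NULL this still returns NULL: dst is the first
	 * member of struct rtable, so &rt->dst has a zero offset.
	 */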
	return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);