1/*
2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
3 *		operating system.  INET is implemented using the  BSD Socket
4 *		interface as the means of communication with the user level.
5 *
6 *		Support for INET connection oriented protocols.
7 *
8 * Authors:	See the TCP sources
9 *
10 *		This program is free software; you can redistribute it and/or
11 *		modify it under the terms of the GNU General Public License
12 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
14 */
15
16#include <linux/module.h>
17#include <linux/jhash.h>
18
19#include <net/inet_connection_sock.h>
20#include <net/inet_hashtables.h>
21#include <net/inet_timewait_sock.h>
22#include <net/ip.h>
23#include <net/route.h>
24#include <net/tcp_states.h>
25#include <net/xfrm.h>
26#include <net/tcp.h>
27
28#ifdef INET_CSK_DEBUG
29const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
30EXPORT_SYMBOL(inet_csk_timer_bug_msg);
31#endif
32
33void inet_get_local_port_range(struct net *net, int *low, int *high)
34{
35	unsigned int seq;
36
37	do {
38		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
39
40		*low = net->ipv4.ip_local_ports.range[0];
41		*high = net->ipv4.ip_local_ports.range[1];
42	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
43}
44EXPORT_SYMBOL(inet_get_local_port_range);
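
/* Illustrative sketch (not part of this file's logic): a caller typically
 * samples a random port inside the returned range, much like
 * inet_csk_get_port() does below:
 *
 *	int low, high, rover;
 *
 *	inet_get_local_port_range(net, &low, &high);
 *	rover = prandom_u32() % (high - low + 1) + low;
 *
 * The seqlock loop above guarantees that *low and *high are read as a
 * consistent pair even while the range is being rewritten concurrently.
 */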
45
46int inet_csk_bind_conflict(const struct sock *sk,
47			   const struct inet_bind_bucket *tb, bool relax)
48{
49	struct sock *sk2;
50	int reuse = sk->sk_reuse;
51	int reuseport = sk->sk_reuseport;
52	kuid_t uid = sock_i_uid((struct sock *)sk);
53
	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the sockets hanging
	 * off tb->owners belong to the same net - the
	 * one this bucket belongs to.
	 */
60
61	sk_for_each_bound(sk2, &tb->owners) {
62		if (sk != sk2 &&
63		    !inet_v6_ipv6only(sk2) &&
64		    (!sk->sk_bound_dev_if ||
65		     !sk2->sk_bound_dev_if ||
66		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
67			if ((!reuse || !sk2->sk_reuse ||
68			    sk2->sk_state == TCP_LISTEN) &&
69			    (!reuseport || !sk2->sk_reuseport ||
70			    (sk2->sk_state != TCP_TIME_WAIT &&
71			     !uid_eq(uid, sock_i_uid(sk2))))) {
72
73				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
74				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
75					break;
76			}
77			if (!relax && reuse && sk2->sk_reuse &&
78			    sk2->sk_state != TCP_LISTEN) {
79
80				if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
81				    sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
82					break;
83			}
84		}
85	}
86	return sk2 != NULL;
87}
88EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
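
/* What the rules above mean for applications (hedged user-space sketch;
 * the outcome also depends on address overlap and the peer socket's state):
 * SO_REUSEADDR on both sockets allows a port to be re-bound as long as the
 * existing owner is not actively listening, while SO_REUSEPORT lets sockets
 * of the same user share a port outright.  Assumes 'addr' is filled in
 * elsewhere:
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 */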
89
/* Obtain a reference to a local port for the given sock.
 * If snum is zero, select any available local port.
 * Returns 0 on success, non-zero if no port could be obtained.
 */
93int inet_csk_get_port(struct sock *sk, unsigned short snum)
94{
95	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
96	struct inet_bind_hashbucket *head;
97	struct inet_bind_bucket *tb;
98	int ret, attempts = 5;
99	struct net *net = sock_net(sk);
100	int smallest_size = -1, smallest_rover;
101	kuid_t uid = sock_i_uid(sk);
102
103	local_bh_disable();
104	if (!snum) {
105		int remaining, rover, low, high;
106
107again:
108		inet_get_local_port_range(net, &low, &high);
109		remaining = (high - low) + 1;
110		smallest_rover = rover = prandom_u32() % remaining + low;
111
112		smallest_size = -1;
113		do {
114			if (inet_is_local_reserved_port(net, rover))
115				goto next_nolock;
116			head = &hashinfo->bhash[inet_bhashfn(net, rover,
117					hashinfo->bhash_size)];
118			spin_lock(&head->lock);
119			inet_bind_bucket_for_each(tb, &head->chain)
120				if (net_eq(ib_net(tb), net) && tb->port == rover) {
121					if (((tb->fastreuse > 0 &&
122					      sk->sk_reuse &&
123					      sk->sk_state != TCP_LISTEN) ||
124					     (tb->fastreuseport > 0 &&
125					      sk->sk_reuseport &&
126					      uid_eq(tb->fastuid, uid))) &&
127					    (tb->num_owners < smallest_size || smallest_size == -1)) {
128						smallest_size = tb->num_owners;
129						smallest_rover = rover;
130						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
131						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
132							snum = smallest_rover;
133							goto tb_found;
134						}
135					}
136					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
137						snum = rover;
138						goto tb_found;
139					}
140					goto next;
141				}
142			break;
143		next:
144			spin_unlock(&head->lock);
145		next_nolock:
146			if (++rover > high)
147				rover = low;
148		} while (--remaining > 0);
149
150		/* Exhausted local port range during search?  It is not
151		 * possible for us to be holding one of the bind hash
152		 * locks if this test triggers, because if 'remaining'
153		 * drops to zero, we broke out of the do/while loop at
154		 * the top level, not from the 'break;' statement.
155		 */
156		ret = 1;
157		if (remaining <= 0) {
158			if (smallest_size != -1) {
159				snum = smallest_rover;
160				goto have_snum;
161			}
162			goto fail;
163		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its lock.
		 */
167		snum = rover;
168	} else {
169have_snum:
170		head = &hashinfo->bhash[inet_bhashfn(net, snum,
171				hashinfo->bhash_size)];
172		spin_lock(&head->lock);
173		inet_bind_bucket_for_each(tb, &head->chain)
174			if (net_eq(ib_net(tb), net) && tb->port == snum)
175				goto tb_found;
176	}
177	tb = NULL;
178	goto tb_not_found;
179tb_found:
180	if (!hlist_empty(&tb->owners)) {
181		if (sk->sk_reuse == SK_FORCE_REUSE)
182			goto success;
183
184		if (((tb->fastreuse > 0 &&
185		      sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
186		     (tb->fastreuseport > 0 &&
187		      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
188		    smallest_size == -1) {
189			goto success;
190		} else {
191			ret = 1;
192			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
193				if (((sk->sk_reuse && sk->sk_state != TCP_LISTEN) ||
194				     (tb->fastreuseport > 0 &&
195				      sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
196				    smallest_size != -1 && --attempts >= 0) {
197					spin_unlock(&head->lock);
198					goto again;
199				}
200
201				goto fail_unlock;
202			}
203		}
204	}
205tb_not_found:
206	ret = 1;
207	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
208					net, head, snum)) == NULL)
209		goto fail_unlock;
210	if (hlist_empty(&tb->owners)) {
211		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
212			tb->fastreuse = 1;
213		else
214			tb->fastreuse = 0;
215		if (sk->sk_reuseport) {
216			tb->fastreuseport = 1;
217			tb->fastuid = uid;
218		} else
219			tb->fastreuseport = 0;
220	} else {
221		if (tb->fastreuse &&
222		    (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
223			tb->fastreuse = 0;
224		if (tb->fastreuseport &&
225		    (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid)))
226			tb->fastreuseport = 0;
227	}
228success:
229	if (!inet_csk(sk)->icsk_bind_hash)
230		inet_bind_hash(sk, tb, snum);
231	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
232	ret = 0;
233
234fail_unlock:
235	spin_unlock(&head->lock);
236fail:
237	local_bh_enable();
238	return ret;
239}
240EXPORT_SYMBOL_GPL(inet_csk_get_port);
241
/*
 * Wait for an incoming connection while avoiding race conditions. This must
 * be called with the socket locked.
 */
246static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
247{
248	struct inet_connection_sock *icsk = inet_csk(sk);
249	DEFINE_WAIT(wait);
250	int err;
251
252	/*
253	 * True wake-one mechanism for incoming connections: only
254	 * one process gets woken up, not the 'whole herd'.
255	 * Since we do not 'race & poll' for established sockets
256	 * anymore, the common case will execute the loop only once.
257	 *
258	 * Subtle issue: "add_wait_queue_exclusive()" will be added
259	 * after any current non-exclusive waiters, and we know that
260	 * it will always _stay_ after any new non-exclusive waiters
261	 * because all non-exclusive waiters are added at the
262	 * beginning of the wait-queue. As such, it's ok to "drop"
263	 * our exclusiveness temporarily when we get woken up without
264	 * having to remove and re-insert us on the wait queue.
265	 */
266	for (;;) {
267		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
268					  TASK_INTERRUPTIBLE);
269		release_sock(sk);
270		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
271			timeo = schedule_timeout(timeo);
272		sched_annotate_sleep();
273		lock_sock(sk);
274		err = 0;
275		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
276			break;
277		err = -EINVAL;
278		if (sk->sk_state != TCP_LISTEN)
279			break;
280		err = sock_intr_errno(timeo);
281		if (signal_pending(current))
282			break;
283		err = -EAGAIN;
284		if (!timeo)
285			break;
286	}
287	finish_wait(sk_sleep(sk), &wait);
288	return err;
289}
290
291/*
292 * This will accept the next outstanding connection.
293 */
294struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
295{
296	struct inet_connection_sock *icsk = inet_csk(sk);
297	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
298	struct request_sock *req;
299	struct sock *newsk;
300	int error;
301
302	lock_sock(sk);
303
304	/* We need to make sure that this socket is listening,
305	 * and that it has something pending.
306	 */
307	error = -EINVAL;
308	if (sk->sk_state != TCP_LISTEN)
309		goto out_err;
310
311	/* Find already established connection */
312	if (reqsk_queue_empty(queue)) {
313		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
314
		/* If this is a non-blocking socket, don't sleep */
316		error = -EAGAIN;
317		if (!timeo)
318			goto out_err;
319
320		error = inet_csk_wait_for_connect(sk, timeo);
321		if (error)
322			goto out_err;
323	}
324	req = reqsk_queue_remove(queue);
325	newsk = req->sk;
326
327	sk_acceptq_removed(sk);
328	if (sk->sk_protocol == IPPROTO_TCP &&
329	    tcp_rsk(req)->tfo_listener &&
330	    queue->fastopenq) {
331		spin_lock_bh(&queue->fastopenq->lock);
332		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK of the 3WHS,
			 * so we can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket has been taken,
			 * so reqsk_fastopen_remove() will free the req
			 * when the 3WHS finishes (or is aborted).
			 */
339			req->sk = NULL;
340			req = NULL;
341		}
342		spin_unlock_bh(&queue->fastopenq->lock);
343	}
344out:
345	release_sock(sk);
346	if (req)
347		reqsk_put(req);
348	return newsk;
349out_err:
350	newsk = NULL;
351	req = NULL;
352	*err = error;
353	goto out;
354}
355EXPORT_SYMBOL(inet_csk_accept);
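
/* User-space view of the error handling above (illustrative sketch): on a
 * non-blocking listener an empty accept queue surfaces as EAGAIN, so callers
 * typically retry after poll()/epoll reports the listener readable.
 * wait_for_readable() and handle_connection() are hypothetical helpers:
 *
 *	int cfd = accept(lfd, NULL, NULL);
 *
 *	if (cfd < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
 *		wait_for_readable(lfd);
 *	else if (cfd >= 0)
 *		handle_connection(cfd);
 */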
356
/*
 * We use separate timers for retransmit, delayed ACKs and probes.
 * We may wish to use just one timer, maintaining a list of expiry
 * jiffies, as an optimization.
 */
362void inet_csk_init_xmit_timers(struct sock *sk,
363			       void (*retransmit_handler)(unsigned long),
364			       void (*delack_handler)(unsigned long),
365			       void (*keepalive_handler)(unsigned long))
366{
367	struct inet_connection_sock *icsk = inet_csk(sk);
368
369	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
370			(unsigned long)sk);
371	setup_timer(&icsk->icsk_delack_timer, delack_handler,
372			(unsigned long)sk);
373	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
374	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
375}
376EXPORT_SYMBOL(inet_csk_init_xmit_timers);
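
/* For reference, TCP wires its handlers up roughly like this (sketch based
 * on tcp_timer.c, shown only to illustrate how the API is meant to be used):
 *
 *	void tcp_init_xmit_timers(struct sock *sk)
 *	{
 *		inet_csk_init_xmit_timers(sk, &tcp_write_timer,
 *					  &tcp_delack_timer,
 *					  &tcp_keepalive_timer);
 *	}
 */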
377
378void inet_csk_clear_xmit_timers(struct sock *sk)
379{
380	struct inet_connection_sock *icsk = inet_csk(sk);
381
382	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
383
384	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
385	sk_stop_timer(sk, &icsk->icsk_delack_timer);
386	sk_stop_timer(sk, &sk->sk_timer);
387}
388EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
389
390void inet_csk_delete_keepalive_timer(struct sock *sk)
391{
392	sk_stop_timer(sk, &sk->sk_timer);
393}
394EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
395
396void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
397{
398	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
399}
400EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
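
/* Sketch of how these two helpers are typically driven when SO_KEEPALIVE is
 * toggled (roughly what tcp_set_keepalive() does; shown for illustration):
 *
 *	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
 *		inet_csk_reset_keepalive_timer(sk,
 *					       keepalive_time_when(tcp_sk(sk)));
 *	else if (!val)
 *		inet_csk_delete_keepalive_timer(sk);
 */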
401
402struct dst_entry *inet_csk_route_req(struct sock *sk,
403				     struct flowi4 *fl4,
404				     const struct request_sock *req)
405{
406	const struct inet_request_sock *ireq = inet_rsk(req);
407	struct net *net = read_pnet(&ireq->ireq_net);
408	struct ip_options_rcu *opt = ireq->opt;
409	struct rtable *rt;
410
411	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
412			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
413			   sk->sk_protocol, inet_sk_flowi_flags(sk),
414			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
415			   ireq->ir_loc_addr, ireq->ir_rmt_port,
416			   htons(ireq->ir_num));
417	security_req_classify_flow(req, flowi4_to_flowi(fl4));
418	rt = ip_route_output_flow(net, fl4, sk);
419	if (IS_ERR(rt))
420		goto no_route;
421	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
422		goto route_err;
423	return &rt->dst;
424
425route_err:
426	ip_rt_put(rt);
427no_route:
428	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
429	return NULL;
430}
431EXPORT_SYMBOL_GPL(inet_csk_route_req);
432
433struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
434					    struct sock *newsk,
435					    const struct request_sock *req)
436{
437	const struct inet_request_sock *ireq = inet_rsk(req);
438	struct net *net = read_pnet(&ireq->ireq_net);
439	struct inet_sock *newinet = inet_sk(newsk);
440	struct ip_options_rcu *opt;
441	struct flowi4 *fl4;
442	struct rtable *rt;
443
444	fl4 = &newinet->cork.fl.u.ip4;
445
446	rcu_read_lock();
447	opt = rcu_dereference(newinet->inet_opt);
448	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
449			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
450			   sk->sk_protocol, inet_sk_flowi_flags(sk),
451			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
452			   ireq->ir_loc_addr, ireq->ir_rmt_port,
453			   htons(ireq->ir_num));
454	security_req_classify_flow(req, flowi4_to_flowi(fl4));
455	rt = ip_route_output_flow(net, fl4, sk);
456	if (IS_ERR(rt))
457		goto no_route;
458	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
459		goto route_err;
460	rcu_read_unlock();
461	return &rt->dst;
462
463route_err:
464	ip_rt_put(rt);
465no_route:
466	rcu_read_unlock();
467	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
468	return NULL;
469}
470EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
471
472static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
473				 const u32 rnd, const u32 synq_hsize)
474{
475	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
476}
477
478#if IS_ENABLED(CONFIG_IPV6)
479#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
480#else
481#define AF_INET_FAMILY(fam) true
482#endif
483
/* Note: this is temporary:
 * the req sock will no longer be kept in the listener hash table.
 */
487struct request_sock *inet_csk_search_req(struct sock *sk,
488					 const __be16 rport,
489					 const __be32 raddr,
490					 const __be32 laddr)
491{
492	struct inet_connection_sock *icsk = inet_csk(sk);
493	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
494	struct request_sock *req;
495	u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
496				  lopt->nr_table_entries);
497
498	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
499	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
500		const struct inet_request_sock *ireq = inet_rsk(req);
501
502		if (ireq->ir_rmt_port == rport &&
503		    ireq->ir_rmt_addr == raddr &&
504		    ireq->ir_loc_addr == laddr &&
505		    AF_INET_FAMILY(req->rsk_ops->family)) {
506			atomic_inc(&req->rsk_refcnt);
507			WARN_ON(req->sk);
508			break;
509		}
510	}
511	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
512
513	return req;
514}
515EXPORT_SYMBOL_GPL(inet_csk_search_req);
516
517void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
518				   unsigned long timeout)
519{
520	struct inet_connection_sock *icsk = inet_csk(sk);
521	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
522	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
523				     inet_rsk(req)->ir_rmt_port,
524				     lopt->hash_rnd, lopt->nr_table_entries);
525
526	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
527	inet_csk_reqsk_queue_added(sk, timeout);
528}
529EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
530
531/* Only thing we need from tcp.h */
532extern int sysctl_tcp_synack_retries;
533
534
535/* Decide when to expire the request and when to resend SYN-ACK */
536static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
537				  const int max_retries,
538				  const u8 rskq_defer_accept,
539				  int *expire, int *resend)
540{
541	if (!rskq_defer_accept) {
542		*expire = req->num_timeout >= thresh;
543		*resend = 1;
544		return;
545	}
546	*expire = req->num_timeout >= thresh &&
547		  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
	/*
	 * Do not resend while waiting for data after the ACK;
	 * start resending at the end of the deferring period to give
	 * a last chance for data or an ACK to create an established socket.
	 */
553	*resend = !inet_rsk(req)->acked ||
554		  req->num_timeout >= rskq_defer_accept - 1;
555}
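
/* rskq_defer_accept is driven by the TCP_DEFER_ACCEPT socket option.
 * Illustrative user-space sketch (the value is a timeout in seconds, which
 * the kernel converts into a SYN-ACK retransmission count):
 *
 *	int secs = 5;
 *
 *	setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
 *
 * With this set, a connection is typically only reported to accept() once
 * data arrives (or the deferring period expires), and syn_ack_recalc()
 * keeps the request alive until then.
 */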
556
557int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
558{
559	int err = req->rsk_ops->rtx_syn_ack(parent, req);
560
561	if (!err)
562		req->num_retrans++;
563	return err;
564}
565EXPORT_SYMBOL(inet_rtx_syn_ack);
566
/* Return true if req was found in the syn_table[]. */
568static bool reqsk_queue_unlink(struct request_sock_queue *queue,
569			       struct request_sock *req)
570{
571	struct request_sock **prev;
572	struct listen_sock *lopt;
573	bool found = false;
574
575	spin_lock(&queue->syn_wait_lock);
576	lopt = queue->listen_opt;
577	if (lopt) {
578		for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
579		     prev = &(*prev)->dl_next) {
580			if (*prev == req) {
581				*prev = req->dl_next;
582				found = true;
583				break;
584			}
585		}
586	}
587	spin_unlock(&queue->syn_wait_lock);
588	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
589		reqsk_put(req);
590	return found;
591}
592
593void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
594{
595	if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
596		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
597		reqsk_put(req);
598	}
599}
600EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
601
602static void reqsk_timer_handler(unsigned long data)
603{
604	struct request_sock *req = (struct request_sock *)data;
605	struct sock *sk_listener = req->rsk_listener;
606	struct inet_connection_sock *icsk = inet_csk(sk_listener);
607	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
608	struct listen_sock *lopt = queue->listen_opt;
609	int qlen, expire = 0, resend = 0;
610	int max_retries, thresh;
611	u8 defer_accept;
612
613	if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
614		reqsk_put(req);
615		return;
616	}
617
618	max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
619	thresh = max_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to an established socket) within the first
	 * timeout. If a SYN-ACK has not been acknowledged for 1 second,
	 * it means one of the following: the SYN-ACK was lost, the ACK
	 * was lost, the RTT is high, or nobody planned to ack
	 * (i.e. a synflood).
	 * When the server is a bit loaded, the queue is populated with
	 * old open requests, reducing the effective size of the queue.
	 * When the server is well loaded, the queue size reduces to zero
	 * after several minutes of work. This is not a synflood,
	 * it is normal operation. The solution is to prune entries that
	 * are too old, overriding the normal timeout, when the situation
	 * becomes dangerous.
	 *
	 * Essentially, we reserve half of the room for young
	 * embryos and abort old ones without pity if they
	 * are about to clog our table.
	 */
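	/* Worked example (illustrative numbers only): with max_qlen_log = 8
	 * the pruning below kicks in once qlen reaches 128.  If qlen is 200
	 * and only 20 requests are still young, young starts at 40 and
	 * doubles each round, so a default thresh of 5 drops to 2 and stale
	 * requests are expired after far fewer SYN-ACK retransmissions.
	 */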
637	qlen = listen_sock_qlen(lopt);
638	if (qlen >> (lopt->max_qlen_log - 1)) {
639		int young = listen_sock_young(lopt) << 1;
640
641		while (thresh > 2) {
642			if (qlen < young)
643				break;
644			thresh--;
645			young <<= 1;
646		}
647	}
648	defer_accept = READ_ONCE(queue->rskq_defer_accept);
649	if (defer_accept)
650		max_retries = defer_accept;
651	syn_ack_recalc(req, thresh, max_retries, defer_accept,
652		       &expire, &resend);
653	req->rsk_ops->syn_ack_timeout(req);
654	if (!expire &&
655	    (!resend ||
656	     !inet_rtx_syn_ack(sk_listener, req) ||
657	     inet_rsk(req)->acked)) {
658		unsigned long timeo;
659
660		if (req->num_timeout++ == 0)
661			atomic_inc(&lopt->young_dec);
662		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
663		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
664		return;
665	}
666	inet_csk_reqsk_queue_drop(sk_listener, req);
667	reqsk_put(req);
668}
669
670void reqsk_queue_hash_req(struct request_sock_queue *queue,
671			  u32 hash, struct request_sock *req,
672			  unsigned long timeout)
673{
674	struct listen_sock *lopt = queue->listen_opt;
675
676	req->num_retrans = 0;
677	req->num_timeout = 0;
678	req->sk = NULL;
679
680	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
681	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
682	req->rsk_hash = hash;
683
684	/* before letting lookups find us, make sure all req fields
685	 * are committed to memory and refcnt initialized.
686	 */
687	smp_wmb();
688	atomic_set(&req->rsk_refcnt, 2);
689
690	spin_lock(&queue->syn_wait_lock);
691	req->dl_next = lopt->syn_table[hash];
692	lopt->syn_table[hash] = req;
693	spin_unlock(&queue->syn_wait_lock);
694}
695EXPORT_SYMBOL(reqsk_queue_hash_req);
696
697/**
698 *	inet_csk_clone_lock - clone an inet socket, and lock its clone
699 *	@sk: the socket to clone
700 *	@req: request_sock
701 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
702 *
703 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
704 */
705struct sock *inet_csk_clone_lock(const struct sock *sk,
706				 const struct request_sock *req,
707				 const gfp_t priority)
708{
709	struct sock *newsk = sk_clone_lock(sk, priority);
710
711	if (newsk) {
712		struct inet_connection_sock *newicsk = inet_csk(newsk);
713
714		newsk->sk_state = TCP_SYN_RECV;
715		newicsk->icsk_bind_hash = NULL;
716
717		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
718		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
719		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
720		newsk->sk_write_space = sk_stream_write_space;
721
722		newsk->sk_mark = inet_rsk(req)->ir_mark;
723		atomic64_set(&newsk->sk_cookie,
724			     atomic64_read(&inet_rsk(req)->ir_cookie));
725
726		newicsk->icsk_retransmits = 0;
727		newicsk->icsk_backoff	  = 0;
728		newicsk->icsk_probes_out  = 0;
729
730		/* Deinitialize accept_queue to trap illegal accesses. */
731		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
732
733		security_inet_csk_clone(newsk, req);
734	}
735	return newsk;
736}
737EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
738
739/*
740 * At this point, there should be no process reference to this
741 * socket, and thus no user references at all.  Therefore we
742 * can assume the socket waitqueue is inactive and nobody will
743 * try to jump onto it.
744 */
745void inet_csk_destroy_sock(struct sock *sk)
746{
747	WARN_ON(sk->sk_state != TCP_CLOSE);
748	WARN_ON(!sock_flag(sk, SOCK_DEAD));
749
750	/* It cannot be in hash table! */
751	WARN_ON(!sk_unhashed(sk));
752
	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound. */
754	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
755
756	sk->sk_prot->destroy(sk);
757
758	sk_stream_kill_queues(sk);
759
760	xfrm_sk_free_policy(sk);
761
762	sk_refcnt_debug_release(sk);
763
764	percpu_counter_dec(sk->sk_prot->orphan_count);
765	sock_put(sk);
766}
767EXPORT_SYMBOL(inet_csk_destroy_sock);
768
/* This function allows us to force a closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
772void inet_csk_prepare_forced_close(struct sock *sk)
773	__releases(&sk->sk_lock.slock)
774{
775	/* sk_clone_lock locked the socket and set refcnt to 2 */
776	bh_unlock_sock(sk);
777	sock_put(sk);
778
779	/* The below has to be done to allow calling inet_csk_destroy_sock */
780	sock_set_flag(sk, SOCK_DEAD);
781	percpu_counter_inc(sk->sk_prot->orphan_count);
782	inet_sk(sk)->inet_num = 0;
783}
784EXPORT_SYMBOL(inet_csk_prepare_forced_close);
785
786int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
787{
788	struct inet_sock *inet = inet_sk(sk);
789	struct inet_connection_sock *icsk = inet_csk(sk);
790	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);
791
792	if (rc != 0)
793		return rc;
794
795	sk->sk_max_ack_backlog = 0;
796	sk->sk_ack_backlog = 0;
797	inet_csk_delack_init(sk);
798
	/* There is a race window here: we announce ourselves as listening,
	 * but this transition has not yet been validated by get_port().
	 * That is OK, because the socket enters the hash table only
	 * after validation is complete.
	 */
804	sk->sk_state = TCP_LISTEN;
805	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
806		inet->inet_sport = htons(inet->inet_num);
807
808		sk_dst_reset(sk);
809		sk->sk_prot->hash(sk);
810
811		return 0;
812	}
813
814	sk->sk_state = TCP_CLOSE;
815	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
816	return -EADDRINUSE;
817}
818EXPORT_SYMBOL_GPL(inet_csk_listen_start);
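
/* How this is reached from user space (illustrative sketch): bind() claims
 * the port via sk->sk_prot->get_port(), and listen() ends up here, where the
 * port is re-validated (or auto-assigned) before the socket is hashed as a
 * listener.  Assumes 'addr' is filled in elsewhere:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, 128);
 */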
819
820/*
821 *	This routine closes sockets which have been at least partially
822 *	opened, but not yet accepted.
823 */
824void inet_csk_listen_stop(struct sock *sk)
825{
826	struct inet_connection_sock *icsk = inet_csk(sk);
827	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
828	struct request_sock *acc_req;
829	struct request_sock *req;
830
831	/* make all the listen_opt local to us */
832	acc_req = reqsk_queue_yank_acceptq(queue);
833
	/* Following the specs, it would be better either to send a FIN
	 * (and enter FIN-WAIT-1; it is a normal close)
	 * or to send an active reset (abort).
	 * Certainly, it is pretty dangerous during a synflood, but that is
	 * a bad justification for our negligence 8)
	 * To be honest, we are not able to implement either
	 * of the variants now.			--ANK
	 */
842	reqsk_queue_destroy(queue);
843
844	while ((req = acc_req) != NULL) {
845		struct sock *child = req->sk;
846
847		acc_req = req->dl_next;
848
849		local_bh_disable();
850		bh_lock_sock(child);
851		WARN_ON(sock_owned_by_user(child));
852		sock_hold(child);
853
854		sk->sk_prot->disconnect(child, O_NONBLOCK);
855
856		sock_orphan(child);
857
858		percpu_counter_inc(sk->sk_prot->orphan_count);
859
860		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
861			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
862			BUG_ON(sk != req->rsk_listener);
863
			/* Paranoid, to prevent a race condition if
			 * an inbound pkt destined for the child is
			 * blocked by the sock lock in tcp_v4_rcv().
			 * Also to satisfy an assertion in
			 * tcp_v4_destroy_sock().
			 */
870			tcp_sk(child)->fastopen_rsk = NULL;
871		}
872		inet_csk_destroy_sock(child);
873
874		bh_unlock_sock(child);
875		local_bh_enable();
876		sock_put(child);
877
878		sk_acceptq_removed(sk);
879		reqsk_put(req);
880	}
881	if (queue->fastopenq) {
882		/* Free all the reqs queued in rskq_rst_head. */
883		spin_lock_bh(&queue->fastopenq->lock);
884		acc_req = queue->fastopenq->rskq_rst_head;
885		queue->fastopenq->rskq_rst_head = NULL;
886		spin_unlock_bh(&queue->fastopenq->lock);
887		while ((req = acc_req) != NULL) {
888			acc_req = req->dl_next;
889			reqsk_put(req);
890		}
891	}
892	WARN_ON(sk->sk_ack_backlog);
893}
894EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
895
896void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
897{
898	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
899	const struct inet_sock *inet = inet_sk(sk);
900
901	sin->sin_family		= AF_INET;
902	sin->sin_addr.s_addr	= inet->inet_daddr;
903	sin->sin_port		= inet->inet_dport;
904}
905EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
906
907#ifdef CONFIG_COMPAT
908int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
909			       char __user *optval, int __user *optlen)
910{
911	const struct inet_connection_sock *icsk = inet_csk(sk);
912
913	if (icsk->icsk_af_ops->compat_getsockopt)
914		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
915							    optval, optlen);
916	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
917					     optval, optlen);
918}
919EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
920
921int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
922			       char __user *optval, unsigned int optlen)
923{
924	const struct inet_connection_sock *icsk = inet_csk(sk);
925
926	if (icsk->icsk_af_ops->compat_setsockopt)
927		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
928							    optval, optlen);
929	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
930					     optval, optlen);
931}
932EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
933#endif
934
935static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
936{
937	const struct inet_sock *inet = inet_sk(sk);
938	const struct ip_options_rcu *inet_opt;
939	__be32 daddr = inet->inet_daddr;
940	struct flowi4 *fl4;
941	struct rtable *rt;
942
943	rcu_read_lock();
944	inet_opt = rcu_dereference(inet->inet_opt);
945	if (inet_opt && inet_opt->opt.srr)
946		daddr = inet_opt->opt.faddr;
947	fl4 = &fl->u.ip4;
948	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
949				   inet->inet_saddr, inet->inet_dport,
950				   inet->inet_sport, sk->sk_protocol,
951				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
952	if (IS_ERR(rt))
953		rt = NULL;
954	if (rt)
955		sk_setup_caps(sk, &rt->dst);
956	rcu_read_unlock();
957
958	return &rt->dst;
959}
960
961struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
962{
963	struct dst_entry *dst = __sk_dst_check(sk, 0);
964	struct inet_sock *inet = inet_sk(sk);
965
966	if (!dst) {
967		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
968		if (!dst)
969			goto out;
970	}
971	dst->ops->update_pmtu(dst, sk, NULL, mtu);
972
973	dst = __sk_dst_check(sk, 0);
974	if (!dst)
975		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
976out:
977	return dst;
978}
979EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
980