1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after year
45  *					coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78 
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84 
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87 
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91 
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96 
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99 
100 static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 					  ip_hdr(skb)->saddr,
104 					  tcp_hdr(skb)->dest,
105 					  tcp_hdr(skb)->source);
106 }
107 
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 	struct tcp_sock *tp = tcp_sk(sk);
112 
113 	/* With PAWS, it is safe from the viewpoint
114 	   of data integrity. Even without PAWS it is safe provided sequence
115 	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
116 
117 	   Actually, the idea is close to VJ's, only the timestamp cache is
118 	   held not per host but per port pair, and the TW bucket is used as the
119 	   state holder.
120 
121 	   If the TW bucket has already been destroyed we fall back to VJ's
122 	   scheme and use the initial timestamp retrieved from the peer table.
123 	 */
124 	if (tcptw->tw_ts_recent_stamp &&
125 	    (!twp || (sysctl_tcp_tw_reuse &&
126 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
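		/* Reuse of the TIME_WAIT port pair: start the new connection's
		 * sequence space well past the old connection's snd_nxt so that
		 * stray segments from the previous incarnation fall outside the
		 * new window.
		 */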
127 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 		if (tp->write_seq == 0)
129 			tp->write_seq = 1;
130 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
131 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132 		sock_hold(sktw);
133 		return 1;
134 	}
135 
136 	return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
139 
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 	struct inet_sock *inet = inet_sk(sk);
145 	struct tcp_sock *tp = tcp_sk(sk);
146 	__be16 orig_sport, orig_dport;
147 	__be32 daddr, nexthop;
148 	struct flowi4 *fl4;
149 	struct rtable *rt;
150 	int err;
151 	struct ip_options_rcu *inet_opt;
152 
153 	if (addr_len < sizeof(struct sockaddr_in))
154 		return -EINVAL;
155 
156 	if (usin->sin_family != AF_INET)
157 		return -EAFNOSUPPORT;
158 
159 	nexthop = daddr = usin->sin_addr.s_addr;
160 	inet_opt = rcu_dereference_protected(inet->inet_opt,
161 					     sock_owned_by_user(sk));
162 	if (inet_opt && inet_opt->opt.srr) {
163 		if (!daddr)
164 			return -EINVAL;
165 		nexthop = inet_opt->opt.faddr;
166 	}
167 
168 	orig_sport = inet->inet_sport;
169 	orig_dport = usin->sin_port;
170 	fl4 = &inet->cork.fl.u.ip4;
171 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 			      IPPROTO_TCP,
174 			      orig_sport, orig_dport, sk);
175 	if (IS_ERR(rt)) {
176 		err = PTR_ERR(rt);
177 		if (err == -ENETUNREACH)
178 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179 		return err;
180 	}
181 
182 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183 		ip_rt_put(rt);
184 		return -ENETUNREACH;
185 	}
186 
187 	if (!inet_opt || !inet_opt->opt.srr)
188 		daddr = fl4->daddr;
189 
190 	if (!inet->inet_saddr)
191 		inet->inet_saddr = fl4->saddr;
192 	sk_rcv_saddr_set(sk, inet->inet_saddr);
193 
194 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 		/* Reset inherited state */
196 		tp->rx_opt.ts_recent	   = 0;
197 		tp->rx_opt.ts_recent_stamp = 0;
198 		if (likely(!tp->repair))
199 			tp->write_seq	   = 0;
200 	}
201 
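	/* With tw_recycle enabled and no inherited timestamp state, seed
	 * ts_recent from the per-destination metrics cache (the timestamps
	 * last seen from this peer) so PAWS can reject stale segments from
	 * an earlier connection.
	 */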
202 	if (tcp_death_row.sysctl_tw_recycle &&
203 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 		tcp_fetch_timewait_stamp(sk, &rt->dst);
205 
206 	inet->inet_dport = usin->sin_port;
207 	sk_daddr_set(sk, daddr);
208 
209 	inet_csk(sk)->icsk_ext_hdr_len = 0;
210 	if (inet_opt)
211 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212 
213 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214 
215 	/* Socket identity is still unknown (sport may be zero).
216 	 * However we set the state to SYN-SENT and, without releasing the socket
217 	 * lock, select a source port, enter ourselves into the hash tables and
218 	 * complete initialization after this.
219 	 */
220 	tcp_set_state(sk, TCP_SYN_SENT);
221 	err = inet_hash_connect(&tcp_death_row, sk);
222 	if (err)
223 		goto failure;
224 
225 	inet_set_txhash(sk);
226 
227 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 			       inet->inet_sport, inet->inet_dport, sk);
229 	if (IS_ERR(rt)) {
230 		err = PTR_ERR(rt);
231 		rt = NULL;
232 		goto failure;
233 	}
234 	/* OK, now commit destination to socket.  */
235 	sk->sk_gso_type = SKB_GSO_TCPV4;
236 	sk_setup_caps(sk, &rt->dst);
237 
238 	if (!tp->write_seq && likely(!tp->repair))
239 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 							   inet->inet_daddr,
241 							   inet->inet_sport,
242 							   usin->sin_port);
243 
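	/* Seed the IP ID counter from the initial sequence number (mixed with
	 * jiffies) so back-to-back connections do not start from the same ID.
	 */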
244 	inet->inet_id = tp->write_seq ^ jiffies;
245 
246 	err = tcp_connect(sk);
247 
248 	rt = NULL;
249 	if (err)
250 		goto failure;
251 
252 	return 0;
253 
254 failure:
255 	/*
256 	 * This unhashes the socket and releases the local port,
257 	 * if necessary.
258 	 */
259 	tcp_set_state(sk, TCP_CLOSE);
260 	ip_rt_put(rt);
261 	sk->sk_route_caps = 0;
262 	inet->inet_dport = 0;
263 	return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
266 
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if socket was owned by user
270  * at the time tcp_v4_err() was called to handle ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274 	struct dst_entry *dst;
275 	struct inet_sock *inet = inet_sk(sk);
276 	u32 mtu = tcp_sk(sk)->mtu_info;
277 
278 	dst = inet_csk_update_pmtu(sk, mtu);
279 	if (!dst)
280 		return;
281 
282 	/* Something is about to go wrong... Remember the soft error
283 	 * in case this connection is not able to recover.
284 	 */
285 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 		sk->sk_err_soft = EMSGSIZE;
287 
288 	mtu = dst_mtu(dst);
289 
290 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 	    ip_sk_accept_pmtu(sk) &&
292 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 		tcp_sync_mss(sk, mtu);
294 
295 		/* Resend the TCP packet because it's
296 		 * clear that the old packet has been
297 		 * dropped. This is the new "fast" path mtu
298 		 * discovery.
299 		 */
300 		tcp_simple_retransmit(sk);
301 	} /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304 
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
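	/* __sk_dst_check() returns NULL if the cached route is already
	 * obsolete, in which case there is nothing to update.
	 */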
307 	struct dst_entry *dst = __sk_dst_check(sk, 0);
308 
309 	if (dst)
310 		dst->ops->redirect(dst, sk, skb);
311 }
312 
313 
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317 	struct request_sock *req = inet_reqsk(sk);
318 	struct net *net = sock_net(sk);
319 
320 	/* ICMPs are not backlogged, hence we cannot get
321 	 * an established socket here.
322 	 */
323 	WARN_ON(req->sk);
324 
325 	if (seq != tcp_rsk(req)->snt_isn) {
326 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327 		reqsk_put(req);
328 	} else {
329 		/*
330 		 * Still in SYN_RECV, just remove it silently.
331 		 * There is no good way to pass the error to the newly
332 		 * created socket, and POSIX does not want network
333 		 * errors returned from accept().
334 		 */
335 		NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
337 	}
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340 
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment
346  * header points to the first 8 bytes of the tcp header.  We need
347  * to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket the ICMP is just dropped
351  * and for some paths there is no check at all.
352  * A more general error queue to queue errors for later handling
353  * is probably better.
354  *
355  */
356 
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361 	struct inet_connection_sock *icsk;
362 	struct tcp_sock *tp;
363 	struct inet_sock *inet;
364 	const int type = icmp_hdr(icmp_skb)->type;
365 	const int code = icmp_hdr(icmp_skb)->code;
366 	struct sock *sk;
367 	struct sk_buff *skb;
368 	struct request_sock *fastopen;
369 	__u32 seq, snd_una;
370 	__u32 remaining;
371 	int err;
372 	struct net *net = dev_net(icmp_skb->dev);
373 
374 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375 				       th->dest, iph->saddr, ntohs(th->source),
376 				       inet_iif(icmp_skb));
377 	if (!sk) {
378 		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379 		return;
380 	}
381 	if (sk->sk_state == TCP_TIME_WAIT) {
382 		inet_twsk_put(inet_twsk(sk));
383 		return;
384 	}
385 	seq = ntohl(th->seq);
386 	if (sk->sk_state == TCP_NEW_SYN_RECV)
387 		return tcp_req_err(sk, seq);
388 
389 	bh_lock_sock(sk);
390 	/* If too many ICMPs get dropped on busy
391 	 * servers this needs to be solved differently.
392 	 * We do take care of PMTU discovery (RFC1191) special case :
393 	 * we can receive locally generated ICMP messages while socket is held.
394 	 */
395 	if (sock_owned_by_user(sk)) {
396 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397 			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398 	}
399 	if (sk->sk_state == TCP_CLOSE)
400 		goto out;
401 
402 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404 		goto out;
405 	}
406 
407 	icsk = inet_csk(sk);
408 	tp = tcp_sk(sk);
409 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
410 	fastopen = tp->fastopen_rsk;
411 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412 	if (sk->sk_state != TCP_LISTEN &&
413 	    !between(seq, snd_una, tp->snd_nxt)) {
414 		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415 		goto out;
416 	}
417 
418 	switch (type) {
419 	case ICMP_REDIRECT:
420 		do_redirect(icmp_skb, sk);
421 		goto out;
422 	case ICMP_SOURCE_QUENCH:
423 		/* Just silently ignore these. */
424 		goto out;
425 	case ICMP_PARAMETERPROB:
426 		err = EPROTO;
427 		break;
428 	case ICMP_DEST_UNREACH:
429 		if (code > NR_ICMP_UNREACH)
430 			goto out;
431 
432 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433 			/* We are not interested in TCP_LISTEN and open_requests
434 			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
435 			 * they should go through unfragmented).
436 			 */
437 			if (sk->sk_state == TCP_LISTEN)
438 				goto out;
439 
440 			tp->mtu_info = info;
441 			if (!sock_owned_by_user(sk)) {
442 				tcp_v4_mtu_reduced(sk);
443 			} else {
444 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445 					sock_hold(sk);
446 			}
447 			goto out;
448 		}
449 
450 		err = icmp_err_convert[code].errno;
451 		/* check if icmp_skb allows revert of backoff
452 		 * (see draft-zimmermann-tcp-lcd) */
453 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454 			break;
455 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456 		    !icsk->icsk_backoff || fastopen)
457 			break;
458 
459 		if (sock_owned_by_user(sk))
460 			break;
461 
462 		icsk->icsk_backoff--;
463 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464 					       TCP_TIMEOUT_INIT;
465 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466 
467 		skb = tcp_write_queue_head(sk);
468 		BUG_ON(!skb);
469 
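		/* Work out how much of the (reverted) RTO is left, given how
		 * long the head-of-queue segment has already been waiting.
		 */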
470 		remaining = icsk->icsk_rto -
471 			    min(icsk->icsk_rto,
472 				tcp_time_stamp - tcp_skb_timestamp(skb));
473 
474 		if (remaining) {
475 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476 						  remaining, TCP_RTO_MAX);
477 		} else {
478 			/* RTO revert clocked out retransmission.
479 			 * Will retransmit now */
480 			tcp_retransmit_timer(sk);
481 		}
482 
483 		break;
484 	case ICMP_TIME_EXCEEDED:
485 		err = EHOSTUNREACH;
486 		break;
487 	default:
488 		goto out;
489 	}
490 
491 	switch (sk->sk_state) {
492 	case TCP_SYN_SENT:
493 	case TCP_SYN_RECV:
494 		/* Only in fast or simultaneous open. If a fast open socket
495 		 * is already accepted it is treated as a connected one below.
496 		 */
497 		if (fastopen && !fastopen->sk)
498 			break;
499 
500 		if (!sock_owned_by_user(sk)) {
501 			sk->sk_err = err;
502 
503 			sk->sk_error_report(sk);
504 
505 			tcp_done(sk);
506 		} else {
507 			sk->sk_err_soft = err;
508 		}
509 		goto out;
510 	}
511 
512 	/* If we've already connected we will keep trying
513 	 * until we time out, or the user gives up.
514 	 *
515 	 * rfc1122 4.2.3.9 allows us to consider as hard errors
516 	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
517 	 * but it is obsoleted by pmtu discovery).
518 	 *
519 	 * Note that in the modern internet, where routing is unreliable
520 	 * and broken firewalls sit in every dark corner sending random
521 	 * errors ordered by their masters, even these two messages finally
522 	 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
523 	 *
524 	 * Now we are in compliance with RFCs.
525 	 *							--ANK (980905)
526 	 */
527 
528 	inet = inet_sk(sk);
529 	if (!sock_owned_by_user(sk) && inet->recverr) {
530 		sk->sk_err = err;
531 		sk->sk_error_report(sk);
532 	} else	{ /* Only an error on timeout */
533 		sk->sk_err_soft = err;
534 	}
535 
536 out:
537 	bh_unlock_sock(sk);
538 	sock_put(sk);
539 }
540 
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543 	struct tcphdr *th = tcp_hdr(skb);
544 
545 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
546 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547 		skb->csum_start = skb_transport_header(skb) - skb->head;
548 		skb->csum_offset = offsetof(struct tcphdr, check);
549 	} else {
550 		th->check = tcp_v4_check(skb->len, saddr, daddr,
551 					 csum_partial(th,
552 						      th->doff << 2,
553 						      skb->csum));
554 	}
555 }
556 
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560 	const struct inet_sock *inet = inet_sk(sk);
561 
562 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565 
566 /*
567  *	This routine will send an RST to the other tcp.
568  *
569  *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
570  *		      for the reset?
571  *	Answer: if a packet caused the RST, it is not for a socket
572  *		existing in our system; if it is matched to a socket,
573  *		it is just a duplicate segment or a bug in the other side's TCP.
574  *		So we build the reply based only on the parameters
575  *		that arrived with the segment.
576  *	Exception: precedence violation. We do not implement it in any case.
577  */
578 
579 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
580 {
581 	const struct tcphdr *th = tcp_hdr(skb);
582 	struct {
583 		struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587 	} rep;
588 	struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590 	struct tcp_md5sig_key *key;
591 	const __u8 *hash_location = NULL;
592 	unsigned char newhash[16];
593 	int genhash;
594 	struct sock *sk1 = NULL;
595 #endif
596 	struct net *net;
597 
598 	/* Never send a reset in response to a reset. */
599 	if (th->rst)
600 		return;
601 
602 	/* If sk is not NULL, it means we did a successful lookup and the incoming
603 	 * route had to be correct. prequeue might have dropped our dst.
604 	 */
605 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606 		return;
607 
608 	/* Swap the send and the receive. */
609 	memset(&rep, 0, sizeof(rep));
610 	rep.th.dest   = th->source;
611 	rep.th.source = th->dest;
612 	rep.th.doff   = sizeof(struct tcphdr) / 4;
613 	rep.th.rst    = 1;
614 
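	/* RFC 793 reset generation: if the offending segment carried an ACK,
	 * the RST takes its sequence number from that ACK; otherwise the RST
	 * keeps sequence zero and acknowledges everything the segment occupied.
	 */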
615 	if (th->ack) {
616 		rep.th.seq = th->ack_seq;
617 	} else {
618 		rep.th.ack = 1;
619 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620 				       skb->len - (th->doff << 2));
621 	}
622 
623 	memset(&arg, 0, sizeof(arg));
624 	arg.iov[0].iov_base = (unsigned char *)&rep;
625 	arg.iov[0].iov_len  = sizeof(rep.th);
626 
627 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629 	hash_location = tcp_parse_md5sig_option(th);
630 	if (!sk && hash_location) {
631 		/*
632 		 * The active side is gone. Try to find the listening socket through
633 		 * the source port, and then find the md5 key through that socket.
634 		 * We do not lose security here:
635 		 * the incoming packet is checked against the md5 hash of the found
636 		 * key, and no RST is generated if the hash doesn't match.
637 		 */
638 		sk1 = __inet_lookup_listener(net,
639 					     &tcp_hashinfo, ip_hdr(skb)->saddr,
640 					     th->source, ip_hdr(skb)->daddr,
641 					     ntohs(th->source), inet_iif(skb));
642 		/* don't send rst if it can't find key */
643 		if (!sk1)
644 			return;
645 		rcu_read_lock();
646 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647 					&ip_hdr(skb)->saddr, AF_INET);
648 		if (!key)
649 			goto release_sk1;
650 
651 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 			goto release_sk1;
654 	} else {
655 		key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656 					     &ip_hdr(skb)->saddr,
657 					     AF_INET) : NULL;
658 	}
659 
660 	if (key) {
661 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662 				   (TCPOPT_NOP << 16) |
663 				   (TCPOPT_MD5SIG << 8) |
664 				   TCPOLEN_MD5SIG);
665 		/* Update length and the length the header thinks exists */
666 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667 		rep.th.doff = arg.iov[0].iov_len / 4;
668 
669 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670 				     key, ip_hdr(skb)->saddr,
671 				     ip_hdr(skb)->daddr, &rep.th);
672 	}
673 #endif
674 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675 				      ip_hdr(skb)->saddr, /* XXX */
676 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
677 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678 	arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679 	/* When the socket is gone, all binding information is lost and
680 	 * routing might fail. No choice here: if we chose to force the
681 	 * input interface, we would misroute in the case of an asymmetric route.
682 	 */
683 	if (sk)
684 		arg.bound_dev_if = sk->sk_bound_dev_if;
685 
686 	arg.tos = ip_hdr(skb)->tos;
687 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
689 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690 			      &arg, arg.iov[0].iov_len);
691 
692 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693 	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694 
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697 	if (sk1) {
698 		rcu_read_unlock();
699 		sock_put(sk1);
700 	}
701 #endif
702 }
703 
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside socket context, is certainly ugly. What can I do?
706  */
707 
708 static void tcp_v4_send_ack(struct net *net,
709 			    struct sk_buff *skb, u32 seq, u32 ack,
710 			    u32 win, u32 tsval, u32 tsecr, int oif,
711 			    struct tcp_md5sig_key *key,
712 			    int reply_flags, u8 tos)
713 {
714 	const struct tcphdr *th = tcp_hdr(skb);
715 	struct {
716 		struct tcphdr th;
717 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
718 #ifdef CONFIG_TCP_MD5SIG
719 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
720 #endif
721 			];
722 	} rep;
723 	struct ip_reply_arg arg;
724 
725 	memset(&rep.th, 0, sizeof(struct tcphdr));
726 	memset(&arg, 0, sizeof(arg));
727 
728 	arg.iov[0].iov_base = (unsigned char *)&rep;
729 	arg.iov[0].iov_len  = sizeof(rep.th);
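	/* When echoing a timestamp, the TS option takes the first three 32-bit
	 * option words: NOP/NOP/kind/length, then tsval, then tsecr.
	 */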
730 	if (tsecr) {
731 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732 				   (TCPOPT_TIMESTAMP << 8) |
733 				   TCPOLEN_TIMESTAMP);
734 		rep.opt[1] = htonl(tsval);
735 		rep.opt[2] = htonl(tsecr);
736 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737 	}
738 
739 	/* Swap the send and the receive. */
740 	rep.th.dest    = th->source;
741 	rep.th.source  = th->dest;
742 	rep.th.doff    = arg.iov[0].iov_len / 4;
743 	rep.th.seq     = htonl(seq);
744 	rep.th.ack_seq = htonl(ack);
745 	rep.th.ack     = 1;
746 	rep.th.window  = htons(win);
747 
748 #ifdef CONFIG_TCP_MD5SIG
749 	if (key) {
750 		int offset = (tsecr) ? 3 : 0;
751 
752 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753 					  (TCPOPT_NOP << 16) |
754 					  (TCPOPT_MD5SIG << 8) |
755 					  TCPOLEN_MD5SIG);
756 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757 		rep.th.doff = arg.iov[0].iov_len/4;
758 
759 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760 				    key, ip_hdr(skb)->saddr,
761 				    ip_hdr(skb)->daddr, &rep.th);
762 	}
763 #endif
764 	arg.flags = reply_flags;
765 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766 				      ip_hdr(skb)->saddr, /* XXX */
767 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
768 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769 	if (oif)
770 		arg.bound_dev_if = oif;
771 	arg.tos = tos;
772 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
774 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775 			      &arg, arg.iov[0].iov_len);
776 
777 	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779 
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782 	struct inet_timewait_sock *tw = inet_twsk(sk);
783 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784 
785 	tcp_v4_send_ack(sock_net(sk), skb,
786 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
787 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
788 			tcp_time_stamp + tcptw->tw_ts_offset,
789 			tcptw->tw_ts_recent,
790 			tw->tw_bound_dev_if,
791 			tcp_twsk_md5_key(tcptw),
792 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
793 			tw->tw_tos
794 			);
795 
796 	inet_twsk_put(tw);
797 }
798 
799 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
800 				  struct request_sock *req)
801 {
802 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
803 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
804 	 */
805 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
806 					     tcp_sk(sk)->snd_nxt;
807 
808 	tcp_v4_send_ack(sock_net(sk), skb, seq,
809 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
810 			tcp_time_stamp,
811 			req->ts_recent,
812 			0,
813 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
814 					  AF_INET),
815 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
816 			ip_hdr(skb)->tos);
817 }
818 
819 /*
820  *	Send a SYN-ACK after having received a SYN.
821  *	This still operates on a request_sock only, not on a big
822  *	socket.
823  */
824 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
825 			      struct flowi *fl,
826 			      struct request_sock *req,
827 			      u16 queue_mapping,
828 			      struct tcp_fastopen_cookie *foc)
829 {
830 	const struct inet_request_sock *ireq = inet_rsk(req);
831 	struct flowi4 fl4;
832 	int err = -1;
833 	struct sk_buff *skb;
834 
835 	/* First, grab a route. */
836 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
837 		return -1;
838 
839 	skb = tcp_make_synack(sk, dst, req, foc);
840 
841 	if (skb) {
842 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
843 
844 		skb_set_queue_mapping(skb, queue_mapping);
845 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
846 					    ireq->ir_rmt_addr,
847 					    ireq->opt);
848 		err = net_xmit_eval(err);
849 	}
850 
851 	return err;
852 }
853 
854 /*
855  *	IPv4 request_sock destructor.
856  */
857 static void tcp_v4_reqsk_destructor(struct request_sock *req)
858 {
859 	kfree(inet_rsk(req)->opt);
860 }
861 
862 
863 #ifdef CONFIG_TCP_MD5SIG
864 /*
865  * RFC2385 MD5 checksumming requires a mapping of
866  * IP address->MD5 Key.
867  * We need to maintain these in the sk structure.
868  */
869 
870 /* Find the Key structure for an address.  */
871 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
872 					 const union tcp_md5_addr *addr,
873 					 int family)
874 {
875 	const struct tcp_sock *tp = tcp_sk(sk);
876 	struct tcp_md5sig_key *key;
877 	unsigned int size = sizeof(struct in_addr);
878 	const struct tcp_md5sig_info *md5sig;
879 
880 	/* caller either holds rcu_read_lock() or socket lock */
881 	md5sig = rcu_dereference_check(tp->md5sig_info,
882 				       sock_owned_by_user(sk) ||
883 				       lockdep_is_held(&sk->sk_lock.slock));
884 	if (!md5sig)
885 		return NULL;
886 #if IS_ENABLED(CONFIG_IPV6)
887 	if (family == AF_INET6)
888 		size = sizeof(struct in6_addr);
889 #endif
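	/* Plain linear scan; the per-socket MD5 key list is expected to stay
	 * short.
	 */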
890 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
891 		if (key->family != family)
892 			continue;
893 		if (!memcmp(&key->addr, addr, size))
894 			return key;
895 	}
896 	return NULL;
897 }
898 EXPORT_SYMBOL(tcp_md5_do_lookup);
899 
900 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
901 					 const struct sock *addr_sk)
902 {
903 	const union tcp_md5_addr *addr;
904 
905 	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
906 	return tcp_md5_do_lookup(sk, addr, AF_INET);
907 }
908 EXPORT_SYMBOL(tcp_v4_md5_lookup);
909 
910 /* This can be called on a newly created socket, from other files */
911 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
912 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
913 {
914 	/* Add Key to the list */
915 	struct tcp_md5sig_key *key;
916 	struct tcp_sock *tp = tcp_sk(sk);
917 	struct tcp_md5sig_info *md5sig;
918 
919 	key = tcp_md5_do_lookup(sk, addr, family);
920 	if (key) {
921 		/* Pre-existing entry - just update that one. */
922 		memcpy(key->key, newkey, newkeylen);
923 		key->keylen = newkeylen;
924 		return 0;
925 	}
926 
927 	md5sig = rcu_dereference_protected(tp->md5sig_info,
928 					   sock_owned_by_user(sk) ||
929 					   lockdep_is_held(&sk->sk_lock.slock));
930 	if (!md5sig) {
931 		md5sig = kmalloc(sizeof(*md5sig), gfp);
932 		if (!md5sig)
933 			return -ENOMEM;
934 
935 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
936 		INIT_HLIST_HEAD(&md5sig->head);
937 		rcu_assign_pointer(tp->md5sig_info, md5sig);
938 	}
939 
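	/* Charge the new key against the socket's option memory and make sure
	 * the MD5 transform pool exists before linking the key into the list.
	 */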
940 	key = sock_kmalloc(sk, sizeof(*key), gfp);
941 	if (!key)
942 		return -ENOMEM;
943 	if (!tcp_alloc_md5sig_pool()) {
944 		sock_kfree_s(sk, key, sizeof(*key));
945 		return -ENOMEM;
946 	}
947 
948 	memcpy(key->key, newkey, newkeylen);
949 	key->keylen = newkeylen;
950 	key->family = family;
951 	memcpy(&key->addr, addr,
952 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
953 				      sizeof(struct in_addr));
954 	hlist_add_head_rcu(&key->node, &md5sig->head);
955 	return 0;
956 }
957 EXPORT_SYMBOL(tcp_md5_do_add);
958 
959 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
960 {
961 	struct tcp_md5sig_key *key;
962 
963 	key = tcp_md5_do_lookup(sk, addr, family);
964 	if (!key)
965 		return -ENOENT;
966 	hlist_del_rcu(&key->node);
967 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
968 	kfree_rcu(key, rcu);
969 	return 0;
970 }
971 EXPORT_SYMBOL(tcp_md5_do_del);
972 
973 static void tcp_clear_md5_list(struct sock *sk)
974 {
975 	struct tcp_sock *tp = tcp_sk(sk);
976 	struct tcp_md5sig_key *key;
977 	struct hlist_node *n;
978 	struct tcp_md5sig_info *md5sig;
979 
980 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
981 
982 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
983 		hlist_del_rcu(&key->node);
984 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
985 		kfree_rcu(key, rcu);
986 	}
987 }
988 
989 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
990 				 int optlen)
991 {
992 	struct tcp_md5sig cmd;
993 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
994 
995 	if (optlen < sizeof(cmd))
996 		return -EINVAL;
997 
998 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
999 		return -EFAULT;
1000 
1001 	if (sin->sin_family != AF_INET)
1002 		return -EINVAL;
1003 
1004 	if (!cmd.tcpm_keylen)
1005 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1006 				      AF_INET);
1007 
1008 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1009 		return -EINVAL;
1010 
1011 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1012 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1013 			      GFP_KERNEL);
1014 }
1015 
1016 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1017 					__be32 daddr, __be32 saddr, int nbytes)
1018 {
1019 	struct tcp4_pseudohdr *bp;
1020 	struct scatterlist sg;
1021 
1022 	bp = &hp->md5_blk.ip4;
1023 
1024 	/*
1025 	 * 1. the TCP pseudo-header (in the order: source IP address,
1026 	 * destination IP address, zero-padded protocol number, and
1027 	 * segment length)
1028 	 */
1029 	bp->saddr = saddr;
1030 	bp->daddr = daddr;
1031 	bp->pad = 0;
1032 	bp->protocol = IPPROTO_TCP;
1033 	bp->len = cpu_to_be16(nbytes);
1034 
1035 	sg_init_one(&sg, bp, sizeof(*bp));
1036 	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1037 }
1038 
1039 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1040 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1041 {
1042 	struct tcp_md5sig_pool *hp;
1043 	struct hash_desc *desc;
1044 
1045 	hp = tcp_get_md5sig_pool();
1046 	if (!hp)
1047 		goto clear_hash_noput;
1048 	desc = &hp->md5_desc;
1049 
1050 	if (crypto_hash_init(desc))
1051 		goto clear_hash;
1052 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1053 		goto clear_hash;
1054 	if (tcp_md5_hash_header(hp, th))
1055 		goto clear_hash;
1056 	if (tcp_md5_hash_key(hp, key))
1057 		goto clear_hash;
1058 	if (crypto_hash_final(desc, md5_hash))
1059 		goto clear_hash;
1060 
1061 	tcp_put_md5sig_pool();
1062 	return 0;
1063 
1064 clear_hash:
1065 	tcp_put_md5sig_pool();
1066 clear_hash_noput:
1067 	memset(md5_hash, 0, 16);
1068 	return 1;
1069 }
1070 
1071 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1072 			const struct sock *sk,
1073 			const struct sk_buff *skb)
1074 {
1075 	struct tcp_md5sig_pool *hp;
1076 	struct hash_desc *desc;
1077 	const struct tcphdr *th = tcp_hdr(skb);
1078 	__be32 saddr, daddr;
1079 
1080 	if (sk) { /* valid for establish/request sockets */
1081 		saddr = sk->sk_rcv_saddr;
1082 		daddr = sk->sk_daddr;
1083 	} else {
1084 		const struct iphdr *iph = ip_hdr(skb);
1085 		saddr = iph->saddr;
1086 		daddr = iph->daddr;
1087 	}
1088 
1089 	hp = tcp_get_md5sig_pool();
1090 	if (!hp)
1091 		goto clear_hash_noput;
1092 	desc = &hp->md5_desc;
1093 
1094 	if (crypto_hash_init(desc))
1095 		goto clear_hash;
1096 
1097 	if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1098 		goto clear_hash;
1099 	if (tcp_md5_hash_header(hp, th))
1100 		goto clear_hash;
1101 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1102 		goto clear_hash;
1103 	if (tcp_md5_hash_key(hp, key))
1104 		goto clear_hash;
1105 	if (crypto_hash_final(desc, md5_hash))
1106 		goto clear_hash;
1107 
1108 	tcp_put_md5sig_pool();
1109 	return 0;
1110 
1111 clear_hash:
1112 	tcp_put_md5sig_pool();
1113 clear_hash_noput:
1114 	memset(md5_hash, 0, 16);
1115 	return 1;
1116 }
1117 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1118 
1119 /* Called with rcu_read_lock() */
1120 static bool tcp_v4_inbound_md5_hash(struct sock *sk,
1121 				    const struct sk_buff *skb)
1122 {
1123 	/*
1124 	 * This gets called for each TCP segment that arrives
1125 	 * so we want to be efficient.
1126 	 * We have 3 drop cases:
1127 	 * o No MD5 hash and one expected.
1128 	 * o MD5 hash and we're not expecting one.
1129 	 * o MD5 hash and it's wrong.
1130 	 */
1131 	const __u8 *hash_location = NULL;
1132 	struct tcp_md5sig_key *hash_expected;
1133 	const struct iphdr *iph = ip_hdr(skb);
1134 	const struct tcphdr *th = tcp_hdr(skb);
1135 	int genhash;
1136 	unsigned char newhash[16];
1137 
1138 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1139 					  AF_INET);
1140 	hash_location = tcp_parse_md5sig_option(th);
1141 
1142 	/* We've parsed the options - do we have a hash? */
1143 	if (!hash_expected && !hash_location)
1144 		return false;
1145 
1146 	if (hash_expected && !hash_location) {
1147 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1148 		return true;
1149 	}
1150 
1151 	if (!hash_expected && hash_location) {
1152 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1153 		return true;
1154 	}
1155 
1156 	/* Okay, so this is hash_expected and hash_location -
1157 	 * so we need to calculate the checksum.
1158 	 */
1159 	genhash = tcp_v4_md5_hash_skb(newhash,
1160 				      hash_expected,
1161 				      NULL, skb);
1162 
1163 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1164 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1165 				     &iph->saddr, ntohs(th->source),
1166 				     &iph->daddr, ntohs(th->dest),
1167 				     genhash ? " tcp_v4_calc_md5_hash failed"
1168 				     : "");
1169 		return true;
1170 	}
1171 	return false;
1172 }
1173 #endif
1174 
1175 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
1176 			    struct sk_buff *skb)
1177 {
1178 	struct inet_request_sock *ireq = inet_rsk(req);
1179 
1180 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1181 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1182 	ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1183 	ireq->opt = tcp_v4_save_options(skb);
1184 }
1185 
1186 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1187 					  const struct request_sock *req,
1188 					  bool *strict)
1189 {
1190 	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1191 
1192 	if (strict) {
1193 		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1194 			*strict = true;
1195 		else
1196 			*strict = false;
1197 	}
1198 
1199 	return dst;
1200 }
1201 
1202 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1203 	.family		=	PF_INET,
1204 	.obj_size	=	sizeof(struct tcp_request_sock),
1205 	.rtx_syn_ack	=	tcp_rtx_synack,
1206 	.send_ack	=	tcp_v4_reqsk_send_ack,
1207 	.destructor	=	tcp_v4_reqsk_destructor,
1208 	.send_reset	=	tcp_v4_send_reset,
1209 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1210 };
1211 
1212 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1213 	.mss_clamp	=	TCP_MSS_DEFAULT,
1214 #ifdef CONFIG_TCP_MD5SIG
1215 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1216 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1217 #endif
1218 	.init_req	=	tcp_v4_init_req,
1219 #ifdef CONFIG_SYN_COOKIES
1220 	.cookie_init_seq =	cookie_v4_init_sequence,
1221 #endif
1222 	.route_req	=	tcp_v4_route_req,
1223 	.init_seq	=	tcp_v4_init_sequence,
1224 	.send_synack	=	tcp_v4_send_synack,
1225 	.queue_hash_add =	inet_csk_reqsk_queue_hash_add,
1226 };
1227 
1228 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1229 {
1230 	/* Never answer SYNs sent to broadcast or multicast */
1231 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1232 		goto drop;
1233 
1234 	return tcp_conn_request(&tcp_request_sock_ops,
1235 				&tcp_request_sock_ipv4_ops, sk, skb);
1236 
1237 drop:
1238 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1239 	return 0;
1240 }
1241 EXPORT_SYMBOL(tcp_v4_conn_request);
1242 
1243 
1244 /*
1245  * The three way handshake has completed - we got a valid synack -
1246  * now create the new socket.
1247  */
1248 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1249 				  struct request_sock *req,
1250 				  struct dst_entry *dst)
1251 {
1252 	struct inet_request_sock *ireq;
1253 	struct inet_sock *newinet;
1254 	struct tcp_sock *newtp;
1255 	struct sock *newsk;
1256 #ifdef CONFIG_TCP_MD5SIG
1257 	struct tcp_md5sig_key *key;
1258 #endif
1259 	struct ip_options_rcu *inet_opt;
1260 
1261 	if (sk_acceptq_is_full(sk))
1262 		goto exit_overflow;
1263 
1264 	newsk = tcp_create_openreq_child(sk, req, skb);
1265 	if (!newsk)
1266 		goto exit_nonewsk;
1267 
1268 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1269 	inet_sk_rx_dst_set(newsk, skb);
1270 
1271 	newtp		      = tcp_sk(newsk);
1272 	newinet		      = inet_sk(newsk);
1273 	ireq		      = inet_rsk(req);
1274 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1275 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1276 	newinet->inet_saddr	      = ireq->ir_loc_addr;
1277 	inet_opt	      = ireq->opt;
1278 	rcu_assign_pointer(newinet->inet_opt, inet_opt);
1279 	ireq->opt	      = NULL;
1280 	newinet->mc_index     = inet_iif(skb);
1281 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1282 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1283 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1284 	inet_set_txhash(newsk);
1285 	if (inet_opt)
1286 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1287 	newinet->inet_id = newtp->write_seq ^ jiffies;
1288 
1289 	if (!dst) {
1290 		dst = inet_csk_route_child_sock(sk, newsk, req);
1291 		if (!dst)
1292 			goto put_and_exit;
1293 	} else {
1294 		/* syncookie case : see end of cookie_v4_check() */
1295 	}
1296 	sk_setup_caps(newsk, dst);
1297 
1298 	tcp_ca_openreq_child(newsk, dst);
1299 
1300 	tcp_sync_mss(newsk, dst_mtu(dst));
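	/* The advertised MSS comes from the route metrics, clamped by any
	 * user_mss configured on the listener.
	 */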
1301 	newtp->advmss = dst_metric_advmss(dst);
1302 	if (tcp_sk(sk)->rx_opt.user_mss &&
1303 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1304 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1305 
1306 	tcp_initialize_rcv_mss(newsk);
1307 
1308 #ifdef CONFIG_TCP_MD5SIG
1309 	/* Copy over the MD5 key from the original socket */
1310 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1311 				AF_INET);
1312 	if (key) {
1313 		/*
1314 		 * We're using one, so create a matching key
1315 		 * on the newsk structure. If we fail to get
1316 		 * memory, then we end up not copying the key
1317 		 * across. Shucks.
1318 		 */
1319 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1320 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1321 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1322 	}
1323 #endif
1324 
1325 	if (__inet_inherit_port(sk, newsk) < 0)
1326 		goto put_and_exit;
1327 	__inet_hash_nolisten(newsk, NULL);
1328 
1329 	return newsk;
1330 
1331 exit_overflow:
1332 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1333 exit_nonewsk:
1334 	dst_release(dst);
1335 exit:
1336 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1337 	return NULL;
1338 put_and_exit:
1339 	inet_csk_prepare_forced_close(newsk);
1340 	tcp_done(newsk);
1341 	goto exit;
1342 }
1343 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1344 
1345 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1346 {
1347 	const struct tcphdr *th = tcp_hdr(skb);
1348 	const struct iphdr *iph = ip_hdr(skb);
1349 	struct request_sock *req;
1350 	struct sock *nsk;
1351 
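	/* Look for a pending connection request on this listener first, then
	 * for an already established socket created from an earlier segment.
	 */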
1352 	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1353 	if (req) {
1354 		nsk = tcp_check_req(sk, skb, req, false);
1355 		if (!nsk || nsk == sk)
1356 			reqsk_put(req);
1357 		return nsk;
1358 	}
1359 
1360 	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1361 			th->source, iph->daddr, th->dest, inet_iif(skb));
1362 
1363 	if (nsk) {
1364 		if (nsk->sk_state != TCP_TIME_WAIT) {
1365 			bh_lock_sock(nsk);
1366 			return nsk;
1367 		}
1368 		inet_twsk_put(inet_twsk(nsk));
1369 		return NULL;
1370 	}
1371 
1372 #ifdef CONFIG_SYN_COOKIES
1373 	if (!th->syn)
1374 		sk = cookie_v4_check(sk, skb);
1375 #endif
1376 	return sk;
1377 }
1378 
1379 /* The socket must have its spinlock held when we get
1380  * here.
1381  *
1382  * We have a potential double-lock case here, so even when
1383  * doing backlog processing we use the BH locking scheme.
1384  * This is because we cannot sleep with the original spinlock
1385  * held.
1386  */
1387 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1388 {
1389 	struct sock *rsk;
1390 
1391 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1392 		struct dst_entry *dst = sk->sk_rx_dst;
1393 
1394 		sock_rps_save_rxhash(sk, skb);
1395 		sk_mark_napi_id(sk, skb);
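		/* Validate the cached input route: drop it if the incoming
		 * interface changed or the dst has become obsolete.
		 */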
1396 		if (dst) {
1397 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1398 			    !dst->ops->check(dst, 0)) {
1399 				dst_release(dst);
1400 				sk->sk_rx_dst = NULL;
1401 			}
1402 		}
1403 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1404 		return 0;
1405 	}
1406 
1407 	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1408 		goto csum_err;
1409 
1410 	if (sk->sk_state == TCP_LISTEN) {
1411 		struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1412 		if (!nsk)
1413 			goto discard;
1414 
1415 		if (nsk != sk) {
1416 			sock_rps_save_rxhash(nsk, skb);
1417 			sk_mark_napi_id(sk, skb);
1418 			if (tcp_child_process(sk, nsk, skb)) {
1419 				rsk = nsk;
1420 				goto reset;
1421 			}
1422 			return 0;
1423 		}
1424 	} else
1425 		sock_rps_save_rxhash(sk, skb);
1426 
1427 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1428 		rsk = sk;
1429 		goto reset;
1430 	}
1431 	return 0;
1432 
1433 reset:
1434 	tcp_v4_send_reset(rsk, skb);
1435 discard:
1436 	kfree_skb(skb);
1437 	/* Be careful here. If this function gets more complicated and
1438 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1439 	 * might be destroyed here. This current version compiles correctly,
1440 	 * but you have been warned.
1441 	 */
1442 	return 0;
1443 
1444 csum_err:
1445 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1446 	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1447 	goto discard;
1448 }
1449 EXPORT_SYMBOL(tcp_v4_do_rcv);
1450 
1451 void tcp_v4_early_demux(struct sk_buff *skb)
1452 {
1453 	const struct iphdr *iph;
1454 	const struct tcphdr *th;
1455 	struct sock *sk;
1456 
1457 	if (skb->pkt_type != PACKET_HOST)
1458 		return;
1459 
1460 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1461 		return;
1462 
1463 	iph = ip_hdr(skb);
1464 	th = tcp_hdr(skb);
1465 
1466 	if (th->doff < sizeof(struct tcphdr) / 4)
1467 		return;
1468 
1469 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1470 				       iph->saddr, th->source,
1471 				       iph->daddr, ntohs(th->dest),
1472 				       skb->skb_iif);
1473 	if (sk) {
1474 		skb->sk = sk;
1475 		skb->destructor = sock_edemux;
1476 		if (sk_fullsock(sk)) {
1477 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1478 
1479 			if (dst)
1480 				dst = dst_check(dst, 0);
1481 			if (dst &&
1482 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1483 				skb_dst_set_noref(skb, dst);
1484 		}
1485 	}
1486 }
1487 
1488 /* Packet is added to VJ-style prequeue for processing in process
1489  * context, if a reader task is waiting. Apparently, this exciting
1490  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1491  * failed somewhere. Latency? Burstiness? Well, at least now we will
1492  * see why it failed. 8)8)				  --ANK
1493  *
1494  */
1495 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1496 {
1497 	struct tcp_sock *tp = tcp_sk(sk);
1498 
1499 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1500 		return false;
1501 
1502 	if (skb->len <= tcp_hdrlen(skb) &&
1503 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1504 		return false;
1505 
1506 	/* Before escaping RCU protected region, we need to take care of skb
1507 	 * dst. Prequeue is only enabled for established sockets.
1508 	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1509 	 * Instead of doing a full sk_rx_dst validity check here, let's perform
1510 	 * an optimistic check.
1511 	 */
1512 	if (likely(sk->sk_rx_dst))
1513 		skb_dst_drop(skb);
1514 	else
1515 		skb_dst_force_safe(skb);
1516 
1517 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1518 	tp->ucopy.memory += skb->truesize;
1519 	if (tp->ucopy.memory > sk->sk_rcvbuf) {
1520 		struct sk_buff *skb1;
1521 
1522 		BUG_ON(sock_owned_by_user(sk));
1523 
1524 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1525 			sk_backlog_rcv(sk, skb1);
1526 			NET_INC_STATS_BH(sock_net(sk),
1527 					 LINUX_MIB_TCPPREQUEUEDROPPED);
1528 		}
1529 
1530 		tp->ucopy.memory = 0;
1531 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1532 		wake_up_interruptible_sync_poll(sk_sleep(sk),
1533 					   POLLIN | POLLRDNORM | POLLRDBAND);
1534 		if (!inet_csk_ack_scheduled(sk))
1535 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1536 						  (3 * tcp_rto_min(sk)) / 4,
1537 						  TCP_RTO_MAX);
1538 	}
1539 	return true;
1540 }
1541 EXPORT_SYMBOL(tcp_prequeue);
1542 
1543 /*
1544  *	From tcp_input.c
1545  */
1546 
1547 int tcp_v4_rcv(struct sk_buff *skb)
1548 {
1549 	const struct iphdr *iph;
1550 	const struct tcphdr *th;
1551 	struct sock *sk;
1552 	int ret;
1553 	struct net *net = dev_net(skb->dev);
1554 
1555 	if (skb->pkt_type != PACKET_HOST)
1556 		goto discard_it;
1557 
1558 	/* Count it even if it's bad */
1559 	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1560 
1561 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1562 		goto discard_it;
1563 
1564 	th = tcp_hdr(skb);
1565 
1566 	if (th->doff < sizeof(struct tcphdr) / 4)
1567 		goto bad_packet;
1568 	if (!pskb_may_pull(skb, th->doff * 4))
1569 		goto discard_it;
1570 
1571 	/* An explanation is required here, I think.
1572 	 * Packet length and doff are validated by header prediction,
1573 	 * provided the case of th->doff==0 is eliminated.
1574 	 * So, we defer the checks. */
1575 
1576 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1577 		goto csum_error;
1578 
1579 	th = tcp_hdr(skb);
1580 	iph = ip_hdr(skb);
1581 	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1582 	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1583 	 */
1584 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1585 		sizeof(struct inet_skb_parm));
1586 	barrier();
1587 
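	/* Pre-compute the fields the TCP state machines need so the header
	 * does not have to be re-parsed later.
	 */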
1588 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1589 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1590 				    skb->len - th->doff * 4);
1591 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1592 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1593 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1594 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1595 	TCP_SKB_CB(skb)->sacked	 = 0;
1596 
1597 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1598 	if (!sk)
1599 		goto no_tcp_socket;
1600 
1601 process:
1602 	if (sk->sk_state == TCP_TIME_WAIT)
1603 		goto do_time_wait;
1604 
1605 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1606 		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1607 		goto discard_and_relse;
1608 	}
1609 
1610 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1611 		goto discard_and_relse;
1612 
1613 #ifdef CONFIG_TCP_MD5SIG
1614 	/*
1615 	 * We really want to reject the packet as early as possible
1616 	 * if:
1617 	 *  o We're expecting an MD5'd packet and there is no MD5 tcp option
1618 	 *  o There is an MD5 option and we're not expecting one
1619 	 */
1620 	if (tcp_v4_inbound_md5_hash(sk, skb))
1621 		goto discard_and_relse;
1622 #endif
1623 
1624 	nf_reset(skb);
1625 
1626 	if (sk_filter(sk, skb))
1627 		goto discard_and_relse;
1628 
1629 	sk_incoming_cpu_update(sk);
1630 	skb->dev = NULL;
1631 
1632 	bh_lock_sock_nested(sk);
1633 	ret = 0;
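	/* Process the segment directly (or via the prequeue) when the socket
	 * is not owned by a user task; otherwise queue it to the backlog,
	 * bounded by the combined receive and send buffer sizes.
	 */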
1634 	if (!sock_owned_by_user(sk)) {
1635 		if (!tcp_prequeue(sk, skb))
1636 			ret = tcp_v4_do_rcv(sk, skb);
1637 	} else if (unlikely(sk_add_backlog(sk, skb,
1638 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
1639 		bh_unlock_sock(sk);
1640 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1641 		goto discard_and_relse;
1642 	}
1643 	bh_unlock_sock(sk);
1644 
1645 	sock_put(sk);
1646 
1647 	return ret;
1648 
1649 no_tcp_socket:
1650 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1651 		goto discard_it;
1652 
1653 	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1654 csum_error:
1655 		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1656 bad_packet:
1657 		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1658 	} else {
1659 		tcp_v4_send_reset(NULL, skb);
1660 	}
1661 
1662 discard_it:
1663 	/* Discard frame. */
1664 	kfree_skb(skb);
1665 	return 0;
1666 
1667 discard_and_relse:
1668 	sock_put(sk);
1669 	goto discard_it;
1670 
1671 do_time_wait:
1672 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1673 		inet_twsk_put(inet_twsk(sk));
1674 		goto discard_it;
1675 	}
1676 
1677 	if (skb->len < (th->doff << 2)) {
1678 		inet_twsk_put(inet_twsk(sk));
1679 		goto bad_packet;
1680 	}
1681 	if (tcp_checksum_complete(skb)) {
1682 		inet_twsk_put(inet_twsk(sk));
1683 		goto csum_error;
1684 	}
1685 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1686 	case TCP_TW_SYN: {
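		/* An acceptable new SYN hit the TIME_WAIT socket: if a listener
		 * is still bound to the port, retire the old timewait state and
		 * let the listener process the SYN.
		 */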
1687 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1688 							&tcp_hashinfo,
1689 							iph->saddr, th->source,
1690 							iph->daddr, th->dest,
1691 							inet_iif(skb));
1692 		if (sk2) {
1693 			inet_twsk_deschedule(inet_twsk(sk));
1694 			inet_twsk_put(inet_twsk(sk));
1695 			sk = sk2;
1696 			goto process;
1697 		}
1698 		/* Fall through to ACK */
1699 	}
1700 	case TCP_TW_ACK:
1701 		tcp_v4_timewait_ack(sk, skb);
1702 		break;
1703 	case TCP_TW_RST:
1704 		goto no_tcp_socket;
1705 	case TCP_TW_SUCCESS:;
1706 	}
1707 	goto discard_it;
1708 }
1709 
1710 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1711 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1712 	.twsk_unique	= tcp_twsk_unique,
1713 	.twsk_destructor= tcp_twsk_destructor,
1714 };
1715 
1716 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1717 {
1718 	struct dst_entry *dst = skb_dst(skb);
1719 
1720 	if (dst && dst_hold_safe(dst)) {
1721 		sk->sk_rx_dst = dst;
1722 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1723 	}
1724 }
1725 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1726 
1727 const struct inet_connection_sock_af_ops ipv4_specific = {
1728 	.queue_xmit	   = ip_queue_xmit,
1729 	.send_check	   = tcp_v4_send_check,
1730 	.rebuild_header	   = inet_sk_rebuild_header,
1731 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1732 	.conn_request	   = tcp_v4_conn_request,
1733 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1734 	.net_header_len	   = sizeof(struct iphdr),
1735 	.setsockopt	   = ip_setsockopt,
1736 	.getsockopt	   = ip_getsockopt,
1737 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1738 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1739 	.bind_conflict	   = inet_csk_bind_conflict,
1740 #ifdef CONFIG_COMPAT
1741 	.compat_setsockopt = compat_ip_setsockopt,
1742 	.compat_getsockopt = compat_ip_getsockopt,
1743 #endif
1744 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1745 };
1746 EXPORT_SYMBOL(ipv4_specific);
1747 
1748 #ifdef CONFIG_TCP_MD5SIG
1749 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1750 	.md5_lookup		= tcp_v4_md5_lookup,
1751 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1752 	.md5_parse		= tcp_v4_parse_md5_keys,
1753 };
1754 #endif
1755 
1756 /* NOTE: A lot of fields are set to zero explicitly by the call to
1757  *       sk_alloc(), so they need not be initialized here.
1758  */
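/* Called via tcp_prot.init (see tcp_prot below) when an AF_INET stream socket
 * is created.
 */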
1759 static int tcp_v4_init_sock(struct sock *sk)
1760 {
1761 	struct inet_connection_sock *icsk = inet_csk(sk);
1762 
1763 	tcp_init_sock(sk);
1764 
1765 	icsk->icsk_af_ops = &ipv4_specific;
1766 
1767 #ifdef CONFIG_TCP_MD5SIG
1768 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1769 #endif
1770 
1771 	return 0;
1772 }
1773 
1774 void tcp_v4_destroy_sock(struct sock *sk)
1775 {
1776 	struct tcp_sock *tp = tcp_sk(sk);
1777 
1778 	tcp_clear_xmit_timers(sk);
1779 
1780 	tcp_cleanup_congestion_control(sk);
1781 
1782 	/* Clean up the write buffer. */
1783 	tcp_write_queue_purge(sk);
1784 
1785 	/* Cleans up our, hopefully empty, out_of_order_queue. */
1786 	__skb_queue_purge(&tp->out_of_order_queue);
1787 
1788 #ifdef CONFIG_TCP_MD5SIG
1789 	/* Clean up the MD5 key list, if any */
1790 	if (tp->md5sig_info) {
1791 		tcp_clear_md5_list(sk);
1792 		kfree_rcu(tp->md5sig_info, rcu);
1793 		tp->md5sig_info = NULL;
1794 	}
1795 #endif
1796 
1797 	/* Clean up the prequeue; it really should be empty by now. */
1798 	__skb_queue_purge(&tp->ucopy.prequeue);
1799 
1800 	/* Clean up a referenced TCP bind bucket. */
1801 	if (inet_csk(sk)->icsk_bind_hash)
1802 		inet_put_port(sk);
1803 
1804 	BUG_ON(tp->fastopen_rsk);
1805 
1806 	/* If the socket was aborted during a Fast Open connect(), free the pending fastopen request. */
1807 	tcp_free_fastopen_req(tp);
1808 
1809 	sk_sockets_allocated_dec(sk);
1810 	sock_release_memcg(sk);
1811 }
1812 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1813 
1814 #ifdef CONFIG_PROC_FS
1815 /* Proc filesystem TCP sock list dumping. */
1816 
1817 /*
1818  * Get the next listener socket following cur.  If cur is NULL, get the first
1819  * socket starting from the bucket given in st->bucket; when st->bucket is
1820  * zero, the very first socket in the hash table is returned.
1821  */
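/* The listening bucket lock (ilb->lock), and the syn_wait_lock while a
 * listener's request queue is being walked, remain held when an entry is
 * returned to the seq_file core; they are released in tcp_seq_stop().
 */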
1822 static void *listening_get_next(struct seq_file *seq, void *cur)
1823 {
1824 	struct inet_connection_sock *icsk;
1825 	struct hlist_nulls_node *node;
1826 	struct sock *sk = cur;
1827 	struct inet_listen_hashbucket *ilb;
1828 	struct tcp_iter_state *st = seq->private;
1829 	struct net *net = seq_file_net(seq);
1830 
1831 	if (!sk) {
1832 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1833 		spin_lock_bh(&ilb->lock);
1834 		sk = sk_nulls_head(&ilb->head);
1835 		st->offset = 0;
1836 		goto get_sk;
1837 	}
1838 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1839 	++st->num;
1840 	++st->offset;
1841 
1842 	if (st->state == TCP_SEQ_STATE_OPENREQ) {
1843 		struct request_sock *req = cur;
1844 
1845 		icsk = inet_csk(st->syn_wait_sk);
1846 		req = req->dl_next;
1847 		while (1) {
1848 			while (req) {
1849 				if (req->rsk_ops->family == st->family) {
1850 					cur = req;
1851 					goto out;
1852 				}
1853 				req = req->dl_next;
1854 			}
1855 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1856 				break;
1857 get_req:
1858 			req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1859 		}
1860 		sk	  = sk_nulls_next(st->syn_wait_sk);
1861 		st->state = TCP_SEQ_STATE_LISTENING;
1862 		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1863 	} else {
1864 		icsk = inet_csk(sk);
1865 		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1866 		if (reqsk_queue_len(&icsk->icsk_accept_queue))
1867 			goto start_req;
1868 		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1869 		sk = sk_nulls_next(sk);
1870 	}
1871 get_sk:
1872 	sk_nulls_for_each_from(sk, node) {
1873 		if (!net_eq(sock_net(sk), net))
1874 			continue;
1875 		if (sk->sk_family == st->family) {
1876 			cur = sk;
1877 			goto out;
1878 		}
1879 		icsk = inet_csk(sk);
1880 		spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1881 		if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1882 start_req:
1883 			st->uid		= sock_i_uid(sk);
1884 			st->syn_wait_sk = sk;
1885 			st->state	= TCP_SEQ_STATE_OPENREQ;
1886 			st->sbucket	= 0;
1887 			goto get_req;
1888 		}
1889 		spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1890 	}
1891 	spin_unlock_bh(&ilb->lock);
1892 	st->offset = 0;
1893 	if (++st->bucket < INET_LHTABLE_SIZE) {
1894 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1895 		spin_lock_bh(&ilb->lock);
1896 		sk = sk_nulls_head(&ilb->head);
1897 		goto get_sk;
1898 	}
1899 	cur = NULL;
1900 out:
1901 	return cur;
1902 }
1903 
1904 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1905 {
1906 	struct tcp_iter_state *st = seq->private;
1907 	void *rc;
1908 
1909 	st->bucket = 0;
1910 	st->offset = 0;
1911 	rc = listening_get_next(seq, NULL);
1912 
1913 	while (rc && *pos) {
1914 		rc = listening_get_next(seq, rc);
1915 		--*pos;
1916 	}
1917 	return rc;
1918 }
1919 
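/* Lockless check used to skip established-hash buckets with no entries. */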
1920 static inline bool empty_bucket(const struct tcp_iter_state *st)
1921 {
1922 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1923 }
1924 
1925 /*
1926  * Get first established socket starting from bucket given in st->bucket.
1927  * If st->bucket is zero, the very first socket in the hash is returned.
1928  */
1929 static void *established_get_first(struct seq_file *seq)
1930 {
1931 	struct tcp_iter_state *st = seq->private;
1932 	struct net *net = seq_file_net(seq);
1933 	void *rc = NULL;
1934 
1935 	st->offset = 0;
1936 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1937 		struct sock *sk;
1938 		struct hlist_nulls_node *node;
1939 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1940 
1941 		/* Lockless fast path for the common case of empty buckets */
1942 		if (empty_bucket(st))
1943 			continue;
1944 
1945 		spin_lock_bh(lock);
1946 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1947 			if (sk->sk_family != st->family ||
1948 			    !net_eq(sock_net(sk), net)) {
1949 				continue;
1950 			}
1951 			rc = sk;
1952 			goto out;
1953 		}
1954 		spin_unlock_bh(lock);
1955 	}
1956 out:
1957 	return rc;
1958 }
1959 
1960 static void *established_get_next(struct seq_file *seq, void *cur)
1961 {
1962 	struct sock *sk = cur;
1963 	struct hlist_nulls_node *node;
1964 	struct tcp_iter_state *st = seq->private;
1965 	struct net *net = seq_file_net(seq);
1966 
1967 	++st->num;
1968 	++st->offset;
1969 
1970 	sk = sk_nulls_next(sk);
1971 
1972 	sk_nulls_for_each_from(sk, node) {
1973 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1974 			return sk;
1975 	}
1976 
1977 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1978 	++st->bucket;
1979 	return established_get_first(seq);
1980 }
1981 
1982 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1983 {
1984 	struct tcp_iter_state *st = seq->private;
1985 	void *rc;
1986 
1987 	st->bucket = 0;
1988 	rc = established_get_first(seq);
1989 
1990 	while (rc && pos) {
1991 		rc = established_get_next(seq, rc);
1992 		--pos;
1993 	}
1994 	return rc;
1995 }
1996 
1997 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1998 {
1999 	void *rc;
2000 	struct tcp_iter_state *st = seq->private;
2001 
2002 	st->state = TCP_SEQ_STATE_LISTENING;
2003 	rc	  = listening_get_idx(seq, &pos);
2004 
2005 	if (!rc) {
2006 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2007 		rc	  = established_get_idx(seq, pos);
2008 	}
2009 
2010 	return rc;
2011 }
2012 
2013 static void *tcp_seek_last_pos(struct seq_file *seq)
2014 {
2015 	struct tcp_iter_state *st = seq->private;
2016 	int offset = st->offset;
2017 	int orig_num = st->num;
2018 	void *rc = NULL;
2019 
2020 	switch (st->state) {
2021 	case TCP_SEQ_STATE_OPENREQ:
2022 	case TCP_SEQ_STATE_LISTENING:
2023 		if (st->bucket >= INET_LHTABLE_SIZE)
2024 			break;
2025 		st->state = TCP_SEQ_STATE_LISTENING;
2026 		rc = listening_get_next(seq, NULL);
2027 		while (offset-- && rc)
2028 			rc = listening_get_next(seq, rc);
2029 		if (rc)
2030 			break;
2031 		st->bucket = 0;
2032 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2033 		/* Fallthrough */
2034 	case TCP_SEQ_STATE_ESTABLISHED:
2035 		if (st->bucket > tcp_hashinfo.ehash_mask)
2036 			break;
2037 		rc = established_get_first(seq);
2038 		while (offset-- && rc)
2039 			rc = established_get_next(seq, rc);
2040 	}
2041 
2042 	st->num = orig_num;
2043 
2044 	return rc;
2045 }
2046 
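/* The seq_file core may call start() many times for a single open file;
 * st->last_pos lets us resume from the bucket/offset reached on the previous
 * pass instead of rescanning the whole hash from the beginning.
 */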
2047 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2048 {
2049 	struct tcp_iter_state *st = seq->private;
2050 	void *rc;
2051 
2052 	if (*pos && *pos == st->last_pos) {
2053 		rc = tcp_seek_last_pos(seq);
2054 		if (rc)
2055 			goto out;
2056 	}
2057 
2058 	st->state = TCP_SEQ_STATE_LISTENING;
2059 	st->num = 0;
2060 	st->bucket = 0;
2061 	st->offset = 0;
2062 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2063 
2064 out:
2065 	st->last_pos = *pos;
2066 	return rc;
2067 }
2068 
2069 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2070 {
2071 	struct tcp_iter_state *st = seq->private;
2072 	void *rc = NULL;
2073 
2074 	if (v == SEQ_START_TOKEN) {
2075 		rc = tcp_get_idx(seq, 0);
2076 		goto out;
2077 	}
2078 
2079 	switch (st->state) {
2080 	case TCP_SEQ_STATE_OPENREQ:
2081 	case TCP_SEQ_STATE_LISTENING:
2082 		rc = listening_get_next(seq, v);
2083 		if (!rc) {
2084 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2085 			st->bucket = 0;
2086 			st->offset = 0;
2087 			rc	  = established_get_first(seq);
2088 		}
2089 		break;
2090 	case TCP_SEQ_STATE_ESTABLISHED:
2091 		rc = established_get_next(seq, v);
2092 		break;
2093 	}
2094 out:
2095 	++*pos;
2096 	st->last_pos = *pos;
2097 	return rc;
2098 }
2099 
2100 static void tcp_seq_stop(struct seq_file *seq, void *v)
2101 {
2102 	struct tcp_iter_state *st = seq->private;
2103 
2104 	switch (st->state) {
2105 	case TCP_SEQ_STATE_OPENREQ:
2106 		if (v) {
2107 			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2108 			spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2109 		}
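		/* Fall through: also release the listening bucket lock. */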
2110 	case TCP_SEQ_STATE_LISTENING:
2111 		if (v != SEQ_START_TOKEN)
2112 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2113 		break;
2114 	case TCP_SEQ_STATE_ESTABLISHED:
2115 		if (v)
2116 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2117 		break;
2118 	}
2119 }
2120 
2121 int tcp_seq_open(struct inode *inode, struct file *file)
2122 {
2123 	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2124 	struct tcp_iter_state *s;
2125 	int err;
2126 
2127 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2128 			  sizeof(struct tcp_iter_state));
2129 	if (err < 0)
2130 		return err;
2131 
2132 	s = ((struct seq_file *)file->private_data)->private;
2133 	s->family		= afinfo->family;
2134 	s->last_pos		= 0;
2135 	return 0;
2136 }
2137 EXPORT_SYMBOL(tcp_seq_open);
2138 
2139 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2140 {
2141 	int rc = 0;
2142 	struct proc_dir_entry *p;
2143 
2144 	afinfo->seq_ops.start		= tcp_seq_start;
2145 	afinfo->seq_ops.next		= tcp_seq_next;
2146 	afinfo->seq_ops.stop		= tcp_seq_stop;
2147 
2148 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2149 			     afinfo->seq_fops, afinfo);
2150 	if (!p)
2151 		rc = -ENOMEM;
2152 	return rc;
2153 }
2154 EXPORT_SYMBOL(tcp_proc_register);
2155 
2156 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2157 {
2158 	remove_proc_entry(afinfo->name, net->proc_net);
2159 }
2160 EXPORT_SYMBOL(tcp_proc_unregister);
2161 
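/* The three helpers below each format one row of /proc/net/tcp: a SYN_RECV
 * request sock, a full (listening or established) socket, and a TIME_WAIT
 * socket, respectively.
 */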
2162 static void get_openreq4(const struct request_sock *req,
2163 			 struct seq_file *f, int i, kuid_t uid)
2164 {
2165 	const struct inet_request_sock *ireq = inet_rsk(req);
2166 	long delta = req->rsk_timer.expires - jiffies;
2167 
2168 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2169 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2170 		i,
2171 		ireq->ir_loc_addr,
2172 		ireq->ir_num,
2173 		ireq->ir_rmt_addr,
2174 		ntohs(ireq->ir_rmt_port),
2175 		TCP_SYN_RECV,
2176 		0, 0, /* could print option size, but that is af dependent. */
2177 		1,    /* timers active (only the expire timer) */
2178 		jiffies_delta_to_clock_t(delta),
2179 		req->num_timeout,
2180 		from_kuid_munged(seq_user_ns(f), uid),
2181 		0,  /* non standard timer */
2182 		0, /* open_requests have no inode */
2183 		0,
2184 		req);
2185 }
2186 
2187 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2188 {
2189 	int timer_active;
2190 	unsigned long timer_expires;
2191 	const struct tcp_sock *tp = tcp_sk(sk);
2192 	const struct inet_connection_sock *icsk = inet_csk(sk);
2193 	const struct inet_sock *inet = inet_sk(sk);
2194 	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2195 	__be32 dest = inet->inet_daddr;
2196 	__be32 src = inet->inet_rcv_saddr;
2197 	__u16 destp = ntohs(inet->inet_dport);
2198 	__u16 srcp = ntohs(inet->inet_sport);
2199 	int rx_queue;
2200 
2201 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2202 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2203 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2204 		timer_active	= 1;
2205 		timer_expires	= icsk->icsk_timeout;
2206 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2207 		timer_active	= 4;
2208 		timer_expires	= icsk->icsk_timeout;
2209 	} else if (timer_pending(&sk->sk_timer)) {
2210 		timer_active	= 2;
2211 		timer_expires	= sk->sk_timer.expires;
2212 	} else {
2213 		timer_active	= 0;
2214 		timer_expires = jiffies;
2215 	}
2216 
2217 	if (sk->sk_state == TCP_LISTEN)
2218 		rx_queue = sk->sk_ack_backlog;
2219 	else
2220 		/*
2221 		 * Because we don't lock the socket, we might find a transient negative value.
2222 		 */
2223 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2224 
2225 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2226 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2227 		i, src, srcp, dest, destp, sk->sk_state,
2228 		tp->write_seq - tp->snd_una,
2229 		rx_queue,
2230 		timer_active,
2231 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2232 		icsk->icsk_retransmits,
2233 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2234 		icsk->icsk_probes_out,
2235 		sock_i_ino(sk),
2236 		atomic_read(&sk->sk_refcnt), sk,
2237 		jiffies_to_clock_t(icsk->icsk_rto),
2238 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2239 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2240 		tp->snd_cwnd,
2241 		sk->sk_state == TCP_LISTEN ?
2242 		    (fastopenq ? fastopenq->max_qlen : 0) :
2243 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2244 }
2245 
2246 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2247 			       struct seq_file *f, int i)
2248 {
2249 	long delta = tw->tw_timer.expires - jiffies;
2250 	__be32 dest, src;
2251 	__u16 destp, srcp;
2252 
2253 	dest  = tw->tw_daddr;
2254 	src   = tw->tw_rcv_saddr;
2255 	destp = ntohs(tw->tw_dport);
2256 	srcp  = ntohs(tw->tw_sport);
2257 
2258 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2259 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2260 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2261 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2262 		atomic_read(&tw->tw_refcnt), tw);
2263 }
2264 
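/* Every /proc/net/tcp record is padded to a fixed width of TMPSZ - 1
 * characters plus a newline via seq_setwidth()/seq_pad() below.
 */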
2265 #define TMPSZ 150
2266 
2267 static int tcp4_seq_show(struct seq_file *seq, void *v)
2268 {
2269 	struct tcp_iter_state *st;
2270 	struct sock *sk = v;
2271 
2272 	seq_setwidth(seq, TMPSZ - 1);
2273 	if (v == SEQ_START_TOKEN) {
2274 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2275 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2276 			   "inode");
2277 		goto out;
2278 	}
2279 	st = seq->private;
2280 
2281 	switch (st->state) {
2282 	case TCP_SEQ_STATE_LISTENING:
2283 	case TCP_SEQ_STATE_ESTABLISHED:
2284 		if (sk->sk_state == TCP_TIME_WAIT)
2285 			get_timewait4_sock(v, seq, st->num);
2286 		else
2287 			get_tcp4_sock(v, seq, st->num);
2288 		break;
2289 	case TCP_SEQ_STATE_OPENREQ:
2290 		get_openreq4(v, seq, st->num, st->uid);
2291 		break;
2292 	}
2293 out:
2294 	seq_pad(seq, '\n');
2295 	return 0;
2296 }
2297 
2298 static const struct file_operations tcp_afinfo_seq_fops = {
2299 	.owner   = THIS_MODULE,
2300 	.open    = tcp_seq_open,
2301 	.read    = seq_read,
2302 	.llseek  = seq_lseek,
2303 	.release = seq_release_net
2304 };
2305 
2306 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2307 	.name		= "tcp",
2308 	.family		= AF_INET,
2309 	.seq_fops	= &tcp_afinfo_seq_fops,
2310 	.seq_ops	= {
2311 		.show		= tcp4_seq_show,
2312 	},
2313 };
2314 
2315 static int __net_init tcp4_proc_init_net(struct net *net)
2316 {
2317 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2318 }
2319 
2320 static void __net_exit tcp4_proc_exit_net(struct net *net)
2321 {
2322 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2323 }
2324 
2325 static struct pernet_operations tcp4_net_ops = {
2326 	.init = tcp4_proc_init_net,
2327 	.exit = tcp4_proc_exit_net,
2328 };
2329 
2330 int __init tcp4_proc_init(void)
2331 {
2332 	return register_pernet_subsys(&tcp4_net_ops);
2333 }
2334 
2335 void tcp4_proc_exit(void)
2336 {
2337 	unregister_pernet_subsys(&tcp4_net_ops);
2338 }
2339 #endif /* CONFIG_PROC_FS */
2340 
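/* Wires the TCP-specific handlers into the generic AF_INET socket layer;
 * the protocol switch in af_inet.c selects this proto for SOCK_STREAM sockets.
 */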
2341 struct proto tcp_prot = {
2342 	.name			= "TCP",
2343 	.owner			= THIS_MODULE,
2344 	.close			= tcp_close,
2345 	.connect		= tcp_v4_connect,
2346 	.disconnect		= tcp_disconnect,
2347 	.accept			= inet_csk_accept,
2348 	.ioctl			= tcp_ioctl,
2349 	.init			= tcp_v4_init_sock,
2350 	.destroy		= tcp_v4_destroy_sock,
2351 	.shutdown		= tcp_shutdown,
2352 	.setsockopt		= tcp_setsockopt,
2353 	.getsockopt		= tcp_getsockopt,
2354 	.recvmsg		= tcp_recvmsg,
2355 	.sendmsg		= tcp_sendmsg,
2356 	.sendpage		= tcp_sendpage,
2357 	.backlog_rcv		= tcp_v4_do_rcv,
2358 	.release_cb		= tcp_release_cb,
2359 	.hash			= inet_hash,
2360 	.unhash			= inet_unhash,
2361 	.get_port		= inet_csk_get_port,
2362 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2363 	.stream_memory_free	= tcp_stream_memory_free,
2364 	.sockets_allocated	= &tcp_sockets_allocated,
2365 	.orphan_count		= &tcp_orphan_count,
2366 	.memory_allocated	= &tcp_memory_allocated,
2367 	.memory_pressure	= &tcp_memory_pressure,
2368 	.sysctl_mem		= sysctl_tcp_mem,
2369 	.sysctl_wmem		= sysctl_tcp_wmem,
2370 	.sysctl_rmem		= sysctl_tcp_rmem,
2371 	.max_header		= MAX_TCP_HEADER,
2372 	.obj_size		= sizeof(struct tcp_sock),
2373 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2374 	.twsk_prot		= &tcp_timewait_sock_ops,
2375 	.rsk_prot		= &tcp_request_sock_ops,
2376 	.h.hashinfo		= &tcp_hashinfo,
2377 	.no_autobind		= true,
2378 #ifdef CONFIG_COMPAT
2379 	.compat_setsockopt	= compat_tcp_setsockopt,
2380 	.compat_getsockopt	= compat_tcp_getsockopt,
2381 #endif
2382 #ifdef CONFIG_MEMCG_KMEM
2383 	.init_cgroup		= tcp_init_cgroup,
2384 	.destroy_cgroup		= tcp_destroy_cgroup,
2385 	.proto_cgroup		= tcp_proto_cgroup,
2386 #endif
2387 };
2388 EXPORT_SYMBOL(tcp_prot);
2389 
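/* Each network namespace keeps one kernel control socket per possible CPU in
 * net->ipv4.tcp_sk; tcp_v4_send_reset() and tcp_v4_send_ack() use them to
 * transmit control segments that are not tied to a full user socket.
 */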
2390 static void __net_exit tcp_sk_exit(struct net *net)
2391 {
2392 	int cpu;
2393 
2394 	for_each_possible_cpu(cpu)
2395 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2396 	free_percpu(net->ipv4.tcp_sk);
2397 }
2398 
2399 static int __net_init tcp_sk_init(struct net *net)
2400 {
2401 	int res, cpu;
2402 
2403 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2404 	if (!net->ipv4.tcp_sk)
2405 		return -ENOMEM;
2406 
2407 	for_each_possible_cpu(cpu) {
2408 		struct sock *sk;
2409 
2410 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2411 					   IPPROTO_TCP, net);
2412 		if (res)
2413 			goto fail;
2414 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2415 	}
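	/* Per-namespace defaults: tcp_ecn == 2 accepts ECN when the peer
	 * requests it but does not request it on outgoing connections;
	 * tcp_base_mss, tcp_probe_threshold and tcp_probe_interval tune
	 * path MTU probing.
	 */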
2416 	net->ipv4.sysctl_tcp_ecn = 2;
2417 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2418 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2419 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2420 	return 0;
2421 
2422 fail:
2423 	tcp_sk_exit(net);
2424 
2425 	return res;
2426 }
2427 
2428 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2429 {
2430 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2431 }
2432 
2433 static struct pernet_operations __net_initdata tcp_sk_ops = {
2434        .init	   = tcp_sk_init,
2435        .exit	   = tcp_sk_exit,
2436        .exit_batch = tcp_sk_exit_batch,
2437 };
2438 
2439 void __init tcp_v4_init(void)
2440 {
2441 	inet_hashinfo_init(&tcp_hashinfo);
2442 	if (register_pernet_subsys(&tcp_sk_ops))
2443 		panic("Failed to create the TCP control socket.\n");
2444 }
2445