/*
 * INET        An implementation of the TCP/IP protocol suite for the LINUX
 *             operating system.  INET is implemented using the  BSD Socket
 *             interface as the means of communication with the user level.
 *
 *             Support for INET6 connection oriented protocols.
 *
 * Authors:    See the TCPv6 sources
 *
 *             This program is free software; you can redistribute it and/or
 *             modify it under the terms of the GNU General Public License
 *             as published by the Free Software Foundation; either version
 *             2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

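/*
 * Check whether binding @sk to the port owned by @tb would clash with a
 * socket already bound there.  Sockets only conflict when they share a
 * bound device (or one of them is unbound) and neither the SO_REUSEADDR
 * nor the SO_REUSEPORT exception applies; unless @relax is set, two
 * non-listening SO_REUSEADDR sockets on the same address also conflict.
 */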
int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb, bool relax)
{
	const struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/* We must walk the whole port owner list in this case. -DaveM */
	/*
	 * See comment in inet_csk_bind_conflict about sock lookup
	 * vs net namespaces issues.
	 */
	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			     sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			     (sk2->sk_state != TCP_TIME_WAIT &&
			      !uid_eq(uid,
				      sock_i_uid((struct sock *)sk2))))) {
				if (ipv6_rcv_saddr_equal(sk, sk2))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN &&
			    ipv6_rcv_saddr_equal(sk, sk2))
				break;
		}
	}

	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

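/*
 * Build the flow description for replying to a connection request and
 * look up an IPv6 route for it.  Returns the dst entry, or NULL if the
 * route lookup failed.
 */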
struct dst_entry *inet6_csk_route_req(struct sock *sk,
				      struct flowi6 *fl6,
				      const struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = IPPROTO_TCP;
	fl6->daddr = ireq->ir_v6_rmt_addr;
	rcu_read_lock();
	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
	rcu_read_unlock();
	fl6->saddr = ireq->ir_v6_loc_addr;
	fl6->flowi6_oif = ireq->ir_iif;
	fl6->flowi6_mark = ireq->ir_mark;
	fl6->fl6_dport = ireq->ir_rmt_port;
	fl6->fl6_sport = htons(ireq->ir_num);
	security_req_classify_flow(req, flowi6_to_flowi(fl6));

	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
	if (IS_ERR(dst))
		return NULL;

	return dst;
}

/*
 * request_sock (formerly open request) hash tables.
 */
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
			   const u32 rnd, const u32 synq_hsize)
{
	u32 c;

	c = jhash_3words((__force u32)raddr->s6_addr32[0],
			 (__force u32)raddr->s6_addr32[1],
			 (__force u32)raddr->s6_addr32[2],
			 rnd);

	c = jhash_2words((__force u32)raddr->s6_addr32[3],
			 (__force u32)rport,
			 c);

	return c & (synq_hsize - 1);
}

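/*
 * Find a pending connection request on listener @sk that matches the
 * remote port and address, local address and (if the request is bound
 * to one) the incoming interface.  A reference is taken on the request
 * before it is returned; NULL means no match was found.
 */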
struct request_sock *inet6_csk_search_req(struct sock *sk,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req;
	u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
				   lopt->nr_table_entries);

	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->ir_rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
		    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
		    (!ireq->ir_iif || ireq->ir_iif == iif)) {
			atomic_inc(&req->rsk_refcnt);
			WARN_ON(req->sk != NULL);
			break;
		}
	}
	spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);

	return req;
}
EXPORT_SYMBOL_GPL(inet6_csk_search_req);

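/*
 * Hash a new connection request into the listener's SYN table and
 * update the accept-queue accounting for pending requests.
 */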
void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
				    struct request_sock *req,
				    const unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
				      inet_rsk(req)->ir_rmt_port,
				      lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

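/* Report the peer's address and port for @sk as a sockaddr_in6. */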
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = sk->sk_v6_daddr;
	sin6->sin6_port	= inet_sk(sk)->inet_dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
						  sk->sk_bound_dev_if);
}
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

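/* Cache @dst in the socket via the generic __ip6_dst_store() helper. */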
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   const struct in6_addr *daddr,
			   const struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);
}

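/* Return the socket's cached dst if @cookie shows it is still valid. */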
static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
	return __sk_dst_check(sk, cookie);
}

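/*
 * Fill @fl6 from the connected socket's addresses, ports and options,
 * then return the socket's cached route if it is still valid, or do a
 * fresh lookup and cache the result.  May return an ERR_PTR value from
 * ip6_dst_lookup_flow().
 */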
static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
						struct flowi6 *fl6)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = sk->sk_protocol;
	fl6->daddr = sk->sk_v6_daddr;
	fl6->saddr = np->saddr;
	fl6->flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
	fl6->flowi6_oif = sk->sk_bound_dev_if;
	fl6->flowi6_mark = sk->sk_mark;
	fl6->fl6_sport = inet->inet_sport;
	fl6->fl6_dport = inet->inet_dport;
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));

	rcu_read_lock();
	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
	rcu_read_unlock();

	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
	if (!dst) {
		dst = ip6_dst_lookup_flow(sk, fl6, final_p);

		if (!IS_ERR(dst))
			__inet6_csk_dst_store(sk, dst, NULL, NULL);
	}
	return dst;
}

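/*
 * Transmit @skb on the connected socket: route it (reusing the cached
 * dst when possible), attach the dst to the skb without taking a
 * reference, restore the final destination that routing may have
 * replaced with an intermediate hop, and pass the packet to ip6_xmit().
 * On routing failure the skb is freed and the error is recorded in
 * sk_err_soft.
 */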
int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 fl6;
	struct dst_entry *dst;
	int res;

	dst = inet6_csk_route_socket(sk, &fl6);
	if (IS_ERR(dst)) {
		sk->sk_err_soft = -PTR_ERR(dst);
		sk->sk_route_caps = 0;
		kfree_skb(skb);
		return PTR_ERR(dst);
	}

	rcu_read_lock();
	skb_dst_set_noref(skb, dst);

	/* Restore final destination back after routing done */
	fl6.daddr = sk->sk_v6_daddr;

	res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
		       np->tclass);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);

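/*
 * Propagate a new path MTU to the route used by @sk and return the
 * re-looked-up dst entry, or NULL if no route could be found.
 */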
struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct flowi6 fl6;
	struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);

	if (IS_ERR(dst))
		return NULL;
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = inet6_csk_route_socket(sk, &fl6);
	return IS_ERR(dst) ? NULL : dst;
}
EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);