This source file includes following definitions.
- cookie_hash
- cookie_init_timestamp
- secure_tcp_syn_cookie
- check_tcp_syn_cookie
- __cookie_v4_init_sequence
- cookie_v4_init_sequence
- __cookie_v4_check
- tcp_get_cookie_sock
- cookie_timestamp_decode
- cookie_ecn_ok
- cookie_v4_check
1
2
3
4
5
6
7
8
9 #include <linux/tcp.h>
10 #include <linux/slab.h>
11 #include <linux/random.h>
12 #include <linux/siphash.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <net/secure_seq.h>
16 #include <net/tcp.h>
17 #include <net/route.h>
18
19 static siphash_key_t syncookie_secret[2] __read_mostly;
20
21 #define COOKIEBITS 24
22 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38 #define TS_OPT_WSCALE_MASK 0xf
39 #define TS_OPT_SACK BIT(4)
40 #define TS_OPT_ECN BIT(5)
41
42
43
44
45 #define TSBITS 6
46 #define TSMASK (((__u32)1 << TSBITS) - 1)
47
48 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
49 u32 count, int c)
50 {
51 net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
52 return siphash_4u32((__force u32)saddr, (__force u32)daddr,
53 (__force u32)sport << 16 | (__force u32)dport,
54 count, &syncookie_secret[c]);
55 }
56
57
58
59
60
61
62
63
64
/*
 * Encode the options the client advertised (window scale, SACK, ECN)
 * into the low TSBITS bits of the TCP timestamp we are about to send,
 * so they can be recovered from the echoed timestamp when the cookie
 * ACK arrives.  Returns the timestamp converted to nanosecond units
 * ((u64)ts * NSEC_PER_SEC / TCP_TS_HZ).
 */
u64 cookie_init_timestamp(struct request_sock *req)
{
	struct inet_request_sock *ireq;
	u32 ts, ts_now = tcp_time_stamp_raw();
	u32 options = 0;

	ireq = inet_rsk(req);

	/* An all-ones wscale field marks "no window scaling offered". */
	options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK;
	if (ireq->sack_ok)
		options |= TS_OPT_SACK;
	if (ireq->ecn_ok)
		options |= TS_OPT_ECN;

	/* Overwrite the low TSBITS bits of the current timestamp. */
	ts = ts_now & ~TSMASK;
	ts |= options;
	if (ts > ts_now) {
		/* The overwrite pushed the value into the future; back off
		 * one TSBITS quantum and re-apply the option bits so the
		 * timestamp never runs ahead of the clock.
		 */
		ts >>= TSBITS;
		ts--;
		ts <<= TSBITS;
		ts |= options;
	}
	return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
}
89
90
/*
 * Build a syncookie ISN: the bits above COOKIEBITS mix in the current
 * cookie time (count), the low COOKIEBITS bits carry a second keyed
 * hash plus @data (the MSS table index), and the whole value is offset
 * by a hash of the 4-tuple and the client's sequence number @sseq.
 * check_tcp_syn_cookie() below inverts this construction.
 */
static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
				   __be16 dport, __u32 sseq, __u32 data)
{
	/* tcp_cookie_time() advances once per coarse time unit; together
	 * with MAX_SYNCOOKIE_AGE it bounds how long a cookie is valid.
	 */
	u32 count = tcp_cookie_time();
	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
		sseq + (count << COOKIEBITS) +
		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
		& COOKIEMASK));
}
110
111
112
113
114
115
116
117
118
119
/*
 * Invert secure_tcp_syn_cookie(): strip the 4-tuple/sseq hash layer,
 * verify the embedded cookie time is recent enough, and return the
 * hidden data value (the MSS table index), or (__u32)-1 when the
 * cookie has expired or fails to decode.
 */
static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
				  __be16 sport, __be16 dport, __u32 sseq)
{
	u32 diff, count = tcp_cookie_time();

	/* Strip away the outer hash + sseq layer from the cookie. */
	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;

	/* Top bits now hold the cookie's creation time; age-check it. */
	diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
	if (diff >= MAX_SYNCOOKIE_AGE)
		return (__u32)-1;

	/* Remove the inner (time-keyed) hash, leaving the data bits. */
	return (cookie -
		cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
		& COOKIEMASK;
}
137
138
139
140
141
142
143
144
145
146
147
148
149
/*
 * MSS values the cookie can encode: the table index is stored in the
 * cookie's data bits, so only a handful of representative sizes fit.
 * Presumably chosen to cover the common path MTUs (536 = conservative
 * IPv4 default, 1300 = tunnels/VPN, 1440/1460 = Ethernet with/without
 * TCP timestamps) — TODO confirm rationale against upstream history.
 */
static __u16 const msstab[] = {
	536,
	1300,
	1440,
	1460,
};
156
157
158
159
160
161 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
162 u16 *mssp)
163 {
164 int mssind;
165 const __u16 mss = *mssp;
166
167 for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
168 if (mss >= msstab[mssind])
169 break;
170 *mssp = msstab[mssind];
171
172 return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
173 th->source, th->dest, ntohl(th->seq),
174 mssind);
175 }
176 EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
177
178 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mssp)
179 {
180 const struct iphdr *iph = ip_hdr(skb);
181 const struct tcphdr *th = tcp_hdr(skb);
182
183 return __cookie_v4_init_sequence(iph, th, mssp);
184 }
185
186
187
188
189
190 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
191 u32 cookie)
192 {
193 __u32 seq = ntohl(th->seq) - 1;
194 __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
195 th->source, th->dest, seq);
196
197 return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
198 }
199 EXPORT_SYMBOL_GPL(__cookie_v4_check);
200
/*
 * Create the full (child) socket for a validated syncookie ACK and
 * hand it to the listener's accept queue.  On success the child is
 * returned; on any failure the request sock is freed and NULL is
 * returned.  @tsoff is the per-connection timestamp offset to install
 * on the child.
 */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *child;
	bool own_req;

	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
						 NULL, &own_req);
	if (child) {
		refcount_set(&req->rsk_refcnt, 1);
		tcp_sk(child)->tsoffset = tsoff;
		sock_rps_save_rxhash(child, skb);
		if (inet_csk_reqsk_queue_add(sk, req, child))
			return child;

		/* Could not add to the accept queue (listener going away,
		 * presumably — TODO confirm); drop the child socket.
		 */
		bh_unlock_sock(child);
		sock_put(child);
	}
	/* Failure path: the request was never published, free it directly. */
	__reqsk_free(req);

	return NULL;
}
EXPORT_SYMBOL(tcp_get_cookie_sock);
226
227
228
229
230
231
232
233
234
235 bool cookie_timestamp_decode(const struct net *net,
236 struct tcp_options_received *tcp_opt)
237 {
238
239 u32 options = tcp_opt->rcv_tsecr;
240
241 if (!tcp_opt->saw_tstamp) {
242 tcp_clear_options(tcp_opt);
243 return true;
244 }
245
246 if (!net->ipv4.sysctl_tcp_timestamps)
247 return false;
248
249 tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;
250
251 if (tcp_opt->sack_ok && !net->ipv4.sysctl_tcp_sack)
252 return false;
253
254 if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
255 return true;
256
257 tcp_opt->wscale_ok = 1;
258 tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;
259
260 return net->ipv4.sysctl_tcp_window_scaling != 0;
261 }
262 EXPORT_SYMBOL(cookie_timestamp_decode);
263
264 bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,
265 const struct net *net, const struct dst_entry *dst)
266 {
267 bool ecn_ok = tcp_opt->rcv_tsecr & TS_OPT_ECN;
268
269 if (!ecn_ok)
270 return false;
271
272 if (net->ipv4.sysctl_tcp_ecn)
273 return true;
274
275 return dst_feature(dst, RTAX_FEATURE_ECN);
276 }
277 EXPORT_SYMBOL(cookie_ecn_ok);
278
279
280
281
282
/*
 * Validate the final ACK of a connection established via SYN cookies:
 * decode the cookie and the echoed timestamp options, rebuild the
 * request sock that was never stored, route the flow, and create the
 * child socket.  Returns the listener @sk when the packet is not
 * cookie material (so normal processing continues), the child socket
 * on success, or NULL on failure.
 */
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
{
	struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct tcp_options_received tcp_opt;
	struct inet_request_sock *ireq;
	struct tcp_request_sock *treq;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct tcphdr *th = tcp_hdr(skb);
	__u32 cookie = ntohl(th->ack_seq) - 1;
	struct sock *ret = sk;
	struct request_sock *req;
	int mss;
	struct rtable *rt;
	__u8 rcv_wscale;
	struct flowi4 fl4;
	u32 tsoff = 0;

	/* Only a bare ACK (no RST) can complete a cookie handshake. */
	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
		goto out;

	/* Cookies are only honoured shortly after a listen-queue overflow. */
	if (tcp_synq_no_recent_overflow(sk))
		goto out;

	mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
	if (mss == 0) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
		goto out;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

	/* check for timestamp cookie support */
	memset(&tcp_opt, 0, sizeof(tcp_opt));
	tcp_parse_options(sock_net(sk), skb, &tcp_opt, 0, NULL);

	if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
		/* Undo the per-connection timestamp offset so the option
		 * bits encoded by cookie_init_timestamp() can be decoded.
		 */
		tsoff = secure_tcp_ts_off(sock_net(sk),
					  ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr);
		tcp_opt.rcv_tsecr -= tsoff;
	}

	if (!cookie_timestamp_decode(sock_net(sk), &tcp_opt))
		goto out;

	ret = NULL;
	req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false);
	if (!req)
		goto out;

	/* Rebuild the request sock the SYN would normally have created. */
	ireq = inet_rsk(req);
	treq = tcp_rsk(req);
	treq->rcv_isn = ntohl(th->seq) - 1;
	treq->snt_isn = cookie;
	treq->ts_off = 0;
	treq->txhash = net_tx_rndhash();
	req->mss = mss;
	ireq->ir_num = ntohs(th->dest);
	ireq->ir_rmt_port = th->source;
	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->ir_mark = inet_request_mark(sk, skb);
	ireq->snd_wscale = tcp_opt.snd_wscale;
	ireq->sack_ok = tcp_opt.sack_ok;
	ireq->wscale_ok = tcp_opt.wscale_ok;
	ireq->tstamp_ok = tcp_opt.saw_tstamp;
	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
	treq->snt_synack = 0;
	treq->tfo_listener = false;
	if (IS_ENABLED(CONFIG_SMC))
		ireq->smc_ok = 0;

	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);

	/* The SYN's IP options were thrown away with the SYN itself;
	 * save the ones carried by this ACK instead (presumably the
	 * client repeats them — TODO confirm against RFC 1122 4.2.3.8).
	 */
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));

	if (security_inet_conn_request(sk, skb, req)) {
		reqsk_free(req);
		goto out;
	}

	req->num_retrans = 0;

	/* Route the flow to pick the correct source/destination pair
	 * (honouring source routing from the saved IP options).
	 */
	flowi4_init_output(&fl4, ireq->ir_iif, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
			   inet_sk_flowi_flags(sk),
			   opt->srr ? opt->faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(sock_net(sk), &fl4);
	if (IS_ERR(rt)) {
		reqsk_free(req);
		goto out;
	}

	/* Redo the window computation the original SYN-ACK performed. */
	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);

	tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
				  ireq->wscale_ok, &rcv_wscale,
				  dst_metric(&rt->dst, RTAX_INITRWND));

	ireq->rcv_wscale = rcv_wscale;
	ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst);

	ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff);

	/* ip_queue_xmit() on the child depends on the cached flow. */
	if (ret)
		inet_sk(ret)->cork.fl.u.ip4 = fl4;
out:	return ret;
}