This source file includes the following definitions:
- nf_flow_state_check
- nf_flow_nat_ip_tcp
- nf_flow_nat_ip_udp
- nf_flow_nat_ip_l4proto
- nf_flow_snat_ip
- nf_flow_dnat_ip
- nf_flow_nat_ip
- ip_has_options
- nf_flow_tuple_ip
- nf_flow_exceeds_mtu
- nf_flow_offload_dst_check
- nf_flow_xmit_xfrm
- nf_flow_offload_ip_hook
- nf_flow_nat_ipv6_tcp
- nf_flow_nat_ipv6_udp
- nf_flow_nat_ipv6_l4proto
- nf_flow_snat_ipv6
- nf_flow_dnat_ipv6
- nf_flow_nat_ipv6
- nf_flow_tuple_ipv6
- nf_flow_offload_ipv6_hook
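
These are the IPv4/IPv6 fast-path handlers for the netfilter flowtable. For context, a flowtable type wires nf_flow_offload_ip_hook in as its packet hook. A minimal sketch of that registration, assuming the field layout used by nf_flow_table_ipv4.c around this kernel version (the exact field set varies between releases):

static struct nf_flowtable_type flowtable_ipv4 = {
	.family	= NFPROTO_IPV4,
	.init	= nf_flow_table_init,
	.free	= nf_flow_table_free,
	.hook	= nf_flow_offload_ip_hook,
	.owner	= THIS_MODULE,
};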
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>

#include <linux/tcp.h>
#include <linux/udp.h>

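/* Kick TCP flows that are closing back to the regular stack: on FIN or RST,
 * tear down the offloaded flow entry and return -1 so the caller falls back
 * to the normal conntrack path for connection teardown.
 */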
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

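/* L4 checksum fixups for address rewrites: TCP always updates the checksum,
 * UDP only when a checksum is in use (or CHECKSUM_PARTIAL), mapping a
 * resulting zero sum to CSUM_MANGLED_0.
 */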
static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);

	return 0;
}

static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			      __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				  unsigned int thoff, __be32 addr,
				  __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

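/* Rewrite the source (SNAT) or destination (DNAT) address depending on the
 * flow direction, using the address recorded in the opposite tuple, then fix
 * the IP header checksum and hand off to the L4 checksum helpers above.
 */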
static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   struct iphdr *iph, unsigned int thoff,
			   enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	default:
		return -1;
	}
	csum_replace4(&iph->check, addr, new_addr);

	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

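/* Apply the NAT manglings selected by the flow's SNAT/DNAT flags: ports
 * first (nf_flow_snat_port/nf_flow_dnat_port), then the IPv4 addresses.
 */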
static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	struct iphdr *iph = ip_hdr(skb);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
		return -1;

	return 0;
}

static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

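/* Build the lookup tuple from the IPv4 header. Packets that cannot take the
 * fast path (fragments, IP options, non-TCP/UDP, or a TTL about to expire)
 * are rejected here so the hook falls back to the regular forwarding path.
 */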
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -1;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return -1;

	if (iph->ttl <= 1)
		return -1;

	thoff = iph->ihl * 4;
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	iph = ip_hdr(skb);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;
	tuple->iifidx = dev->ifindex;

	return 0;
}

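/* An oversized packet cannot be forwarded as-is; GSO packets are still fine
 * as long as the resulting segments fit within the path MTU.
 */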
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

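/* Only xfrm-backed routes are revalidated here; plain routes are trusted
 * until the flow entry is torn down.
 */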
static int nf_flow_offload_dst_check(struct dst_entry *dst)
{
	if (unlikely(dst_xfrm(dst)))
		return dst_check(dst, 0) ? 0 : -1;

	return 0;
}

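/* Hand the packet to dst_output() so it goes through the xfrm transmit path;
 * the skb is owned by that path from here on, hence NF_STOLEN.
 */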
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

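/* IPv4 fast-path hook, typically attached at a flowtable's ingress point:
 * look up the flow, check MTU and TCP state, revalidate the cached route,
 * apply NAT, refresh the flow timeout, decrement the TTL and transmit
 * directly via the cached neighbour (or the xfrm path).
 */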
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct rtable *rt;
	unsigned int thoff;
	struct iphdr *iph;
	__be32 nexthop;

	if (skb->protocol != htons(ETH_P_IP))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (skb_try_make_writable(skb, sizeof(*iph)))
		return NF_DROP;

	thoff = ip_hdr(skb)->ihl * 4;
	if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
		return NF_ACCEPT;

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
		return NF_DROP;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	iph = ip_hdr(skb);
	ip_decrease_ttl(iph);
	skb->tstamp = 0;

	if (unlikely(dst_xfrm(&rt->dst))) {
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	skb->dev = outdev;
	nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

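/* IPv6 counterparts of the helpers above: the same checksum, NAT and tuple
 * extraction logic, operating on 128-bit addresses and a fixed 40-byte
 * header (packets carrying extension headers are not offloaded).
 */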
static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct tcphdr *tcph;

	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);

	return 0;
}

static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				struct in6_addr *addr,
				struct in6_addr *new_addr)
{
	struct udphdr *udph;

	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
		return -1;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	return 0;
}

static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				    unsigned int thoff, struct in6_addr *addr,
				    struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	case IPPROTO_UDP:
		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
			return NF_DROP;
		break;
	}

	return 0;
}

static int nf_flow_snat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb, struct ipv6hdr *ip6h,
			     unsigned int thoff,
			     enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	default:
		return -1;
	}

	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

static int nf_flow_nat_ipv6(const struct flow_offload *flow,
			    struct sk_buff *skb,
			    enum flow_offload_tuple_dir dir)
{
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	unsigned int thoff = sizeof(*ip6h);

	if (flow->flags & FLOW_OFFLOAD_SNAT &&
	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;
	if (flow->flags & FLOW_OFFLOAD_DNAT &&
	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
		return -1;

	return 0;
}

static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_may_pull(skb, sizeof(*ip6h)))
		return -1;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return -1;

	if (ip6h->hop_limit <= 1)
		return -1;

	thoff = sizeof(*ip6h);
	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
		return -1;

	ip6h = ipv6_hdr(skb);
	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;
	tuple->iifidx = dev->ifindex;

	return 0;
}

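/* IPv6 fast-path hook, mirroring nf_flow_offload_ip_hook: same lookup and
 * sanity checks, but the hop limit is decremented and the next hop is
 * resolved through the ND table instead of ARP.
 */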
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;

	if (skb->protocol != htons(ETH_P_IPV6))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
	outdev = rt->dst.dev;

	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
		return NF_ACCEPT;

	if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
				sizeof(*ip6h)))
		return NF_ACCEPT;

	if (nf_flow_offload_dst_check(&rt->dst)) {
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	if (skb_try_make_writable(skb, sizeof(*ip6h)))
		return NF_DROP;

	if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
		return NF_DROP;

	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
	ip6h = ipv6_hdr(skb);
	ip6h->hop_limit--;
	skb->tstamp = 0;

	if (unlikely(dst_xfrm(&rt->dst))) {
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	skb->dev = outdev;
	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
	skb_dst_set_noref(skb, &rt->dst);
	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);

	return NF_STOLEN;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);