This source file includes the following definitions:
- nft_reject_br_push_etherhdr
- nft_bridge_iphdr_validate
- nft_reject_br_send_v4_tcp_reset
- nft_reject_br_send_v4_unreach
- nft_bridge_ip6hdr_validate
- nft_reject_br_send_v6_tcp_reset
- reject6_br_csum_ok
- nft_reject_br_send_v6_unreach
- nft_reject_bridge_eval
- nft_reject_bridge_validate
- nft_reject_bridge_init
- nft_reject_bridge_dump
- nft_reject_bridge_module_init
- nft_reject_bridge_module_exit
1
2
3
4
5
6 #include <linux/kernel.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/netlink.h>
10 #include <linux/netfilter.h>
11 #include <linux/netfilter/nf_tables.h>
12 #include <net/netfilter/nf_tables.h>
13 #include <net/netfilter/nft_reject.h>
14 #include <net/netfilter/ipv4/nf_reject.h>
15 #include <net/netfilter/ipv6/nf_reject.h>
16 #include <linux/ip.h>
17 #include <net/ip.h>
18 #include <net/ip6_checksum.h>
19 #include <linux/netfilter_bridge.h>
20 #include <linux/netfilter_ipv6.h>
21 #include "../br_private.h"
22
/* Build the Ethernet header of the reject packet (nskb) from the original
 * packet (oldskb): swap source/destination MAC addresses so the reply goes
 * back to the sender, and propagate any hw-accelerated VLAN tag.
 */
static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
					struct sk_buff *nskb)
{
	struct ethhdr *eth;

	eth = skb_push(nskb, ETH_HLEN);
	skb_reset_mac_header(nskb);
	/* Reply is addressed to the original sender. */
	ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
	ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
	eth->h_proto = eth_hdr(oldskb)->h_proto;
	/* Leave nskb->data pointing at the network header again. */
	skb_pull(nskb, ETH_HLEN);

	if (skb_vlan_tag_present(oldskb)) {
		u16 vid = skb_vlan_tag_get(oldskb);

		__vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid);
	}
}
41
42 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
43 {
44 struct iphdr *iph;
45 u32 len;
46
47 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
48 return 0;
49
50 iph = ip_hdr(skb);
51 if (iph->ihl < 5 || iph->version != 4)
52 return 0;
53
54 len = ntohs(iph->tot_len);
55 if (skb->len < len)
56 return 0;
57 else if (len < (iph->ihl*4))
58 return 0;
59
60 if (!pskb_may_pull(skb, iph->ihl*4))
61 return 0;
62
63 return 1;
64 }
65
66
67
68
/* Send a TCP RST in reply to oldskb, forwarded back out through the
 * bridge port (dev) the packet arrived on rather than via the IPv4
 * routing layer.
 */
static void nft_reject_br_send_v4_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _oth;

	if (!nft_bridge_iphdr_validate(oldskb))
		return;

	/* NULL when no usable TCP header can be extracted (e.g. the
	 * packet is not TCP) — in that case no reset is sent.
	 */
	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   net->ipv4.sysctl_ip_default_ttl);
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	/* Fix up total length/checksum now that the TCP header is in. */
	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
102
/* Send an ICMPv4 destination-unreachable (with the given code) in reply
 * to oldskb, forwarded back out through the bridge port it arrived on.
 */
static void nft_reject_br_send_v4_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	__wsum csum;
	u8 proto;

	if (!nft_bridge_iphdr_validate(oldskb))
		return;

	/* Never reply to non-first fragments. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* Quote as much of the offending packet as possible while
	 * keeping the whole reply within 576 bytes (536 payload).
	 */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* Drop any padding beyond the IP total length before checksumming. */
	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
		return;

	proto = ip_hdr(oldskb)->protocol;

	/* Don't reply to packets whose transport checksum is bad
	 * (only for protocols whose checksum can be verified).
	 */
	if (!skb_csum_unnecessary(oldskb) &&
	    nf_reject_verify_csum(proto) &&
	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   net->ipv4.sysctl_ip_default_ttl);

	skb_reset_transport_header(nskb);
	icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
	icmph->type = ICMP_DEST_UNREACH;
	icmph->code = code;

	/* Embed the quoted original packet after the ICMP header. */
	skb_put_data(nskb, skb_network_header(oldskb), len);

	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len = htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
164
165 static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
166 {
167 struct ipv6hdr *hdr;
168 u32 pkt_len;
169
170 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
171 return 0;
172
173 hdr = ipv6_hdr(skb);
174 if (hdr->version != 6)
175 return 0;
176
177 pkt_len = ntohs(hdr->payload_len);
178 if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
179 return 0;
180
181 return 1;
182 }
183
/* Send a TCP RST in reply to an IPv6 oldskb, forwarded back out through
 * the bridge port (dev) the packet arrived on.
 */
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	const struct tcphdr *oth;
	struct tcphdr _oth;
	unsigned int otcplen;
	struct ipv6hdr *nip6h;

	if (!nft_bridge_ip6hdr_validate(oldskb))
		return;

	/* NULL when no usable TCP header is found behind the extension
	 * headers — no reset is sent then.
	 */
	oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
				     net->ipv6.devconf_all->hop_limit);
	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
	/* Fix up the payload length now that the TCP header is in. */
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
217
/* Validate the transport checksum of an IPv6 packet before generating
 * an ICMPv6 reject reply. Returns true if the packet looks sane enough
 * to reply to.
 */
static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	int thoff;
	__be16 fo;
	u8 proto = ip6h->nexthdr;

	if (skb_csum_unnecessary(skb))
		return true;

	/* Trim padding beyond the advertised payload; payload_len == 0
	 * is left untouched (jumbogram case).
	 */
	if (ip6h->payload_len &&
	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
		return false;

	/* pskb_trim_rcsum() may have reallocated the header. */
	ip6h = ipv6_hdr(skb);
	/* Skip extension headers to find the transport header; refuse
	 * malformed chains and non-first fragments (offset != 0).
	 */
	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
		return false;

	/* Only verify for protocols whose checksum can be checked. */
	if (!nf_reject_verify_csum(proto))
		return true;

	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}
242
/* Send an ICMPv6 destination-unreachable (with the given code) in reply
 * to oldskb, forwarded back out through the bridge port it arrived on.
 */
static void nft_reject_br_send_v6_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct ipv6hdr *nip6h;
	struct icmp6hdr *icmp6h;
	unsigned int len;

	if (!nft_bridge_ip6hdr_validate(oldskb))
		return;

	/* Include as much of the invoking packet as possible without
	 * the reply exceeding the IPv6 minimum MTU (cf. RFC 4443).
	 */
	len = min_t(unsigned int, 1220, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* Don't reply to packets with a bad transport checksum. */
	if (!reject6_br_csum_ok(oldskb, hook))
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
				     net->ipv6.devconf_all->hop_limit);

	skb_reset_transport_header(nskb);
	icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
	icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
	icmp6h->icmp6_code = code;

	/* Embed the quoted original packet after the ICMPv6 header. */
	skb_put_data(nskb, skb_network_header(oldskb), len);
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	icmp6h->icmp6_cksum =
		csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
				nskb->len - sizeof(struct ipv6hdr),
				IPPROTO_ICMPV6,
				csum_partial(icmp6h,
					     nskb->len - sizeof(struct ipv6hdr),
					     0));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
296
/* nf_tables evaluation hook: emit the configured reject reply (ICMP
 * unreachable or TCP RST) for IPv4/IPv6 packets, then always drop the
 * original packet.
 */
static void nft_reject_bridge_eval(const struct nft_expr *expr,
				   struct nft_regs *regs,
				   const struct nft_pktinfo *pkt)
{
	struct nft_reject *priv = nft_expr_priv(expr);
	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;

	/* Never generate replies to broadcast/multicast frames. */
	if (is_broadcast_ether_addr(dest) ||
	    is_multicast_ether_addr(dest))
		goto out;

	switch (eth_hdr(pkt->skb)->h_proto) {
	case htons(ETH_P_IP):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
							nft_in(pkt),
							nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			/* Abstract ICMPX code is mapped to the ICMPv4 code. */
			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      nft_reject_icmp_code(priv->icmp_code));
			break;
		}
		break;
	case htons(ETH_P_IPV6):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
							nft_in(pkt),
							nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			/* Abstract ICMPX code is mapped to the ICMPv6 code. */
			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      nft_reject_icmpv6_code(priv->icmp_code));
			break;
		}
		break;
	default:
		/* No reply can be generated for other protocols; just drop. */
		break;
	}
out:
	regs->verdict.code = NF_DROP;
}
358
359 static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
360 const struct nft_expr *expr,
361 const struct nft_data **data)
362 {
363 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
364 (1 << NF_BR_LOCAL_IN));
365 }
366
367 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
368 const struct nft_expr *expr,
369 const struct nlattr * const tb[])
370 {
371 struct nft_reject *priv = nft_expr_priv(expr);
372 int icmp_code;
373
374 if (tb[NFTA_REJECT_TYPE] == NULL)
375 return -EINVAL;
376
377 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
378 switch (priv->type) {
379 case NFT_REJECT_ICMP_UNREACH:
380 case NFT_REJECT_ICMPX_UNREACH:
381 if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
382 return -EINVAL;
383
384 icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
385 if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
386 icmp_code > NFT_REJECT_ICMPX_MAX)
387 return -EINVAL;
388
389 priv->icmp_code = icmp_code;
390 break;
391 case NFT_REJECT_TCP_RST:
392 break;
393 default:
394 return -EINVAL;
395 }
396 return 0;
397 }
398
399 static int nft_reject_bridge_dump(struct sk_buff *skb,
400 const struct nft_expr *expr)
401 {
402 const struct nft_reject *priv = nft_expr_priv(expr);
403
404 if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
405 goto nla_put_failure;
406
407 switch (priv->type) {
408 case NFT_REJECT_ICMP_UNREACH:
409 case NFT_REJECT_ICMPX_UNREACH:
410 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
411 goto nla_put_failure;
412 break;
413 default:
414 break;
415 }
416
417 return 0;
418
419 nla_put_failure:
420 return -1;
421 }
422
/* Forward declaration so the ops can reference the type below. */
static struct nft_expr_type nft_reject_bridge_type;

/* Expression operations; private data is a struct nft_reject. */
static const struct nft_expr_ops nft_reject_bridge_ops = {
	.type		= &nft_reject_bridge_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
	.eval		= nft_reject_bridge_eval,
	.init		= nft_reject_bridge_init,
	.dump		= nft_reject_bridge_dump,
	.validate	= nft_reject_bridge_validate,
};
432
/* Registration record for the bridge-family "reject" expression. */
static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
	.family		= NFPROTO_BRIDGE,
	.name		= "reject",
	.ops		= &nft_reject_bridge_ops,
	.policy		= nft_reject_policy,
	.maxattr	= NFTA_REJECT_MAX,
	.owner		= THIS_MODULE,
};
441
/* Module entry point: register the expression type with nf_tables. */
static int __init nft_reject_bridge_module_init(void)
{
	return nft_register_expr(&nft_reject_bridge_type);
}
446
/* Module exit point: unregister the expression type. */
static void __exit nft_reject_bridge_module_exit(void)
{
	nft_unregister_expr(&nft_reject_bridge_type);
}
451
/* Module registration and metadata. */
module_init(nft_reject_bridge_module_init);
module_exit(nft_reject_bridge_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");