1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41
42 /*#define DEBUG_IP_FIREWALL*/
43 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
44 /*#define DEBUG_IP_FIREWALL_USER*/
45
46 #ifdef DEBUG_IP_FIREWALL
47 #define dprintf(format, args...) pr_info(format , ## args)
48 #else
49 #define dprintf(format, args...)
50 #endif
51
52 #ifdef DEBUG_IP_FIREWALL_USER
53 #define duprintf(format, args...) pr_info(format , ## args)
54 #else
55 #define duprintf(format, args...)
56 #endif
57
58 #ifdef CONFIG_NETFILTER_DEBUG
59 #define IP_NF_ASSERT(x) WARN_ON(!(x))
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
63
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69
70 void *ip6t_alloc_initial_table(const struct xt_table *info)
71 {
72 return xt_alloc_initial_table(ip6t, IP6T);
73 }
74 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
75
76 /*
77 We keep a set of rules for each CPU, so we can avoid write-locking
78 them in the softirq when updating the counters and therefore
79 only need to read-lock in the softirq; doing a write_lock_bh() in user
80 context stops packets coming through and allows user context to read
81 the counters or update the rules.
82
83 Hence the start of any table is given by get_table() below. */
84
85 /* Returns whether matches rule or not. */
86 /* Performance critical - called for every packet */
87 static inline bool
88 ip6_packet_match(const struct sk_buff *skb,
89 const char *indev,
90 const char *outdev,
91 const struct ip6t_ip6 *ip6info,
92 unsigned int *protoff,
93 int *fragoff, bool *hotdrop)
94 {
95 unsigned long ret;
96 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
97
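/* XOR the comparison result with the rule's inversion flag, so a set
 * IP6T_INV_* bit flips the sense of the test. */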
98 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
99
100 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
101 &ip6info->src), IP6T_INV_SRCIP) ||
102 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
103 &ip6info->dst), IP6T_INV_DSTIP)) {
104 dprintf("Source or dest mismatch.\n");
105 /*
106 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
107 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
108 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
109 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
110 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
111 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
112 return false;
113 }
114
115 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
116
117 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
118 dprintf("VIA in mismatch (%s vs %s).%s\n",
119 indev, ip6info->iniface,
120 ip6info->invflags & IP6T_INV_VIA_IN ? " (INV)" : "");
121 return false;
122 }
123
124 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
125
126 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
127 dprintf("VIA out mismatch (%s vs %s).%s\n",
128 outdev, ip6info->outiface,
129 ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : "");
130 return false;
131 }
132
133 /* ... might want to do something with class and flowlabel here ... */
134
135 /* look for the desired protocol header */
136 if (ip6info->flags & IP6T_F_PROTO) {
137 int protohdr;
138 unsigned short _frag_off;
139
140 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
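/* Header not found: hot-drop unless this is a non-first fragment,
 * which legitimately cannot carry the header we are looking for. */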
141 if (protohdr < 0) {
142 if (_frag_off == 0)
143 *hotdrop = true;
144 return false;
145 }
146 *fragoff = _frag_off;
147
148 dprintf("Packet protocol %hi ?= %s%hi.\n",
149 protohdr,
150 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
151 ip6info->proto);
152
153 if (ip6info->proto == protohdr) {
154 if (ip6info->invflags & IP6T_INV_PROTO)
155 return false;
156
157 return true;
158 }
159
160 /* We need a match for '-p all', too! */
161 if ((ip6info->proto != 0) &&
162 !(ip6info->invflags & IP6T_INV_PROTO))
163 return false;
164 }
165 return true;
166 }
167
168 /* should be ip6 safe */
169 static bool
170 ip6_checkentry(const struct ip6t_ip6 *ipv6)
171 {
172 if (ipv6->flags & ~IP6T_F_MASK) {
173 duprintf("Unknown flag bits set: %08X\n",
174 ipv6->flags & ~IP6T_F_MASK);
175 return false;
176 }
177 if (ipv6->invflags & ~IP6T_INV_MASK) {
178 duprintf("Unknown invflag bits set: %08X\n",
179 ipv6->invflags & ~IP6T_INV_MASK);
180 return false;
181 }
182 return true;
183 }
184
185 static unsigned int
186 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
187 {
188 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
189
190 return NF_DROP;
191 }
192
193 static inline struct ip6t_entry *
194 get_entry(const void *base, unsigned int offset)
195 {
196 return (struct ip6t_entry *)(base + offset);
197 }
198
199 /* All zeroes == unconditional rule. */
200 /* Mildly perf critical (only if packet tracing is on) */
201 static inline bool unconditional(const struct ip6t_entry *e)
202 {
203 static const struct ip6t_ip6 uncond;
204
205 return e->target_offset == sizeof(struct ip6t_entry) &&
206 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
207 }
208
209 static inline const struct xt_entry_target *
210 ip6t_get_target_c(const struct ip6t_entry *e)
211 {
212 return ip6t_get_target((struct ip6t_entry *)e);
213 }
214
215 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
216 /* This cries for unification! */
217 static const char *const hooknames[] = {
218 [NF_INET_PRE_ROUTING] = "PREROUTING",
219 [NF_INET_LOCAL_IN] = "INPUT",
220 [NF_INET_FORWARD] = "FORWARD",
221 [NF_INET_LOCAL_OUT] = "OUTPUT",
222 [NF_INET_POST_ROUTING] = "POSTROUTING",
223 };
224
225 enum nf_ip_trace_comments {
226 NF_IP6_TRACE_COMMENT_RULE,
227 NF_IP6_TRACE_COMMENT_RETURN,
228 NF_IP6_TRACE_COMMENT_POLICY,
229 };
230
231 static const char *const comments[] = {
232 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
233 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
234 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
235 };
236
237 static struct nf_loginfo trace_loginfo = {
238 .type = NF_LOG_TYPE_LOG,
239 .u = {
240 .log = {
241 .level = LOGLEVEL_WARNING,
242 .logflags = NF_LOG_MASK,
243 },
244 },
245 };
246
247 /* Mildly perf critical (only if packet tracing is on) */
248 static inline int
249 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
250 const char *hookname, const char **chainname,
251 const char **comment, unsigned int *rulenum)
252 {
253 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
254
255 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
256 /* Head of user chain: ERROR target with chainname */
257 *chainname = t->target.data;
258 (*rulenum) = 0;
259 } else if (s == e) {
260 (*rulenum)++;
261
262 if (unconditional(s) &&
263 strcmp(t->target.u.kernel.target->name,
264 XT_STANDARD_TARGET) == 0 &&
265 t->verdict < 0) {
266 /* Tail of chains: STANDARD target (return/policy) */
267 *comment = *chainname == hookname
268 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
269 : comments[NF_IP6_TRACE_COMMENT_RETURN];
270 }
271 return 1;
272 } else
273 (*rulenum)++;
274
275 return 0;
276 }
277
278 static void trace_packet(struct net *net,
279 const struct sk_buff *skb,
280 unsigned int hook,
281 const struct net_device *in,
282 const struct net_device *out,
283 const char *tablename,
284 const struct xt_table_info *private,
285 const struct ip6t_entry *e)
286 {
287 const struct ip6t_entry *root;
288 const char *hookname, *chainname, *comment;
289 const struct ip6t_entry *iter;
290 unsigned int rulenum = 0;
291
292 root = get_entry(private->entries, private->hook_entry[hook]);
293
294 hookname = chainname = hooknames[hook];
295 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
296
297 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
298 if (get_chainname_rulenum(iter, e, hookname,
299 &chainname, &comment, &rulenum) != 0)
300 break;
301
302 nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
303 "TRACE: %s:%s:%s:%u ",
304 tablename, chainname, comment, rulenum);
305 }
306 #endif
307
308 static inline struct ip6t_entry *
309 ip6t_next_entry(const struct ip6t_entry *entry)
310 {
311 return (void *)entry + entry->next_offset;
312 }
313
314 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
315 unsigned int
316 ip6t_do_table(struct sk_buff *skb,
317 const struct nf_hook_state *state,
318 struct xt_table *table)
319 {
320 unsigned int hook = state->hook;
321 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
322 /* Initializing verdict to NF_DROP keeps gcc happy. */
323 unsigned int verdict = NF_DROP;
324 const char *indev, *outdev;
325 const void *table_base;
326 struct ip6t_entry *e, **jumpstack;
327 unsigned int stackidx, cpu;
328 const struct xt_table_info *private;
329 struct xt_action_param acpar;
330 unsigned int addend;
331
332 /* Initialization */
333 stackidx = 0;
334 indev = state->in ? state->in->name : nulldevname;
335 outdev = state->out ? state->out->name : nulldevname;
336 /* We handle fragments by dealing with the first fragment as
337 * if it was a normal packet. All other fragments are treated
338 * normally, except that they will NEVER match rules that ask
339 * things we don't know (i.e. tcp syn flag or ports). If the
340 * rule is also a fragment-specific rule, non-fragments won't
341 * match it. */
342 acpar.hotdrop = false;
343 acpar.net = state->net;
344 acpar.in = state->in;
345 acpar.out = state->out;
346 acpar.family = NFPROTO_IPV6;
347 acpar.hooknum = hook;
348
349 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
350
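/* Counter updates are bracketed by xt_write_recseq so that get_counters()
 * can detect and retry around concurrent writers; bottom halves stay
 * disabled for the whole traversal. */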
351 local_bh_disable();
352 addend = xt_write_recseq_begin();
353 private = table->private;
354 /*
355 * Ensure we load private-> members after we've fetched the base
356 * pointer.
357 */
358 smp_read_barrier_depends();
359 cpu = smp_processor_id();
360 table_base = private->entries;
361 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
362
363 /* Switch to alternate jumpstack if we're being invoked via TEE.
364 * TEE issues XT_CONTINUE verdict on original skb so we must not
365 * clobber the jumpstack.
366 *
367 * For recursion via REJECT or SYNPROXY the stack will be clobbered
368 * but it is no problem since absolute verdict is issued by these.
369 */
370 if (static_key_false(&xt_tee_enabled))
371 jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
372
373 e = get_entry(table_base, private->hook_entry[hook]);
374
375 do {
376 const struct xt_entry_target *t;
377 const struct xt_entry_match *ematch;
378 struct xt_counters *counter;
379
380 IP_NF_ASSERT(e);
381 acpar.thoff = 0;
382 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
383 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
384 no_match:
385 e = ip6t_next_entry(e);
386 continue;
387 }
388
389 xt_ematch_foreach(ematch, e) {
390 acpar.match = ematch->u.kernel.match;
391 acpar.matchinfo = ematch->data;
392 if (!acpar.match->match(skb, &acpar))
393 goto no_match;
394 }
395
396 counter = xt_get_this_cpu_counter(&e->counters);
397 ADD_COUNTER(*counter, skb->len, 1);
398
399 t = ip6t_get_target_c(e);
400 IP_NF_ASSERT(t->u.kernel.target);
401
402 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
403 /* The packet is traced: log it */
404 if (unlikely(skb->nf_trace))
405 trace_packet(state->net, skb, hook, state->in,
406 state->out, table->name, private, e);
407 #endif
408 /* Standard target? */
409 if (!t->u.kernel.target->target) {
410 int v;
411
412 v = ((struct xt_standard_target *)t)->verdict;
413 if (v < 0) {
414 /* Pop from stack? */
415 if (v != XT_RETURN) {
416 verdict = (unsigned int)(-v) - 1;
417 break;
418 }
419 if (stackidx == 0)
420 e = get_entry(table_base,
421 private->underflow[hook]);
422 else
423 e = ip6t_next_entry(jumpstack[--stackidx]);
424 continue;
425 }
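/* Jump to another chain (not a goto and not plain fall-through):
 * push the current rule so RETURN can come back to it. */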
426 if (table_base + v != ip6t_next_entry(e) &&
427 !(e->ipv6.flags & IP6T_F_GOTO)) {
428 jumpstack[stackidx++] = e;
429 }
430
431 e = get_entry(table_base, v);
432 continue;
433 }
434
435 acpar.target = t->u.kernel.target;
436 acpar.targinfo = t->data;
437
438 verdict = t->u.kernel.target->target(skb, &acpar);
439 if (verdict == XT_CONTINUE)
440 e = ip6t_next_entry(e);
441 else
442 /* Verdict */
443 break;
444 } while (!acpar.hotdrop);
445
446 xt_write_recseq_end(addend);
447 local_bh_enable();
448
449 #ifdef DEBUG_ALLOW_ALL
450 return NF_ACCEPT;
451 #else
452 if (acpar.hotdrop)
453 return NF_DROP;
454 else return verdict;
455 #endif
456 }
457
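/* A user-supplied jump offset is only valid if it lands exactly on the
 * start of an existing entry; anything else is rejected. */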
458 static bool find_jump_target(const struct xt_table_info *t,
459 const struct ip6t_entry *target)
460 {
461 struct ip6t_entry *iter;
462
463 xt_entry_foreach(iter, t->entries, t->size) {
464 if (iter == target)
465 return true;
466 }
467 return false;
468 }
469
470 /* Figures out from what hook each rule can be called: returns 0 if
471 there are loops. Puts hook bitmask in comefrom. */
472 static int
473 mark_source_chains(const struct xt_table_info *newinfo,
474 unsigned int valid_hooks, void *entry0)
475 {
476 unsigned int hook;
477
478 /* No recursion; use packet counter to save back ptrs (reset
479 to 0 as we leave), and comefrom to save source hook bitmask */
480 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
481 unsigned int pos = newinfo->hook_entry[hook];
482 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
483
484 if (!(valid_hooks & (1 << hook)))
485 continue;
486
487 /* Set initial back pointer. */
488 e->counters.pcnt = pos;
489
490 for (;;) {
491 const struct xt_standard_target *t
492 = (void *)ip6t_get_target_c(e);
493 int visited = e->comefrom & (1 << hook);
494
495 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
496 pr_err("ip6tables: loop hook %u pos %u %08X.\n",
497 hook, pos, e->comefrom);
498 return 0;
499 }
500 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
501
502 /* Unconditional return/END. */
503 if ((unconditional(e) &&
504 (strcmp(t->target.u.user.name,
505 XT_STANDARD_TARGET) == 0) &&
506 t->verdict < 0) || visited) {
507 unsigned int oldpos, size;
508
509 if ((strcmp(t->target.u.user.name,
510 XT_STANDARD_TARGET) == 0) &&
511 t->verdict < -NF_MAX_VERDICT - 1) {
512 duprintf("mark_source_chains: bad "
513 "negative verdict (%i)\n",
514 t->verdict);
515 return 0;
516 }
517
518 /* Return: backtrack through the last
519 big jump. */
520 do {
521 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
522 #ifdef DEBUG_IP_FIREWALL_USER
523 if (e->comefrom
524 & (1 << NF_INET_NUMHOOKS)) {
525 duprintf("Back unset "
526 "on hook %u "
527 "rule %u\n",
528 hook, pos);
529 }
530 #endif
531 oldpos = pos;
532 pos = e->counters.pcnt;
533 e->counters.pcnt = 0;
534
535 /* We're at the start. */
536 if (pos == oldpos)
537 goto next;
538
539 e = (struct ip6t_entry *)
540 (entry0 + pos);
541 } while (oldpos == pos + e->next_offset);
542
543 /* Move along one */
544 size = e->next_offset;
545 e = (struct ip6t_entry *)
546 (entry0 + pos + size);
547 if (pos + size >= newinfo->size)
548 return 0;
549 e->counters.pcnt = pos;
550 pos += size;
551 } else {
552 int newpos = t->verdict;
553
554 if (strcmp(t->target.u.user.name,
555 XT_STANDARD_TARGET) == 0 &&
556 newpos >= 0) {
557 if (newpos > newinfo->size -
558 sizeof(struct ip6t_entry)) {
559 duprintf("mark_source_chains: "
560 "bad verdict (%i)\n",
561 newpos);
562 return 0;
563 }
564 /* This a jump; chase it. */
565 duprintf("Jump rule %u -> %u\n",
566 pos, newpos);
567 e = (struct ip6t_entry *)
568 (entry0 + newpos);
569 if (!find_jump_target(newinfo, e))
570 return 0;
571 } else {
572 /* ... this is a fallthru */
573 newpos = pos + e->next_offset;
574 if (newpos >= newinfo->size)
575 return 0;
576 }
577 e = (struct ip6t_entry *)
578 (entry0 + newpos);
579 e->counters.pcnt = pos;
580 pos = newpos;
581 }
582 }
583 next:
584 duprintf("Finished chain %u\n", hook);
585 }
586 return 1;
587 }
588
589 static void cleanup_match(struct xt_entry_match *m, struct net *net)
590 {
591 struct xt_mtdtor_param par;
592
593 par.net = net;
594 par.match = m->u.kernel.match;
595 par.matchinfo = m->data;
596 par.family = NFPROTO_IPV6;
597 if (par.match->destroy != NULL)
598 par.match->destroy(&par);
599 module_put(par.match->me);
600 }
601
602 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
603 {
604 const struct ip6t_ip6 *ipv6 = par->entryinfo;
605 int ret;
606
607 par->match = m->u.kernel.match;
608 par->matchinfo = m->data;
609
610 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
611 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
612 if (ret < 0) {
613 duprintf("ip_tables: check failed for `%s'.\n",
614 par->match->name);
615 return ret;
616 }
617 return 0;
618 }
619
620 static int
621 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
622 {
623 struct xt_match *match;
624 int ret;
625
626 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
627 m->u.user.revision);
628 if (IS_ERR(match)) {
629 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
630 return PTR_ERR(match);
631 }
632 m->u.kernel.match = match;
633
634 ret = check_match(m, par);
635 if (ret)
636 goto err;
637
638 return 0;
639 err:
640 module_put(m->u.kernel.match->me);
641 return ret;
642 }
643
644 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
645 {
646 struct xt_entry_target *t = ip6t_get_target(e);
647 struct xt_tgchk_param par = {
648 .net = net,
649 .table = name,
650 .entryinfo = e,
651 .target = t->u.kernel.target,
652 .targinfo = t->data,
653 .hook_mask = e->comefrom,
654 .family = NFPROTO_IPV6,
655 };
656 int ret;
657
658 t = ip6t_get_target(e);
659 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
660 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
661 if (ret < 0) {
662 duprintf("ip_tables: check failed for `%s'.\n",
663 t->u.kernel.target->name);
664 return ret;
665 }
666 return 0;
667 }
668
669 static int
670 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
671 unsigned int size)
672 {
673 struct xt_entry_target *t;
674 struct xt_target *target;
675 int ret;
676 unsigned int j;
677 struct xt_mtchk_param mtpar;
678 struct xt_entry_match *ematch;
679
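/* counters.pcnt holds the per-cpu counter reference returned by
 * xt_percpu_counter_alloc(). */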
680 e->counters.pcnt = xt_percpu_counter_alloc();
681 if (IS_ERR_VALUE(e->counters.pcnt))
682 return -ENOMEM;
683
684 j = 0;
685 mtpar.net = net;
686 mtpar.table = name;
687 mtpar.entryinfo = &e->ipv6;
688 mtpar.hook_mask = e->comefrom;
689 mtpar.family = NFPROTO_IPV6;
690 xt_ematch_foreach(ematch, e) {
691 ret = find_check_match(ematch, &mtpar);
692 if (ret != 0)
693 goto cleanup_matches;
694 ++j;
695 }
696
697 t = ip6t_get_target(e);
698 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
699 t->u.user.revision);
700 if (IS_ERR(target)) {
701 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
702 ret = PTR_ERR(target);
703 goto cleanup_matches;
704 }
705 t->u.kernel.target = target;
706
707 ret = check_target(e, net, name);
708 if (ret)
709 goto err;
710 return 0;
711 err:
712 module_put(t->u.kernel.target->me);
713 cleanup_matches:
714 xt_ematch_foreach(ematch, e) {
715 if (j-- == 0)
716 break;
717 cleanup_match(ematch, net);
718 }
719
720 xt_percpu_counter_free(e->counters.pcnt);
721
722 return ret;
723 }
724
725 static bool check_underflow(const struct ip6t_entry *e)
726 {
727 const struct xt_entry_target *t;
728 unsigned int verdict;
729
730 if (!unconditional(e))
731 return false;
732 t = ip6t_get_target_c(e);
733 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
734 return false;
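/* Standard verdicts are stored as -(NF_*) - 1; undo that encoding
 * before checking for ACCEPT or DROP. */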
735 verdict = ((struct xt_standard_target *)t)->verdict;
736 verdict = -verdict - 1;
737 return verdict == NF_DROP || verdict == NF_ACCEPT;
738 }
739
740 static int
741 check_entry_size_and_hooks(struct ip6t_entry *e,
742 struct xt_table_info *newinfo,
743 const unsigned char *base,
744 const unsigned char *limit,
745 const unsigned int *hook_entries,
746 const unsigned int *underflows,
747 unsigned int valid_hooks)
748 {
749 unsigned int h;
750 int err;
751
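/* The entry must be properly aligned and must fit, together with its
 * target, inside the blob supplied by userspace. */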
752 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
753 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
754 (unsigned char *)e + e->next_offset > limit) {
755 duprintf("Bad offset %p\n", e);
756 return -EINVAL;
757 }
758
759 if (e->next_offset
760 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
761 duprintf("checking: element %p size %u\n",
762 e, e->next_offset);
763 return -EINVAL;
764 }
765
766 if (!ip6_checkentry(&e->ipv6))
767 return -EINVAL;
768
769 err = xt_check_entry_offsets(e, e->elems, e->target_offset,
770 e->next_offset);
771 if (err)
772 return err;
773
774 /* Check hooks & underflows */
775 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
776 if (!(valid_hooks & (1 << h)))
777 continue;
778 if ((unsigned char *)e - base == hook_entries[h])
779 newinfo->hook_entry[h] = hook_entries[h];
780 if ((unsigned char *)e - base == underflows[h]) {
781 if (!check_underflow(e)) {
782 pr_debug("Underflows must be unconditional and "
783 "use the STANDARD target with "
784 "ACCEPT/DROP\n");
785 return -EINVAL;
786 }
787 newinfo->underflow[h] = underflows[h];
788 }
789 }
790
791 /* Clear counters and comefrom */
792 e->counters = ((struct xt_counters) { 0, 0 });
793 e->comefrom = 0;
794 return 0;
795 }
796
797 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
798 {
799 struct xt_tgdtor_param par;
800 struct xt_entry_target *t;
801 struct xt_entry_match *ematch;
802
803 /* Cleanup all matches */
804 xt_ematch_foreach(ematch, e)
805 cleanup_match(ematch, net);
806 t = ip6t_get_target(e);
807
808 par.net = net;
809 par.target = t->u.kernel.target;
810 par.targinfo = t->data;
811 par.family = NFPROTO_IPV6;
812 if (par.target->destroy != NULL)
813 par.target->destroy(&par);
814 module_put(par.target->me);
815
816 xt_percpu_counter_free(e->counters.pcnt);
817 }
818
819 /* Checks and translates the user-supplied table segment (held in
820 newinfo) */
821 static int
822 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
823 const struct ip6t_replace *repl)
824 {
825 struct ip6t_entry *iter;
826 unsigned int i;
827 int ret = 0;
828
829 newinfo->size = repl->size;
830 newinfo->number = repl->num_entries;
831
832 /* Init all hooks to impossible value. */
833 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
834 newinfo->hook_entry[i] = 0xFFFFFFFF;
835 newinfo->underflow[i] = 0xFFFFFFFF;
836 }
837
838 duprintf("translate_table: size %u\n", newinfo->size);
839 i = 0;
840 /* Walk through entries, checking offsets. */
841 xt_entry_foreach(iter, entry0, newinfo->size) {
842 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
843 entry0 + repl->size,
844 repl->hook_entry,
845 repl->underflow,
846 repl->valid_hooks);
847 if (ret != 0)
848 return ret;
849 ++i;
850 if (strcmp(ip6t_get_target(iter)->u.user.name,
851 XT_ERROR_TARGET) == 0)
852 ++newinfo->stacksize;
853 }
854
855 if (i != repl->num_entries) {
856 duprintf("translate_table: %u not %u entries\n",
857 i, repl->num_entries);
858 return -EINVAL;
859 }
860
861 /* Check hooks all assigned */
862 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
863 /* Only hooks which are valid */
864 if (!(repl->valid_hooks & (1 << i)))
865 continue;
866 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
867 duprintf("Invalid hook entry %u %u\n",
868 i, repl->hook_entry[i]);
869 return -EINVAL;
870 }
871 if (newinfo->underflow[i] == 0xFFFFFFFF) {
872 duprintf("Invalid underflow %u %u\n",
873 i, repl->underflow[i]);
874 return -EINVAL;
875 }
876 }
877
878 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
879 return -ELOOP;
880
881 /* Finally, each sanity check must pass */
882 i = 0;
883 xt_entry_foreach(iter, entry0, newinfo->size) {
884 ret = find_check_entry(iter, net, repl->name, repl->size);
885 if (ret != 0)
886 break;
887 ++i;
888 }
889
890 if (ret != 0) {
891 xt_entry_foreach(iter, entry0, newinfo->size) {
892 if (i-- == 0)
893 break;
894 cleanup_entry(iter, net);
895 }
896 return ret;
897 }
898
899 return ret;
900 }
901
902 static void
903 get_counters(const struct xt_table_info *t,
904 struct xt_counters counters[])
905 {
906 struct ip6t_entry *iter;
907 unsigned int cpu;
908 unsigned int i;
909
910 for_each_possible_cpu(cpu) {
911 seqcount_t *s = &per_cpu(xt_recseq, cpu);
912
913 i = 0;
914 xt_entry_foreach(iter, t->entries, t->size) {
915 struct xt_counters *tmp;
916 u64 bcnt, pcnt;
917 unsigned int start;
918
919 tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
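/* Retry until we read a snapshot that no writer touched in the meantime. */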
920 do {
921 start = read_seqcount_begin(s);
922 bcnt = tmp->bcnt;
923 pcnt = tmp->pcnt;
924 } while (read_seqcount_retry(s, start));
925
926 ADD_COUNTER(counters[i], bcnt, pcnt);
927 ++i;
928 }
929 }
930 }
931
932 static struct xt_counters *alloc_counters(const struct xt_table *table)
933 {
934 unsigned int countersize;
935 struct xt_counters *counters;
936 const struct xt_table_info *private = table->private;
937
938 /* We need atomic snapshot of counters: rest doesn't change
939 (other than comefrom, which userspace doesn't care
940 about). */
941 countersize = sizeof(struct xt_counters) * private->number;
942 counters = vzalloc(countersize);
943
944 if (counters == NULL)
945 return ERR_PTR(-ENOMEM);
946
947 get_counters(private, counters);
948
949 return counters;
950 }
951
952 static int
953 copy_entries_to_user(unsigned int total_size,
954 const struct xt_table *table,
955 void __user *userptr)
956 {
957 unsigned int off, num;
958 const struct ip6t_entry *e;
959 struct xt_counters *counters;
960 const struct xt_table_info *private = table->private;
961 int ret = 0;
962 const void *loc_cpu_entry;
963
964 counters = alloc_counters(table);
965 if (IS_ERR(counters))
966 return PTR_ERR(counters);
967
968 loc_cpu_entry = private->entries;
969 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
970 ret = -EFAULT;
971 goto free_counters;
972 }
973
974 /* FIXME: use iterator macros --RR */
975 /* ... then go back and fix counters and names */
976 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
977 unsigned int i;
978 const struct xt_entry_match *m;
979 const struct xt_entry_target *t;
980
981 e = (struct ip6t_entry *)(loc_cpu_entry + off);
982 if (copy_to_user(userptr + off
983 + offsetof(struct ip6t_entry, counters),
984 &counters[num],
985 sizeof(counters[num])) != 0) {
986 ret = -EFAULT;
987 goto free_counters;
988 }
989
990 for (i = sizeof(struct ip6t_entry);
991 i < e->target_offset;
992 i += m->u.match_size) {
993 m = (void *)e + i;
994
995 if (copy_to_user(userptr + off + i
996 + offsetof(struct xt_entry_match,
997 u.user.name),
998 m->u.kernel.match->name,
999 strlen(m->u.kernel.match->name)+1)
1000 != 0) {
1001 ret = -EFAULT;
1002 goto free_counters;
1003 }
1004 }
1005
1006 t = ip6t_get_target_c(e);
1007 if (copy_to_user(userptr + off + e->target_offset
1008 + offsetof(struct xt_entry_target,
1009 u.user.name),
1010 t->u.kernel.target->name,
1011 strlen(t->u.kernel.target->name)+1) != 0) {
1012 ret = -EFAULT;
1013 goto free_counters;
1014 }
1015 }
1016
1017 free_counters:
1018 vfree(counters);
1019 return ret;
1020 }
1021
1022 #ifdef CONFIG_COMPAT
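/* Standard-target verdicts that encode jump offsets have to be shifted by
 * the accumulated size difference between compat and native entries. */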
1023 static void compat_standard_from_user(void *dst, const void *src)
1024 {
1025 int v = *(compat_int_t *)src;
1026
1027 if (v > 0)
1028 v += xt_compat_calc_jump(AF_INET6, v);
1029 memcpy(dst, &v, sizeof(v));
1030 }
1031
1032 static int compat_standard_to_user(void __user *dst, const void *src)
1033 {
1034 compat_int_t cv = *(int *)src;
1035
1036 if (cv > 0)
1037 cv -= xt_compat_calc_jump(AF_INET6, cv);
1038 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1039 }
1040
1041 static int compat_calc_entry(const struct ip6t_entry *e,
1042 const struct xt_table_info *info,
1043 const void *base, struct xt_table_info *newinfo)
1044 {
1045 const struct xt_entry_match *ematch;
1046 const struct xt_entry_target *t;
1047 unsigned int entry_offset;
1048 int off, i, ret;
1049
1050 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1051 entry_offset = (void *)e - base;
1052 xt_ematch_foreach(ematch, e)
1053 off += xt_compat_match_offset(ematch->u.kernel.match);
1054 t = ip6t_get_target_c(e);
1055 off += xt_compat_target_offset(t->u.kernel.target);
1056 newinfo->size -= off;
1057 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1058 if (ret)
1059 return ret;
1060
1061 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1062 if (info->hook_entry[i] &&
1063 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1064 newinfo->hook_entry[i] -= off;
1065 if (info->underflow[i] &&
1066 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1067 newinfo->underflow[i] -= off;
1068 }
1069 return 0;
1070 }
1071
1072 static int compat_table_info(const struct xt_table_info *info,
1073 struct xt_table_info *newinfo)
1074 {
1075 struct ip6t_entry *iter;
1076 const void *loc_cpu_entry;
1077 int ret;
1078
1079 if (!newinfo || !info)
1080 return -EINVAL;
1081
1082 /* we dont care about newinfo->entries */
1083 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1084 newinfo->initial_entries = 0;
1085 loc_cpu_entry = info->entries;
1086 xt_compat_init_offsets(AF_INET6, info->number);
1087 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1088 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1089 if (ret != 0)
1090 return ret;
1091 }
1092 return 0;
1093 }
1094 #endif
1095
1096 static int get_info(struct net *net, void __user *user,
1097 const int *len, int compat)
1098 {
1099 char name[XT_TABLE_MAXNAMELEN];
1100 struct xt_table *t;
1101 int ret;
1102
1103 if (*len != sizeof(struct ip6t_getinfo)) {
1104 duprintf("length %u != %zu\n", *len,
1105 sizeof(struct ip6t_getinfo));
1106 return -EINVAL;
1107 }
1108
1109 if (copy_from_user(name, user, sizeof(name)) != 0)
1110 return -EFAULT;
1111
1112 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1113 #ifdef CONFIG_COMPAT
1114 if (compat)
1115 xt_compat_lock(AF_INET6);
1116 #endif
1117 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1118 "ip6table_%s", name);
1119 if (!IS_ERR_OR_NULL(t)) {
1120 struct ip6t_getinfo info;
1121 const struct xt_table_info *private = t->private;
1122 #ifdef CONFIG_COMPAT
1123 struct xt_table_info tmp;
1124
1125 if (compat) {
1126 ret = compat_table_info(private, &tmp);
1127 xt_compat_flush_offsets(AF_INET6);
1128 private = &tmp;
1129 }
1130 #endif
1131 memset(&info, 0, sizeof(info));
1132 info.valid_hooks = t->valid_hooks;
1133 memcpy(info.hook_entry, private->hook_entry,
1134 sizeof(info.hook_entry));
1135 memcpy(info.underflow, private->underflow,
1136 sizeof(info.underflow));
1137 info.num_entries = private->number;
1138 info.size = private->size;
1139 strcpy(info.name, name);
1140
1141 if (copy_to_user(user, &info, *len) != 0)
1142 ret = -EFAULT;
1143 else
1144 ret = 0;
1145
1146 xt_table_unlock(t);
1147 module_put(t->me);
1148 } else
1149 ret = t ? PTR_ERR(t) : -ENOENT;
1150 #ifdef CONFIG_COMPAT
1151 if (compat)
1152 xt_compat_unlock(AF_INET6);
1153 #endif
1154 return ret;
1155 }
1156
1157 static int
1158 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1159 const int *len)
1160 {
1161 int ret;
1162 struct ip6t_get_entries get;
1163 struct xt_table *t;
1164
1165 if (*len < sizeof(get)) {
1166 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1167 return -EINVAL;
1168 }
1169 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1170 return -EFAULT;
1171 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1172 duprintf("get_entries: %u != %zu\n",
1173 *len, sizeof(get) + get.size);
1174 return -EINVAL;
1175 }
1176
1177 t = xt_find_table_lock(net, AF_INET6, get.name);
1178 if (!IS_ERR_OR_NULL(t)) {
1179 struct xt_table_info *private = t->private;
1180 duprintf("t->private->number = %u\n", private->number);
1181 if (get.size == private->size)
1182 ret = copy_entries_to_user(private->size,
1183 t, uptr->entrytable);
1184 else {
1185 duprintf("get_entries: I've got %u not %u!\n",
1186 private->size, get.size);
1187 ret = -EAGAIN;
1188 }
1189 module_put(t->me);
1190 xt_table_unlock(t);
1191 } else
1192 ret = t ? PTR_ERR(t) : -ENOENT;
1193
1194 return ret;
1195 }
1196
1197 static int
1198 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1199 struct xt_table_info *newinfo, unsigned int num_counters,
1200 void __user *counters_ptr)
1201 {
1202 int ret;
1203 struct xt_table *t;
1204 struct xt_table_info *oldinfo;
1205 struct xt_counters *counters;
1206 struct ip6t_entry *iter;
1207
1208 ret = 0;
1209 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1210 if (!counters) {
1211 ret = -ENOMEM;
1212 goto out;
1213 }
1214
1215 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1216 "ip6table_%s", name);
1217 if (IS_ERR_OR_NULL(t)) {
1218 ret = t ? PTR_ERR(t) : -ENOENT;
1219 goto free_newinfo_counters_untrans;
1220 }
1221
1222 /* You lied! */
1223 if (valid_hooks != t->valid_hooks) {
1224 duprintf("Valid hook crap: %08X vs %08X\n",
1225 valid_hooks, t->valid_hooks);
1226 ret = -EINVAL;
1227 goto put_module;
1228 }
1229
1230 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1231 if (!oldinfo)
1232 goto put_module;
1233
1234 /* Update module usage count based on number of rules */
1235 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1236 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1237 if ((oldinfo->number > oldinfo->initial_entries) ||
1238 (newinfo->number <= oldinfo->initial_entries))
1239 module_put(t->me);
1240 if ((oldinfo->number > oldinfo->initial_entries) &&
1241 (newinfo->number <= oldinfo->initial_entries))
1242 module_put(t->me);
1243
1244 /* Get the old counters, and synchronize with replace */
1245 get_counters(oldinfo, counters);
1246
1247 /* Decrease module usage counts and free resource */
1248 xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1249 cleanup_entry(iter, net);
1250
1251 xt_free_table_info(oldinfo);
1252 if (copy_to_user(counters_ptr, counters,
1253 sizeof(struct xt_counters) * num_counters) != 0) {
1254 /* Silent error, can't fail, new table is already in place */
1255 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1256 }
1257 vfree(counters);
1258 xt_table_unlock(t);
1259 return ret;
1260
1261 put_module:
1262 module_put(t->me);
1263 xt_table_unlock(t);
1264 free_newinfo_counters_untrans:
1265 vfree(counters);
1266 out:
1267 return ret;
1268 }
1269
1270 static int
1271 do_replace(struct net *net, const void __user *user, unsigned int len)
1272 {
1273 int ret;
1274 struct ip6t_replace tmp;
1275 struct xt_table_info *newinfo;
1276 void *loc_cpu_entry;
1277 struct ip6t_entry *iter;
1278
1279 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1280 return -EFAULT;
1281
1282 /* overflow check */
1283 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1284 return -ENOMEM;
1285 if (tmp.num_counters == 0)
1286 return -EINVAL;
1287
1288 tmp.name[sizeof(tmp.name)-1] = 0;
1289
1290 newinfo = xt_alloc_table_info(tmp.size);
1291 if (!newinfo)
1292 return -ENOMEM;
1293
1294 loc_cpu_entry = newinfo->entries;
1295 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1296 tmp.size) != 0) {
1297 ret = -EFAULT;
1298 goto free_newinfo;
1299 }
1300
1301 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1302 if (ret != 0)
1303 goto free_newinfo;
1304
1305 duprintf("ip_tables: Translated table\n");
1306
1307 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1308 tmp.num_counters, tmp.counters);
1309 if (ret)
1310 goto free_newinfo_untrans;
1311 return 0;
1312
1313 free_newinfo_untrans:
1314 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1315 cleanup_entry(iter, net);
1316 free_newinfo:
1317 xt_free_table_info(newinfo);
1318 return ret;
1319 }
1320
1321 static int
1322 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1323 int compat)
1324 {
1325 unsigned int i;
1326 struct xt_counters_info tmp;
1327 struct xt_counters *paddc;
1328 struct xt_table *t;
1329 const struct xt_table_info *private;
1330 int ret = 0;
1331 struct ip6t_entry *iter;
1332 unsigned int addend;
1333
1334 paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
1335 if (IS_ERR(paddc))
1336 return PTR_ERR(paddc);
1337 t = xt_find_table_lock(net, AF_INET6, tmp.name);
1338 if (IS_ERR_OR_NULL(t)) {
1339 ret = t ? PTR_ERR(t) : -ENOENT;
1340 goto free;
1341 }
1342
1343 local_bh_disable();
1344 private = t->private;
1345 if (private->number != tmp.num_counters) {
1346 ret = -EINVAL;
1347 goto unlock_up_free;
1348 }
1349
1350 i = 0;
1351 addend = xt_write_recseq_begin();
1352 xt_entry_foreach(iter, private->entries, private->size) {
1353 struct xt_counters *tmp;
1354
1355 tmp = xt_get_this_cpu_counter(&iter->counters);
1356 ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
1357 ++i;
1358 }
1359 xt_write_recseq_end(addend);
1360 unlock_up_free:
1361 local_bh_enable();
1362 xt_table_unlock(t);
1363 module_put(t->me);
1364 free:
1365 vfree(paddc);
1366
1367 return ret;
1368 }
1369
1370 #ifdef CONFIG_COMPAT
1371 struct compat_ip6t_replace {
1372 char name[XT_TABLE_MAXNAMELEN];
1373 u32 valid_hooks;
1374 u32 num_entries;
1375 u32 size;
1376 u32 hook_entry[NF_INET_NUMHOOKS];
1377 u32 underflow[NF_INET_NUMHOOKS];
1378 u32 num_counters;
1379 compat_uptr_t counters; /* struct xt_counters * */
1380 struct compat_ip6t_entry entries[0];
1381 };
1382
1383 static int
1384 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1385 unsigned int *size, struct xt_counters *counters,
1386 unsigned int i)
1387 {
1388 struct xt_entry_target *t;
1389 struct compat_ip6t_entry __user *ce;
1390 u_int16_t target_offset, next_offset;
1391 compat_uint_t origsize;
1392 const struct xt_entry_match *ematch;
1393 int ret = 0;
1394
1395 origsize = *size;
1396 ce = (struct compat_ip6t_entry __user *)*dstptr;
1397 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1398 copy_to_user(&ce->counters, &counters[i],
1399 sizeof(counters[i])) != 0)
1400 return -EFAULT;
1401
1402 *dstptr += sizeof(struct compat_ip6t_entry);
1403 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1404
1405 xt_ematch_foreach(ematch, e) {
1406 ret = xt_compat_match_to_user(ematch, dstptr, size);
1407 if (ret != 0)
1408 return ret;
1409 }
1410 target_offset = e->target_offset - (origsize - *size);
1411 t = ip6t_get_target(e);
1412 ret = xt_compat_target_to_user(t, dstptr, size);
1413 if (ret)
1414 return ret;
1415 next_offset = e->next_offset - (origsize - *size);
1416 if (put_user(target_offset, &ce->target_offset) != 0 ||
1417 put_user(next_offset, &ce->next_offset) != 0)
1418 return -EFAULT;
1419 return 0;
1420 }
1421
1422 static int
1423 compat_find_calc_match(struct xt_entry_match *m,
1424 const struct ip6t_ip6 *ipv6,
1425 int *size)
1426 {
1427 struct xt_match *match;
1428
1429 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1430 m->u.user.revision);
1431 if (IS_ERR(match)) {
1432 duprintf("compat_check_calc_match: `%s' not found\n",
1433 m->u.user.name);
1434 return PTR_ERR(match);
1435 }
1436 m->u.kernel.match = match;
1437 *size += xt_compat_match_offset(match);
1438 return 0;
1439 }
1440
1441 static void compat_release_entry(struct compat_ip6t_entry *e)
1442 {
1443 struct xt_entry_target *t;
1444 struct xt_entry_match *ematch;
1445
1446 /* Cleanup all matches */
1447 xt_ematch_foreach(ematch, e)
1448 module_put(ematch->u.kernel.match->me);
1449 t = compat_ip6t_get_target(e);
1450 module_put(t->u.kernel.target->me);
1451 }
1452
1453 static int
1454 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1455 struct xt_table_info *newinfo,
1456 unsigned int *size,
1457 const unsigned char *base,
1458 const unsigned char *limit)
1459 {
1460 struct xt_entry_match *ematch;
1461 struct xt_entry_target *t;
1462 struct xt_target *target;
1463 unsigned int entry_offset;
1464 unsigned int j;
1465 int ret, off;
1466
1467 duprintf("check_compat_entry_size_and_hooks %p\n", e);
1468 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1469 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
1470 (unsigned char *)e + e->next_offset > limit) {
1471 duprintf("Bad offset %p, limit = %p\n", e, limit);
1472 return -EINVAL;
1473 }
1474
1475 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1476 sizeof(struct compat_xt_entry_target)) {
1477 duprintf("checking: element %p size %u\n",
1478 e, e->next_offset);
1479 return -EINVAL;
1480 }
1481
1482 if (!ip6_checkentry(&e->ipv6))
1483 return -EINVAL;
1484
1485 ret = xt_compat_check_entry_offsets(e, e->elems,
1486 e->target_offset, e->next_offset);
1487 if (ret)
1488 return ret;
1489
1490 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1491 entry_offset = (void *)e - (void *)base;
1492 j = 0;
1493 xt_ematch_foreach(ematch, e) {
1494 ret = compat_find_calc_match(ematch, &e->ipv6, &off);
1495 if (ret != 0)
1496 goto release_matches;
1497 ++j;
1498 }
1499
1500 t = compat_ip6t_get_target(e);
1501 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1502 t->u.user.revision);
1503 if (IS_ERR(target)) {
1504 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1505 t->u.user.name);
1506 ret = PTR_ERR(target);
1507 goto release_matches;
1508 }
1509 t->u.kernel.target = target;
1510
1511 off += xt_compat_target_offset(target);
1512 *size += off;
1513 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1514 if (ret)
1515 goto out;
1516
1517 return 0;
1518
1519 out:
1520 module_put(t->u.kernel.target->me);
1521 release_matches:
1522 xt_ematch_foreach(ematch, e) {
1523 if (j-- == 0)
1524 break;
1525 module_put(ematch->u.kernel.match->me);
1526 }
1527 return ret;
1528 }
1529
1530 static void
1531 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1532 unsigned int *size,
1533 struct xt_table_info *newinfo, unsigned char *base)
1534 {
1535 struct xt_entry_target *t;
1536 struct ip6t_entry *de;
1537 unsigned int origsize;
1538 int h;
1539 struct xt_entry_match *ematch;
1540
1541 origsize = *size;
1542 de = (struct ip6t_entry *)*dstptr;
1543 memcpy(de, e, sizeof(struct ip6t_entry));
1544 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1545
1546 *dstptr += sizeof(struct ip6t_entry);
1547 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1548
1549 xt_ematch_foreach(ematch, e)
1550 xt_compat_match_from_user(ematch, dstptr, size);
1551
1552 de->target_offset = e->target_offset - (origsize - *size);
1553 t = compat_ip6t_get_target(e);
1554 xt_compat_target_from_user(t, dstptr, size);
1555
1556 de->next_offset = e->next_offset - (origsize - *size);
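/* Entries grow when converted to the native layout, so hook entry points
 * and underflows located after this entry move by the same amount. */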
1557 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1558 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1559 newinfo->hook_entry[h] -= origsize - *size;
1560 if ((unsigned char *)de - base < newinfo->underflow[h])
1561 newinfo->underflow[h] -= origsize - *size;
1562 }
1563 }
1564
1565 static int
1566 translate_compat_table(struct net *net,
1567 struct xt_table_info **pinfo,
1568 void **pentry0,
1569 const struct compat_ip6t_replace *compatr)
1570 {
1571 unsigned int i, j;
1572 struct xt_table_info *newinfo, *info;
1573 void *pos, *entry0, *entry1;
1574 struct compat_ip6t_entry *iter0;
1575 struct ip6t_replace repl;
1576 unsigned int size;
1577 int ret = 0;
1578
1579 info = *pinfo;
1580 entry0 = *pentry0;
1581 size = compatr->size;
1582 info->number = compatr->num_entries;
1583
1584 duprintf("translate_compat_table: size %u\n", info->size);
1585 j = 0;
1586 xt_compat_lock(AF_INET6);
1587 xt_compat_init_offsets(AF_INET6, compatr->num_entries);
1588 /* Walk through entries, checking offsets. */
1589 xt_entry_foreach(iter0, entry0, compatr->size) {
1590 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1591 entry0,
1592 entry0 + compatr->size);
1593 if (ret != 0)
1594 goto out_unlock;
1595 ++j;
1596 }
1597
1598 ret = -EINVAL;
1599 if (j != compatr->num_entries) {
1600 duprintf("translate_compat_table: %u not %u entries\n",
1601 j, compatr->num_entries);
1602 goto out_unlock;
1603 }
1604
1605 ret = -ENOMEM;
1606 newinfo = xt_alloc_table_info(size);
1607 if (!newinfo)
1608 goto out_unlock;
1609
1610 newinfo->number = compatr->num_entries;
1611 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1612 newinfo->hook_entry[i] = compatr->hook_entry[i];
1613 newinfo->underflow[i] = compatr->underflow[i];
1614 }
1615 entry1 = newinfo->entries;
1616 pos = entry1;
1617 size = compatr->size;
1618 xt_entry_foreach(iter0, entry0, compatr->size)
1619 compat_copy_entry_from_user(iter0, &pos, &size,
1620 newinfo, entry1);
1621
1622 /* all module references in entry0 are now gone. */
1623 xt_compat_flush_offsets(AF_INET6);
1624 xt_compat_unlock(AF_INET6);
1625
1626 memcpy(&repl, compatr, sizeof(*compatr));
1627
1628 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1629 repl.hook_entry[i] = newinfo->hook_entry[i];
1630 repl.underflow[i] = newinfo->underflow[i];
1631 }
1632
1633 repl.num_counters = 0;
1634 repl.counters = NULL;
1635 repl.size = newinfo->size;
1636 ret = translate_table(net, newinfo, entry1, &repl);
1637 if (ret)
1638 goto free_newinfo;
1639
1640 *pinfo = newinfo;
1641 *pentry0 = entry1;
1642 xt_free_table_info(info);
1643 return 0;
1644
1645 free_newinfo:
1646 xt_free_table_info(newinfo);
1647 return ret;
1648 out_unlock:
1649 xt_compat_flush_offsets(AF_INET6);
1650 xt_compat_unlock(AF_INET6);
1651 xt_entry_foreach(iter0, entry0, compatr->size) {
1652 if (j-- == 0)
1653 break;
1654 compat_release_entry(iter0);
1655 }
1656 return ret;
1657 }
1658
1659 static int
1660 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1661 {
1662 int ret;
1663 struct compat_ip6t_replace tmp;
1664 struct xt_table_info *newinfo;
1665 void *loc_cpu_entry;
1666 struct ip6t_entry *iter;
1667
1668 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1669 return -EFAULT;
1670
1671 /* overflow check */
1672 if (tmp.size >= INT_MAX / num_possible_cpus())
1673 return -ENOMEM;
1674 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1675 return -ENOMEM;
1676 if (tmp.num_counters == 0)
1677 return -EINVAL;
1678
1679 tmp.name[sizeof(tmp.name)-1] = 0;
1680
1681 newinfo = xt_alloc_table_info(tmp.size);
1682 if (!newinfo)
1683 return -ENOMEM;
1684
1685 loc_cpu_entry = newinfo->entries;
1686 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1687 tmp.size) != 0) {
1688 ret = -EFAULT;
1689 goto free_newinfo;
1690 }
1691
1692 ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
1693 if (ret != 0)
1694 goto free_newinfo;
1695
1696 duprintf("compat_do_replace: Translated table\n");
1697
1698 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1699 tmp.num_counters, compat_ptr(tmp.counters));
1700 if (ret)
1701 goto free_newinfo_untrans;
1702 return 0;
1703
1704 free_newinfo_untrans:
1705 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1706 cleanup_entry(iter, net);
1707 free_newinfo:
1708 xt_free_table_info(newinfo);
1709 return ret;
1710 }
1711
1712 static int
1713 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1714 unsigned int len)
1715 {
1716 int ret;
1717
1718 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1719 return -EPERM;
1720
1721 switch (cmd) {
1722 case IP6T_SO_SET_REPLACE:
1723 ret = compat_do_replace(sock_net(sk), user, len);
1724 break;
1725
1726 case IP6T_SO_SET_ADD_COUNTERS:
1727 ret = do_add_counters(sock_net(sk), user, len, 1);
1728 break;
1729
1730 default:
1731 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1732 ret = -EINVAL;
1733 }
1734
1735 return ret;
1736 }
1737
1738 struct compat_ip6t_get_entries {
1739 char name[XT_TABLE_MAXNAMELEN];
1740 compat_uint_t size;
1741 struct compat_ip6t_entry entrytable[0];
1742 };
1743
1744 static int
1745 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1746 void __user *userptr)
1747 {
1748 struct xt_counters *counters;
1749 const struct xt_table_info *private = table->private;
1750 void __user *pos;
1751 unsigned int size;
1752 int ret = 0;
1753 unsigned int i = 0;
1754 struct ip6t_entry *iter;
1755
1756 counters = alloc_counters(table);
1757 if (IS_ERR(counters))
1758 return PTR_ERR(counters);
1759
1760 pos = userptr;
1761 size = total_size;
1762 xt_entry_foreach(iter, private->entries, total_size) {
1763 ret = compat_copy_entry_to_user(iter, &pos,
1764 &size, counters, i++);
1765 if (ret != 0)
1766 break;
1767 }
1768
1769 vfree(counters);
1770 return ret;
1771 }
1772
1773 static int
1774 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1775 int *len)
1776 {
1777 int ret;
1778 struct compat_ip6t_get_entries get;
1779 struct xt_table *t;
1780
1781 if (*len < sizeof(get)) {
1782 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1783 return -EINVAL;
1784 }
1785
1786 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1787 return -EFAULT;
1788
1789 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1790 duprintf("compat_get_entries: %u != %zu\n",
1791 *len, sizeof(get) + get.size);
1792 return -EINVAL;
1793 }
1794
1795 xt_compat_lock(AF_INET6);
1796 t = xt_find_table_lock(net, AF_INET6, get.name);
1797 if (!IS_ERR_OR_NULL(t)) {
1798 const struct xt_table_info *private = t->private;
1799 struct xt_table_info info;
1800 duprintf("t->private->number = %u\n", private->number);
1801 ret = compat_table_info(private, &info);
1802 if (!ret && get.size == info.size) {
1803 ret = compat_copy_entries_to_user(private->size,
1804 t, uptr->entrytable);
1805 } else if (!ret) {
1806 duprintf("compat_get_entries: I've got %u not %u!\n",
1807 private->size, get.size);
1808 ret = -EAGAIN;
1809 }
1810 xt_compat_flush_offsets(AF_INET6);
1811 module_put(t->me);
1812 xt_table_unlock(t);
1813 } else
1814 ret = t ? PTR_ERR(t) : -ENOENT;
1815
1816 xt_compat_unlock(AF_INET6);
1817 return ret;
1818 }
1819
1820 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1821
1822 static int
1823 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1824 {
1825 int ret;
1826
1827 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1828 return -EPERM;
1829
1830 switch (cmd) {
1831 case IP6T_SO_GET_INFO:
1832 ret = get_info(sock_net(sk), user, len, 1);
1833 break;
1834 case IP6T_SO_GET_ENTRIES:
1835 ret = compat_get_entries(sock_net(sk), user, len);
1836 break;
1837 default:
1838 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1839 }
1840 return ret;
1841 }
1842 #endif
1843
1844 static int
1845 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1846 {
1847 int ret;
1848
1849 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1850 return -EPERM;
1851
1852 switch (cmd) {
1853 case IP6T_SO_SET_REPLACE:
1854 ret = do_replace(sock_net(sk), user, len);
1855 break;
1856
1857 case IP6T_SO_SET_ADD_COUNTERS:
1858 ret = do_add_counters(sock_net(sk), user, len, 0);
1859 break;
1860
1861 default:
1862 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1863 ret = -EINVAL;
1864 }
1865
1866 return ret;
1867 }
1868
1869 static int
1870 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1871 {
1872 int ret;
1873
1874 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1875 return -EPERM;
1876
1877 switch (cmd) {
1878 case IP6T_SO_GET_INFO:
1879 ret = get_info(sock_net(sk), user, len, 0);
1880 break;
1881
1882 case IP6T_SO_GET_ENTRIES:
1883 ret = get_entries(sock_net(sk), user, len);
1884 break;
1885
1886 case IP6T_SO_GET_REVISION_MATCH:
1887 case IP6T_SO_GET_REVISION_TARGET: {
1888 struct xt_get_revision rev;
1889 int target;
1890
1891 if (*len != sizeof(rev)) {
1892 ret = -EINVAL;
1893 break;
1894 }
1895 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1896 ret = -EFAULT;
1897 break;
1898 }
1899 rev.name[sizeof(rev.name)-1] = 0;
1900
1901 if (cmd == IP6T_SO_GET_REVISION_TARGET)
1902 target = 1;
1903 else
1904 target = 0;
1905
1906 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
1907 rev.revision,
1908 target, &ret),
1909 "ip6t_%s", rev.name);
1910 break;
1911 }
1912
1913 default:
1914 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
1915 ret = -EINVAL;
1916 }
1917
1918 return ret;
1919 }
1920
1921 struct xt_table *ip6t_register_table(struct net *net,
1922 const struct xt_table *table,
1923 const struct ip6t_replace *repl)
1924 {
1925 int ret;
1926 struct xt_table_info *newinfo;
1927 struct xt_table_info bootstrap = {0};
1928 void *loc_cpu_entry;
1929 struct xt_table *new_table;
1930
1931 newinfo = xt_alloc_table_info(repl->size);
1932 if (!newinfo) {
1933 ret = -ENOMEM;
1934 goto out;
1935 }
1936
1937 loc_cpu_entry = newinfo->entries;
1938 memcpy(loc_cpu_entry, repl->entries, repl->size);
1939
1940 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
1941 if (ret != 0)
1942 goto out_free;
1943
1944 new_table = xt_register_table(net, table, &bootstrap, newinfo);
1945 if (IS_ERR(new_table)) {
1946 ret = PTR_ERR(new_table);
1947 goto out_free;
1948 }
1949 return new_table;
1950
1951 out_free:
1952 xt_free_table_info(newinfo);
1953 out:
1954 return ERR_PTR(ret);
1955 }
1956
1957 void ip6t_unregister_table(struct net *net, struct xt_table *table)
1958 {
1959 struct xt_table_info *private;
1960 void *loc_cpu_entry;
1961 struct module *table_owner = table->me;
1962 struct ip6t_entry *iter;
1963
1964 private = xt_unregister_table(table);
1965
1966 /* Decrease module usage counts and free resources */
1967 loc_cpu_entry = private->entries;
1968 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1969 cleanup_entry(iter, net);
1970 if (private->number > private->initial_entries)
1971 module_put(table_owner);
1972 xt_free_table_info(private);
1973 }
1974
1975 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
1976 static inline bool
1977 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1978 u_int8_t type, u_int8_t code,
1979 bool invert)
1980 {
1981 return (type == test_type && code >= min_code && code <= max_code)
1982 ^ invert;
1983 }
1984
1985 static bool
1986 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
1987 {
1988 const struct icmp6hdr *ic;
1989 struct icmp6hdr _icmph;
1990 const struct ip6t_icmp *icmpinfo = par->matchinfo;
1991
1992 /* Must not be a fragment. */
1993 if (par->fragoff != 0)
1994 return false;
1995
1996 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
1997 if (ic == NULL) {
1998 /* We've been asked to examine this packet, and we
1999 * can't. Hence, no choice but to drop.
2000 */
2001 duprintf("Dropping evil ICMP tinygram.\n");
2002 par->hotdrop = true;
2003 return false;
2004 }
2005
2006 return icmp6_type_code_match(icmpinfo->type,
2007 icmpinfo->code[0],
2008 icmpinfo->code[1],
2009 ic->icmp6_type, ic->icmp6_code,
2010 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2011 }
2012
2013 /* Called when user tries to insert an entry of this type. */
2014 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2015 {
2016 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2017
2018 /* Must specify no unknown invflags */
2019 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2020 }
2021
2022 /* The built-in targets: standard (NULL) and error. */
2023 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2024 {
2025 .name = XT_STANDARD_TARGET,
2026 .targetsize = sizeof(int),
2027 .family = NFPROTO_IPV6,
2028 #ifdef CONFIG_COMPAT
2029 .compatsize = sizeof(compat_int_t),
2030 .compat_from_user = compat_standard_from_user,
2031 .compat_to_user = compat_standard_to_user,
2032 #endif
2033 },
2034 {
2035 .name = XT_ERROR_TARGET,
2036 .target = ip6t_error,
2037 .targetsize = XT_FUNCTION_MAXNAMELEN,
2038 .family = NFPROTO_IPV6,
2039 },
2040 };
2041
2042 static struct nf_sockopt_ops ip6t_sockopts = {
2043 .pf = PF_INET6,
2044 .set_optmin = IP6T_BASE_CTL,
2045 .set_optmax = IP6T_SO_SET_MAX+1,
2046 .set = do_ip6t_set_ctl,
2047 #ifdef CONFIG_COMPAT
2048 .compat_set = compat_do_ip6t_set_ctl,
2049 #endif
2050 .get_optmin = IP6T_BASE_CTL,
2051 .get_optmax = IP6T_SO_GET_MAX+1,
2052 .get = do_ip6t_get_ctl,
2053 #ifdef CONFIG_COMPAT
2054 .compat_get = compat_do_ip6t_get_ctl,
2055 #endif
2056 .owner = THIS_MODULE,
2057 };
2058
2059 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2060 {
2061 .name = "icmp6",
2062 .match = icmp6_match,
2063 .matchsize = sizeof(struct ip6t_icmp),
2064 .checkentry = icmp6_checkentry,
2065 .proto = IPPROTO_ICMPV6,
2066 .family = NFPROTO_IPV6,
2067 },
2068 };
2069
2070 static int __net_init ip6_tables_net_init(struct net *net)
2071 {
2072 return xt_proto_init(net, NFPROTO_IPV6);
2073 }
2074
2075 static void __net_exit ip6_tables_net_exit(struct net *net)
2076 {
2077 xt_proto_fini(net, NFPROTO_IPV6);
2078 }
2079
2080 static struct pernet_operations ip6_tables_net_ops = {
2081 .init = ip6_tables_net_init,
2082 .exit = ip6_tables_net_exit,
2083 };
2084
2085 static int __init ip6_tables_init(void)
2086 {
2087 int ret;
2088
2089 ret = register_pernet_subsys(&ip6_tables_net_ops);
2090 if (ret < 0)
2091 goto err1;
2092
2093 /* No one else will be downing sem now, so we won't sleep */
2094 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2095 if (ret < 0)
2096 goto err2;
2097 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2098 if (ret < 0)
2099 goto err4;
2100
2101 /* Register setsockopt */
2102 ret = nf_register_sockopt(&ip6t_sockopts);
2103 if (ret < 0)
2104 goto err5;
2105
2106 pr_info("(C) 2000-2006 Netfilter Core Team\n");
2107 return 0;
2108
2109 err5:
2110 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2111 err4:
2112 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2113 err2:
2114 unregister_pernet_subsys(&ip6_tables_net_ops);
2115 err1:
2116 return ret;
2117 }
2118
2119 static void __exit ip6_tables_fini(void)
2120 {
2121 nf_unregister_sockopt(&ip6t_sockopts);
2122
2123 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2124 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2125 unregister_pernet_subsys(&ip6_tables_net_ops);
2126 }
2127
2128 EXPORT_SYMBOL(ip6t_register_table);
2129 EXPORT_SYMBOL(ip6t_unregister_table);
2130 EXPORT_SYMBOL(ip6t_do_table);
2131
2132 module_init(ip6_tables_init);
2133 module_exit(ip6_tables_fini);
2134