1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32 
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37 
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41 
42 /*#define DEBUG_IP_FIREWALL*/
43 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
44 /*#define DEBUG_IP_FIREWALL_USER*/
45 
46 #ifdef DEBUG_IP_FIREWALL
47 #define dprintf(format, args...) pr_info(format , ## args)
48 #else
49 #define dprintf(format, args...)
50 #endif
51 
52 #ifdef DEBUG_IP_FIREWALL_USER
53 #define duprintf(format, args...) pr_info(format , ## args)
54 #else
55 #define duprintf(format, args...)
56 #endif
57 
58 #ifdef CONFIG_NETFILTER_DEBUG
59 #define IP_NF_ASSERT(x)	WARN_ON(!(x))
60 #else
61 #define IP_NF_ASSERT(x)
62 #endif
63 
64 #if 0
65 /* All the better to debug you with... */
66 #define static
67 #define inline
68 #endif
69 
70 void *ip6t_alloc_initial_table(const struct xt_table *info)
71 {
72 	return xt_alloc_initial_table(ip6t, IP6T);
73 }
74 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
75 
76 /*
77    We keep a set of rules for each CPU, so we can avoid write-locking
78    them in the softirq when updating the counters and therefore
79    only need to read-lock in the softirq; doing a write_lock_bh() in user
80    context stops packets coming through and allows user context to read
81    the counters or update the rules.
82 
83    Hence the start of any table is given by get_table() below.  */
84 
85 /* Returns whether matches rule or not. */
86 /* Performance critical - called for every packet */
87 static inline bool
88 ip6_packet_match(const struct sk_buff *skb,
89 		 const char *indev,
90 		 const char *outdev,
91 		 const struct ip6t_ip6 *ip6info,
92 		 unsigned int *protoff,
93 		 int *fragoff, bool *hotdrop)
94 {
95 	unsigned long ret;
96 	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
97 
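/* FWINV() XORs the raw test with the rule's inversion flag, so a
 * '!'-negated match succeeds exactly when the plain test fails. */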
98 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
99 
100 	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
101 				       &ip6info->src), IP6T_INV_SRCIP) ||
102 	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
103 				       &ip6info->dst), IP6T_INV_DSTIP)) {
104 		dprintf("Source or dest mismatch.\n");
105 /*
106 		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
107 			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
108 			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
109 		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
110 			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
111 			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
112 		return false;
113 	}
114 
115 	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
116 
117 	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
118 		dprintf("VIA in mismatch (%s vs %s).%s\n",
119 			indev, ip6info->iniface,
120 			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
121 		return false;
122 	}
123 
124 	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
125 
126 	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
127 		dprintf("VIA out mismatch (%s vs %s).%s\n",
128 			outdev, ip6info->outiface,
129 			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
130 		return false;
131 	}
132 
133 /* ... might want to do something with class and flowlabel here ... */
134 
135 	/* look for the desired protocol header */
136 	if((ip6info->flags & IP6T_F_PROTO)) {
137 		int protohdr;
138 		unsigned short _frag_off;
139 
140 		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
141 		if (protohdr < 0) {
142 			if (_frag_off == 0)
143 				*hotdrop = true;
144 			return false;
145 		}
146 		*fragoff = _frag_off;
147 
148 		dprintf("Packet protocol %hi ?= %s%hi.\n",
149 				protohdr,
150 				ip6info->invflags & IP6T_INV_PROTO ? "!":"",
151 				ip6info->proto);
152 
153 		if (ip6info->proto == protohdr) {
154 			if(ip6info->invflags & IP6T_INV_PROTO) {
155 				return false;
156 			}
157 			return true;
158 		}
159 
160 		/* We need match for the '-p all', too! */
161 		if ((ip6info->proto != 0) &&
162 			!(ip6info->invflags & IP6T_INV_PROTO))
163 			return false;
164 	}
165 	return true;
166 }
167 
168 /* should be ip6 safe */
169 static bool
170 ip6_checkentry(const struct ip6t_ip6 *ipv6)
171 {
172 	if (ipv6->flags & ~IP6T_F_MASK) {
173 		duprintf("Unknown flag bits set: %08X\n",
174 			 ipv6->flags & ~IP6T_F_MASK);
175 		return false;
176 	}
177 	if (ipv6->invflags & ~IP6T_INV_MASK) {
178 		duprintf("Unknown invflag bits set: %08X\n",
179 			 ipv6->invflags & ~IP6T_INV_MASK);
180 		return false;
181 	}
182 	return true;
183 }
184 
185 static unsigned int
186 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
187 {
188 	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
189 
190 	return NF_DROP;
191 }
192 
193 static inline struct ip6t_entry *
194 get_entry(const void *base, unsigned int offset)
195 {
196 	return (struct ip6t_entry *)(base + offset);
197 }
198 
199 /* All zeroes == unconditional rule. */
200 /* Mildly perf critical (only if packet tracing is on) */
201 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
202 {
203 	static const struct ip6t_ip6 uncond;
204 
205 	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
206 }
207 
208 static inline const struct xt_entry_target *
209 ip6t_get_target_c(const struct ip6t_entry *e)
210 {
211 	return ip6t_get_target((struct ip6t_entry *)e);
212 }
213 
214 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
215 /* This cries for unification! */
216 static const char *const hooknames[] = {
217 	[NF_INET_PRE_ROUTING]		= "PREROUTING",
218 	[NF_INET_LOCAL_IN]		= "INPUT",
219 	[NF_INET_FORWARD]		= "FORWARD",
220 	[NF_INET_LOCAL_OUT]		= "OUTPUT",
221 	[NF_INET_POST_ROUTING]		= "POSTROUTING",
222 };
223 
224 enum nf_ip_trace_comments {
225 	NF_IP6_TRACE_COMMENT_RULE,
226 	NF_IP6_TRACE_COMMENT_RETURN,
227 	NF_IP6_TRACE_COMMENT_POLICY,
228 };
229 
230 static const char *const comments[] = {
231 	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
232 	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
233 	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
234 };
235 
236 static struct nf_loginfo trace_loginfo = {
237 	.type = NF_LOG_TYPE_LOG,
238 	.u = {
239 		.log = {
240 			.level = LOGLEVEL_WARNING,
241 			.logflags = NF_LOG_MASK,
242 		},
243 	},
244 };
245 
246 /* Mildly perf critical (only if packet tracing is on) */
247 static inline int
248 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
249 		      const char *hookname, const char **chainname,
250 		      const char **comment, unsigned int *rulenum)
251 {
252 	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
253 
254 	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
255 		/* Head of user chain: ERROR target with chainname */
256 		*chainname = t->target.data;
257 		(*rulenum) = 0;
258 	} else if (s == e) {
259 		(*rulenum)++;
260 
261 		if (s->target_offset == sizeof(struct ip6t_entry) &&
262 		    strcmp(t->target.u.kernel.target->name,
263 			   XT_STANDARD_TARGET) == 0 &&
264 		    t->verdict < 0 &&
265 		    unconditional(&s->ipv6)) {
266 			/* Tail of chains: STANDARD target (return/policy) */
267 			*comment = *chainname == hookname
268 				? comments[NF_IP6_TRACE_COMMENT_POLICY]
269 				: comments[NF_IP6_TRACE_COMMENT_RETURN];
270 		}
271 		return 1;
272 	} else
273 		(*rulenum)++;
274 
275 	return 0;
276 }
277 
278 static void trace_packet(const struct sk_buff *skb,
279 			 unsigned int hook,
280 			 const struct net_device *in,
281 			 const struct net_device *out,
282 			 const char *tablename,
283 			 const struct xt_table_info *private,
284 			 const struct ip6t_entry *e)
285 {
286 	const void *table_base;
287 	const struct ip6t_entry *root;
288 	const char *hookname, *chainname, *comment;
289 	const struct ip6t_entry *iter;
290 	unsigned int rulenum = 0;
291 	struct net *net = dev_net(in ? in : out);
292 
293 	table_base = private->entries[smp_processor_id()];
294 	root = get_entry(table_base, private->hook_entry[hook]);
295 
296 	hookname = chainname = hooknames[hook];
297 	comment = comments[NF_IP6_TRACE_COMMENT_RULE];
298 
299 	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
300 		if (get_chainname_rulenum(iter, e, hookname,
301 		    &chainname, &comment, &rulenum) != 0)
302 			break;
303 
304 	nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
305 		     "TRACE: %s:%s:%s:%u ",
306 		     tablename, chainname, comment, rulenum);
307 }
308 #endif
309 
310 static inline __pure struct ip6t_entry *
311 ip6t_next_entry(const struct ip6t_entry *entry)
312 {
313 	return (void *)entry + entry->next_offset;
314 }
315 
316 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
317 unsigned int
318 ip6t_do_table(struct sk_buff *skb,
319 	      unsigned int hook,
320 	      const struct nf_hook_state *state,
321 	      struct xt_table *table)
322 {
323 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
324 	/* Initializing verdict to NF_DROP keeps gcc happy. */
325 	unsigned int verdict = NF_DROP;
326 	const char *indev, *outdev;
327 	const void *table_base;
328 	struct ip6t_entry *e, **jumpstack;
329 	unsigned int *stackptr, origptr, cpu;
330 	const struct xt_table_info *private;
331 	struct xt_action_param acpar;
332 	unsigned int addend;
333 
334 	/* Initialization */
335 	indev = state->in ? state->in->name : nulldevname;
336 	outdev = state->out ? state->out->name : nulldevname;
337 	/* We handle fragments by dealing with the first fragment as
338 	 * if it was a normal packet.  All other fragments are treated
339 	 * normally, except that they will NEVER match rules that ask
340 	 * things we don't know, ie. tcp syn flag or ports).  If the
341 	 * rule is also a fragment-specific rule, non-fragments won't
342 	 * match it. */
343 	acpar.hotdrop = false;
344 	acpar.in      = state->in;
345 	acpar.out     = state->out;
346 	acpar.family  = NFPROTO_IPV6;
347 	acpar.hooknum = hook;
348 
349 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
350 
351 	local_bh_disable();
352 	addend = xt_write_recseq_begin();
353 	private = table->private;
354 	/*
355 	 * Ensure we load private-> members after we've fetched the base
356 	 * pointer.
357 	 */
358 	smp_read_barrier_depends();
359 	cpu        = smp_processor_id();
360 	table_base = private->entries[cpu];
361 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
362 	stackptr   = per_cpu_ptr(private->stackptr, cpu);
363 	origptr    = *stackptr;
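	/* Rules, jump stack and stack pointer are per-cpu; origptr lets us
	 * restore the stack to where this invocation found it on exit. */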
364 
365 	e = get_entry(table_base, private->hook_entry[hook]);
366 
367 	do {
368 		const struct xt_entry_target *t;
369 		const struct xt_entry_match *ematch;
370 
371 		IP_NF_ASSERT(e);
372 		acpar.thoff = 0;
373 		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
374 		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
375  no_match:
376 			e = ip6t_next_entry(e);
377 			continue;
378 		}
379 
380 		xt_ematch_foreach(ematch, e) {
381 			acpar.match     = ematch->u.kernel.match;
382 			acpar.matchinfo = ematch->data;
383 			if (!acpar.match->match(skb, &acpar))
384 				goto no_match;
385 		}
386 
387 		ADD_COUNTER(e->counters, skb->len, 1);
388 
389 		t = ip6t_get_target_c(e);
390 		IP_NF_ASSERT(t->u.kernel.target);
391 
392 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
393 		/* The packet is traced: log it */
394 		if (unlikely(skb->nf_trace))
395 			trace_packet(skb, hook, state->in, state->out,
396 				     table->name, private, e);
397 #endif
398 		/* Standard target? */
399 		if (!t->u.kernel.target->target) {
400 			int v;
401 
402 			v = ((struct xt_standard_target *)t)->verdict;
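			/* Negative verdicts are absolute: XT_RETURN pops the
			 * jump stack, anything else encodes an NF_* verdict
			 * as -verdict - 1. */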
403 			if (v < 0) {
404 				/* Pop from stack? */
405 				if (v != XT_RETURN) {
406 					verdict = (unsigned int)(-v) - 1;
407 					break;
408 				}
409 				if (*stackptr <= origptr)
410 					e = get_entry(table_base,
411 					    private->underflow[hook]);
412 				else
413 					e = ip6t_next_entry(jumpstack[--*stackptr]);
414 				continue;
415 			}
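			/* A real jump (not a fallthrough to the next rule and
			 * not -g/goto) pushes the current rule on the per-cpu
			 * jump stack so RETURN can resume after it. */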
416 			if (table_base + v != ip6t_next_entry(e) &&
417 			    !(e->ipv6.flags & IP6T_F_GOTO)) {
418 				if (*stackptr >= private->stacksize) {
419 					verdict = NF_DROP;
420 					break;
421 				}
422 				jumpstack[(*stackptr)++] = e;
423 			}
424 
425 			e = get_entry(table_base, v);
426 			continue;
427 		}
428 
429 		acpar.target   = t->u.kernel.target;
430 		acpar.targinfo = t->data;
431 
432 		verdict = t->u.kernel.target->target(skb, &acpar);
433 		if (verdict == XT_CONTINUE)
434 			e = ip6t_next_entry(e);
435 		else
436 			/* Verdict */
437 			break;
438 	} while (!acpar.hotdrop);
439 
440 	*stackptr = origptr;
441 
442  	xt_write_recseq_end(addend);
443  	local_bh_enable();
444 
445 #ifdef DEBUG_ALLOW_ALL
446 	return NF_ACCEPT;
447 #else
448 	if (acpar.hotdrop)
449 		return NF_DROP;
450 	else return verdict;
451 #endif
452 }
453 
454 /* Figures out from what hook each rule can be called: returns 0 if
455    there are loops.  Puts hook bitmask in comefrom. */
456 static int
457 mark_source_chains(const struct xt_table_info *newinfo,
458 		   unsigned int valid_hooks, void *entry0)
459 {
460 	unsigned int hook;
461 
462 	/* No recursion; use packet counter to save back ptrs (reset
463 	   to 0 as we leave), and comefrom to save source hook bitmask */
464 	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
465 		unsigned int pos = newinfo->hook_entry[hook];
466 		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
467 
468 		if (!(valid_hooks & (1 << hook)))
469 			continue;
470 
471 		/* Set initial back pointer. */
472 		e->counters.pcnt = pos;
473 
474 		for (;;) {
475 			const struct xt_standard_target *t
476 				= (void *)ip6t_get_target_c(e);
477 			int visited = e->comefrom & (1 << hook);
478 
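			/* Bit NF_INET_NUMHOOKS marks entries on the path we
			 * are currently walking; seeing it set again means
			 * the ruleset loops. */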
479 			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
480 				pr_err("iptables: loop hook %u pos %u %08X.\n",
481 				       hook, pos, e->comefrom);
482 				return 0;
483 			}
484 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
485 
486 			/* Unconditional return/END. */
487 			if ((e->target_offset == sizeof(struct ip6t_entry) &&
488 			     (strcmp(t->target.u.user.name,
489 				     XT_STANDARD_TARGET) == 0) &&
490 			     t->verdict < 0 &&
491 			     unconditional(&e->ipv6)) || visited) {
492 				unsigned int oldpos, size;
493 
494 				if ((strcmp(t->target.u.user.name,
495 					    XT_STANDARD_TARGET) == 0) &&
496 				    t->verdict < -NF_MAX_VERDICT - 1) {
497 					duprintf("mark_source_chains: bad "
498 						"negative verdict (%i)\n",
499 								t->verdict);
500 					return 0;
501 				}
502 
503 				/* Return: backtrack through the last
504 				   big jump. */
505 				do {
506 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
507 #ifdef DEBUG_IP_FIREWALL_USER
508 					if (e->comefrom
509 					    & (1 << NF_INET_NUMHOOKS)) {
510 						duprintf("Back unset "
511 							 "on hook %u "
512 							 "rule %u\n",
513 							 hook, pos);
514 					}
515 #endif
516 					oldpos = pos;
517 					pos = e->counters.pcnt;
518 					e->counters.pcnt = 0;
519 
520 					/* We're at the start. */
521 					if (pos == oldpos)
522 						goto next;
523 
524 					e = (struct ip6t_entry *)
525 						(entry0 + pos);
526 				} while (oldpos == pos + e->next_offset);
527 
528 				/* Move along one */
529 				size = e->next_offset;
530 				e = (struct ip6t_entry *)
531 					(entry0 + pos + size);
532 				e->counters.pcnt = pos;
533 				pos += size;
534 			} else {
535 				int newpos = t->verdict;
536 
537 				if (strcmp(t->target.u.user.name,
538 					   XT_STANDARD_TARGET) == 0 &&
539 				    newpos >= 0) {
540 					if (newpos > newinfo->size -
541 						sizeof(struct ip6t_entry)) {
542 						duprintf("mark_source_chains: "
543 							"bad verdict (%i)\n",
544 								newpos);
545 						return 0;
546 					}
547 					/* This a jump; chase it. */
548 					duprintf("Jump rule %u -> %u\n",
549 						 pos, newpos);
550 				} else {
551 					/* ... this is a fallthru */
552 					newpos = pos + e->next_offset;
553 				}
554 				e = (struct ip6t_entry *)
555 					(entry0 + newpos);
556 				e->counters.pcnt = pos;
557 				pos = newpos;
558 			}
559 		}
560 		next:
561 		duprintf("Finished chain %u\n", hook);
562 	}
563 	return 1;
564 }
565 
566 static void cleanup_match(struct xt_entry_match *m, struct net *net)
567 {
568 	struct xt_mtdtor_param par;
569 
570 	par.net       = net;
571 	par.match     = m->u.kernel.match;
572 	par.matchinfo = m->data;
573 	par.family    = NFPROTO_IPV6;
574 	if (par.match->destroy != NULL)
575 		par.match->destroy(&par);
576 	module_put(par.match->me);
577 }
578 
579 static int
580 check_entry(const struct ip6t_entry *e, const char *name)
581 {
582 	const struct xt_entry_target *t;
583 
584 	if (!ip6_checkentry(&e->ipv6)) {
585 		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
586 		return -EINVAL;
587 	}
588 
589 	if (e->target_offset + sizeof(struct xt_entry_target) >
590 	    e->next_offset)
591 		return -EINVAL;
592 
593 	t = ip6t_get_target_c(e);
594 	if (e->target_offset + t->u.target_size > e->next_offset)
595 		return -EINVAL;
596 
597 	return 0;
598 }
599 
600 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
601 {
602 	const struct ip6t_ip6 *ipv6 = par->entryinfo;
603 	int ret;
604 
605 	par->match     = m->u.kernel.match;
606 	par->matchinfo = m->data;
607 
608 	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
609 			     ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
610 	if (ret < 0) {
611 		duprintf("ip_tables: check failed for `%s'.\n",
612 			 par->match->name);
613 		return ret;
614 	}
615 	return 0;
616 }
617 
618 static int
619 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
620 {
621 	struct xt_match *match;
622 	int ret;
623 
624 	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
625 				      m->u.user.revision);
626 	if (IS_ERR(match)) {
627 		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
628 		return PTR_ERR(match);
629 	}
630 	m->u.kernel.match = match;
631 
632 	ret = check_match(m, par);
633 	if (ret)
634 		goto err;
635 
636 	return 0;
637 err:
638 	module_put(m->u.kernel.match->me);
639 	return ret;
640 }
641 
642 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
643 {
644 	struct xt_entry_target *t = ip6t_get_target(e);
645 	struct xt_tgchk_param par = {
646 		.net       = net,
647 		.table     = name,
648 		.entryinfo = e,
649 		.target    = t->u.kernel.target,
650 		.targinfo  = t->data,
651 		.hook_mask = e->comefrom,
652 		.family    = NFPROTO_IPV6,
653 	};
654 	int ret;
655 
656 	t = ip6t_get_target(e);
657 	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
658 	      e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
659 	if (ret < 0) {
660 		duprintf("ip_tables: check failed for `%s'.\n",
661 			 t->u.kernel.target->name);
662 		return ret;
663 	}
664 	return 0;
665 }
666 
667 static int
668 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
669 		 unsigned int size)
670 {
671 	struct xt_entry_target *t;
672 	struct xt_target *target;
673 	int ret;
674 	unsigned int j;
675 	struct xt_mtchk_param mtpar;
676 	struct xt_entry_match *ematch;
677 
678 	ret = check_entry(e, name);
679 	if (ret)
680 		return ret;
681 
682 	j = 0;
683 	mtpar.net	= net;
684 	mtpar.table     = name;
685 	mtpar.entryinfo = &e->ipv6;
686 	mtpar.hook_mask = e->comefrom;
687 	mtpar.family    = NFPROTO_IPV6;
688 	xt_ematch_foreach(ematch, e) {
689 		ret = find_check_match(ematch, &mtpar);
690 		if (ret != 0)
691 			goto cleanup_matches;
692 		++j;
693 	}
694 
695 	t = ip6t_get_target(e);
696 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
697 					t->u.user.revision);
698 	if (IS_ERR(target)) {
699 		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
700 		ret = PTR_ERR(target);
701 		goto cleanup_matches;
702 	}
703 	t->u.kernel.target = target;
704 
705 	ret = check_target(e, net, name);
706 	if (ret)
707 		goto err;
708 	return 0;
709  err:
710 	module_put(t->u.kernel.target->me);
711  cleanup_matches:
712 	xt_ematch_foreach(ematch, e) {
713 		if (j-- == 0)
714 			break;
715 		cleanup_match(ematch, net);
716 	}
717 	return ret;
718 }
719 
720 static bool check_underflow(const struct ip6t_entry *e)
721 {
722 	const struct xt_entry_target *t;
723 	unsigned int verdict;
724 
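	/* Base chain policies must be unconditional STANDARD verdicts;
	 * -verdict - 1 decodes the stored value back to NF_DROP/NF_ACCEPT. */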
725 	if (!unconditional(&e->ipv6))
726 		return false;
727 	t = ip6t_get_target_c(e);
728 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
729 		return false;
730 	verdict = ((struct xt_standard_target *)t)->verdict;
731 	verdict = -verdict - 1;
732 	return verdict == NF_DROP || verdict == NF_ACCEPT;
733 }
734 
735 static int
736 check_entry_size_and_hooks(struct ip6t_entry *e,
737 			   struct xt_table_info *newinfo,
738 			   const unsigned char *base,
739 			   const unsigned char *limit,
740 			   const unsigned int *hook_entries,
741 			   const unsigned int *underflows,
742 			   unsigned int valid_hooks)
743 {
744 	unsigned int h;
745 
746 	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
747 	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
748 		duprintf("Bad offset %p\n", e);
749 		return -EINVAL;
750 	}
751 
752 	if (e->next_offset
753 	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
754 		duprintf("checking: element %p size %u\n",
755 			 e, e->next_offset);
756 		return -EINVAL;
757 	}
758 
759 	/* Check hooks & underflows */
760 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
761 		if (!(valid_hooks & (1 << h)))
762 			continue;
763 		if ((unsigned char *)e - base == hook_entries[h])
764 			newinfo->hook_entry[h] = hook_entries[h];
765 		if ((unsigned char *)e - base == underflows[h]) {
766 			if (!check_underflow(e)) {
767 				pr_err("Underflows must be unconditional and "
768 				       "use the STANDARD target with "
769 				       "ACCEPT/DROP\n");
770 				return -EINVAL;
771 			}
772 			newinfo->underflow[h] = underflows[h];
773 		}
774 	}
775 
776 	/* Clear counters and comefrom */
777 	e->counters = ((struct xt_counters) { 0, 0 });
778 	e->comefrom = 0;
779 	return 0;
780 }
781 
782 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
783 {
784 	struct xt_tgdtor_param par;
785 	struct xt_entry_target *t;
786 	struct xt_entry_match *ematch;
787 
788 	/* Cleanup all matches */
789 	xt_ematch_foreach(ematch, e)
790 		cleanup_match(ematch, net);
791 	t = ip6t_get_target(e);
792 
793 	par.net      = net;
794 	par.target   = t->u.kernel.target;
795 	par.targinfo = t->data;
796 	par.family   = NFPROTO_IPV6;
797 	if (par.target->destroy != NULL)
798 		par.target->destroy(&par);
799 	module_put(par.target->me);
800 }
801 
802 /* Checks and translates the user-supplied table segment (held in
803    newinfo) */
804 static int
805 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
806                 const struct ip6t_replace *repl)
807 {
808 	struct ip6t_entry *iter;
809 	unsigned int i;
810 	int ret = 0;
811 
812 	newinfo->size = repl->size;
813 	newinfo->number = repl->num_entries;
814 
815 	/* Init all hooks to impossible value. */
816 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
817 		newinfo->hook_entry[i] = 0xFFFFFFFF;
818 		newinfo->underflow[i] = 0xFFFFFFFF;
819 	}
820 
821 	duprintf("translate_table: size %u\n", newinfo->size);
822 	i = 0;
823 	/* Walk through entries, checking offsets. */
824 	xt_entry_foreach(iter, entry0, newinfo->size) {
825 		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
826 						 entry0 + repl->size,
827 						 repl->hook_entry,
828 						 repl->underflow,
829 						 repl->valid_hooks);
830 		if (ret != 0)
831 			return ret;
832 		++i;
833 		if (strcmp(ip6t_get_target(iter)->u.user.name,
834 		    XT_ERROR_TARGET) == 0)
835 			++newinfo->stacksize;
836 	}
837 
838 	if (i != repl->num_entries) {
839 		duprintf("translate_table: %u not %u entries\n",
840 			 i, repl->num_entries);
841 		return -EINVAL;
842 	}
843 
844 	/* Check hooks all assigned */
845 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
846 		/* Only hooks which are valid */
847 		if (!(repl->valid_hooks & (1 << i)))
848 			continue;
849 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
850 			duprintf("Invalid hook entry %u %u\n",
851 				 i, repl->hook_entry[i]);
852 			return -EINVAL;
853 		}
854 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
855 			duprintf("Invalid underflow %u %u\n",
856 				 i, repl->underflow[i]);
857 			return -EINVAL;
858 		}
859 	}
860 
861 	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
862 		return -ELOOP;
863 
864 	/* Finally, each sanity check must pass */
865 	i = 0;
866 	xt_entry_foreach(iter, entry0, newinfo->size) {
867 		ret = find_check_entry(iter, net, repl->name, repl->size);
868 		if (ret != 0)
869 			break;
870 		++i;
871 	}
872 
873 	if (ret != 0) {
874 		xt_entry_foreach(iter, entry0, newinfo->size) {
875 			if (i-- == 0)
876 				break;
877 			cleanup_entry(iter, net);
878 		}
879 		return ret;
880 	}
881 
882 	/* And one copy for every other CPU */
883 	for_each_possible_cpu(i) {
884 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
885 			memcpy(newinfo->entries[i], entry0, newinfo->size);
886 	}
887 
888 	return ret;
889 }
890 
891 static void
892 get_counters(const struct xt_table_info *t,
893 	     struct xt_counters counters[])
894 {
895 	struct ip6t_entry *iter;
896 	unsigned int cpu;
897 	unsigned int i;
898 
899 	for_each_possible_cpu(cpu) {
900 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
901 
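		/* Re-read bcnt/pcnt until no xt_write_recseq writer ran on
		 * this cpu in between, so the 64-bit pair is consistent. */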
902 		i = 0;
903 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
904 			u64 bcnt, pcnt;
905 			unsigned int start;
906 
907 			do {
908 				start = read_seqcount_begin(s);
909 				bcnt = iter->counters.bcnt;
910 				pcnt = iter->counters.pcnt;
911 			} while (read_seqcount_retry(s, start));
912 
913 			ADD_COUNTER(counters[i], bcnt, pcnt);
914 			++i;
915 		}
916 	}
917 }
918 
919 static struct xt_counters *alloc_counters(const struct xt_table *table)
920 {
921 	unsigned int countersize;
922 	struct xt_counters *counters;
923 	const struct xt_table_info *private = table->private;
924 
925 	/* We need atomic snapshot of counters: rest doesn't change
926 	   (other than comefrom, which userspace doesn't care
927 	   about). */
928 	countersize = sizeof(struct xt_counters) * private->number;
929 	counters = vzalloc(countersize);
930 
931 	if (counters == NULL)
932 		return ERR_PTR(-ENOMEM);
933 
934 	get_counters(private, counters);
935 
936 	return counters;
937 }
938 
939 static int
940 copy_entries_to_user(unsigned int total_size,
941 		     const struct xt_table *table,
942 		     void __user *userptr)
943 {
944 	unsigned int off, num;
945 	const struct ip6t_entry *e;
946 	struct xt_counters *counters;
947 	const struct xt_table_info *private = table->private;
948 	int ret = 0;
949 	const void *loc_cpu_entry;
950 
951 	counters = alloc_counters(table);
952 	if (IS_ERR(counters))
953 		return PTR_ERR(counters);
954 
955 	/* choose the copy that is on our node/cpu, ...
956 	 * This choice is lazy (because current thread is
957 	 * allowed to migrate to another cpu)
958 	 */
959 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
960 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
961 		ret = -EFAULT;
962 		goto free_counters;
963 	}
964 
965 	/* FIXME: use iterator macros --RR */
966 	/* ... then go back and fix counters and names */
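	/* (the kernel blob stores match/target pointers in the u.kernel
	 *  union; userspace expects the u.user.name strings instead) */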
967 	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
968 		unsigned int i;
969 		const struct xt_entry_match *m;
970 		const struct xt_entry_target *t;
971 
972 		e = (struct ip6t_entry *)(loc_cpu_entry + off);
973 		if (copy_to_user(userptr + off
974 				 + offsetof(struct ip6t_entry, counters),
975 				 &counters[num],
976 				 sizeof(counters[num])) != 0) {
977 			ret = -EFAULT;
978 			goto free_counters;
979 		}
980 
981 		for (i = sizeof(struct ip6t_entry);
982 		     i < e->target_offset;
983 		     i += m->u.match_size) {
984 			m = (void *)e + i;
985 
986 			if (copy_to_user(userptr + off + i
987 					 + offsetof(struct xt_entry_match,
988 						    u.user.name),
989 					 m->u.kernel.match->name,
990 					 strlen(m->u.kernel.match->name)+1)
991 			    != 0) {
992 				ret = -EFAULT;
993 				goto free_counters;
994 			}
995 		}
996 
997 		t = ip6t_get_target_c(e);
998 		if (copy_to_user(userptr + off + e->target_offset
999 				 + offsetof(struct xt_entry_target,
1000 					    u.user.name),
1001 				 t->u.kernel.target->name,
1002 				 strlen(t->u.kernel.target->name)+1) != 0) {
1003 			ret = -EFAULT;
1004 			goto free_counters;
1005 		}
1006 	}
1007 
1008  free_counters:
1009 	vfree(counters);
1010 	return ret;
1011 }
1012 
1013 #ifdef CONFIG_COMPAT
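/* A positive standard-target verdict is a jump offset into the rule blob;
 * compat (32-bit) and native entries have different sizes, so the offset
 * has to be translated in both directions. */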
1014 static void compat_standard_from_user(void *dst, const void *src)
1015 {
1016 	int v = *(compat_int_t *)src;
1017 
1018 	if (v > 0)
1019 		v += xt_compat_calc_jump(AF_INET6, v);
1020 	memcpy(dst, &v, sizeof(v));
1021 }
1022 
1023 static int compat_standard_to_user(void __user *dst, const void *src)
1024 {
1025 	compat_int_t cv = *(int *)src;
1026 
1027 	if (cv > 0)
1028 		cv -= xt_compat_calc_jump(AF_INET6, cv);
1029 	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1030 }
1031 
1032 static int compat_calc_entry(const struct ip6t_entry *e,
1033 			     const struct xt_table_info *info,
1034 			     const void *base, struct xt_table_info *newinfo)
1035 {
1036 	const struct xt_entry_match *ematch;
1037 	const struct xt_entry_target *t;
1038 	unsigned int entry_offset;
1039 	int off, i, ret;
1040 
1041 	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1042 	entry_offset = (void *)e - base;
1043 	xt_ematch_foreach(ematch, e)
1044 		off += xt_compat_match_offset(ematch->u.kernel.match);
1045 	t = ip6t_get_target_c(e);
1046 	off += xt_compat_target_offset(t->u.kernel.target);
1047 	newinfo->size -= off;
1048 	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1049 	if (ret)
1050 		return ret;
1051 
1052 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1053 		if (info->hook_entry[i] &&
1054 		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1055 			newinfo->hook_entry[i] -= off;
1056 		if (info->underflow[i] &&
1057 		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
1058 			newinfo->underflow[i] -= off;
1059 	}
1060 	return 0;
1061 }
1062 
1063 static int compat_table_info(const struct xt_table_info *info,
1064 			     struct xt_table_info *newinfo)
1065 {
1066 	struct ip6t_entry *iter;
1067 	void *loc_cpu_entry;
1068 	int ret;
1069 
1070 	if (!newinfo || !info)
1071 		return -EINVAL;
1072 
1073 	/* we dont care about newinfo->entries[] */
1074 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1075 	newinfo->initial_entries = 0;
1076 	loc_cpu_entry = info->entries[raw_smp_processor_id()];
1077 	xt_compat_init_offsets(AF_INET6, info->number);
1078 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1079 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1080 		if (ret != 0)
1081 			return ret;
1082 	}
1083 	return 0;
1084 }
1085 #endif
1086 
1087 static int get_info(struct net *net, void __user *user,
1088                     const int *len, int compat)
1089 {
1090 	char name[XT_TABLE_MAXNAMELEN];
1091 	struct xt_table *t;
1092 	int ret;
1093 
1094 	if (*len != sizeof(struct ip6t_getinfo)) {
1095 		duprintf("length %u != %zu\n", *len,
1096 			 sizeof(struct ip6t_getinfo));
1097 		return -EINVAL;
1098 	}
1099 
1100 	if (copy_from_user(name, user, sizeof(name)) != 0)
1101 		return -EFAULT;
1102 
1103 	name[XT_TABLE_MAXNAMELEN-1] = '\0';
1104 #ifdef CONFIG_COMPAT
1105 	if (compat)
1106 		xt_compat_lock(AF_INET6);
1107 #endif
1108 	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1109 				    "ip6table_%s", name);
1110 	if (!IS_ERR_OR_NULL(t)) {
1111 		struct ip6t_getinfo info;
1112 		const struct xt_table_info *private = t->private;
1113 #ifdef CONFIG_COMPAT
1114 		struct xt_table_info tmp;
1115 
1116 		if (compat) {
1117 			ret = compat_table_info(private, &tmp);
1118 			xt_compat_flush_offsets(AF_INET6);
1119 			private = &tmp;
1120 		}
1121 #endif
1122 		memset(&info, 0, sizeof(info));
1123 		info.valid_hooks = t->valid_hooks;
1124 		memcpy(info.hook_entry, private->hook_entry,
1125 		       sizeof(info.hook_entry));
1126 		memcpy(info.underflow, private->underflow,
1127 		       sizeof(info.underflow));
1128 		info.num_entries = private->number;
1129 		info.size = private->size;
1130 		strcpy(info.name, name);
1131 
1132 		if (copy_to_user(user, &info, *len) != 0)
1133 			ret = -EFAULT;
1134 		else
1135 			ret = 0;
1136 
1137 		xt_table_unlock(t);
1138 		module_put(t->me);
1139 	} else
1140 		ret = t ? PTR_ERR(t) : -ENOENT;
1141 #ifdef CONFIG_COMPAT
1142 	if (compat)
1143 		xt_compat_unlock(AF_INET6);
1144 #endif
1145 	return ret;
1146 }
1147 
1148 static int
1149 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1150             const int *len)
1151 {
1152 	int ret;
1153 	struct ip6t_get_entries get;
1154 	struct xt_table *t;
1155 
1156 	if (*len < sizeof(get)) {
1157 		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1158 		return -EINVAL;
1159 	}
1160 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1161 		return -EFAULT;
1162 	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1163 		duprintf("get_entries: %u != %zu\n",
1164 			 *len, sizeof(get) + get.size);
1165 		return -EINVAL;
1166 	}
1167 
1168 	t = xt_find_table_lock(net, AF_INET6, get.name);
1169 	if (!IS_ERR_OR_NULL(t)) {
1170 		struct xt_table_info *private = t->private;
1171 		duprintf("t->private->number = %u\n", private->number);
1172 		if (get.size == private->size)
1173 			ret = copy_entries_to_user(private->size,
1174 						   t, uptr->entrytable);
1175 		else {
1176 			duprintf("get_entries: I've got %u not %u!\n",
1177 				 private->size, get.size);
1178 			ret = -EAGAIN;
1179 		}
1180 		module_put(t->me);
1181 		xt_table_unlock(t);
1182 	} else
1183 		ret = t ? PTR_ERR(t) : -ENOENT;
1184 
1185 	return ret;
1186 }
1187 
1188 static int
1189 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1190 	     struct xt_table_info *newinfo, unsigned int num_counters,
1191 	     void __user *counters_ptr)
1192 {
1193 	int ret;
1194 	struct xt_table *t;
1195 	struct xt_table_info *oldinfo;
1196 	struct xt_counters *counters;
1197 	const void *loc_cpu_old_entry;
1198 	struct ip6t_entry *iter;
1199 
1200 	ret = 0;
1201 	counters = vzalloc(num_counters * sizeof(struct xt_counters));
1202 	if (!counters) {
1203 		ret = -ENOMEM;
1204 		goto out;
1205 	}
1206 
1207 	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1208 				    "ip6table_%s", name);
1209 	if (IS_ERR_OR_NULL(t)) {
1210 		ret = t ? PTR_ERR(t) : -ENOENT;
1211 		goto free_newinfo_counters_untrans;
1212 	}
1213 
1214 	/* You lied! */
1215 	if (valid_hooks != t->valid_hooks) {
1216 		duprintf("Valid hook crap: %08X vs %08X\n",
1217 			 valid_hooks, t->valid_hooks);
1218 		ret = -EINVAL;
1219 		goto put_module;
1220 	}
1221 
1222 	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1223 	if (!oldinfo)
1224 		goto put_module;
1225 
1226 	/* Update module usage count based on number of rules */
1227 	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1228 		oldinfo->number, oldinfo->initial_entries, newinfo->number);
1229 	if ((oldinfo->number > oldinfo->initial_entries) ||
1230 	    (newinfo->number <= oldinfo->initial_entries))
1231 		module_put(t->me);
1232 	if ((oldinfo->number > oldinfo->initial_entries) &&
1233 	    (newinfo->number <= oldinfo->initial_entries))
1234 		module_put(t->me);
1235 
1236 	/* Get the old counters, and synchronize with replace */
1237 	get_counters(oldinfo, counters);
1238 
1239 	/* Decrease module usage counts and free resource */
1240 	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1241 	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1242 		cleanup_entry(iter, net);
1243 
1244 	xt_free_table_info(oldinfo);
1245 	if (copy_to_user(counters_ptr, counters,
1246 			 sizeof(struct xt_counters) * num_counters) != 0) {
1247 		/* Silent error, can't fail, new table is already in place */
1248 		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1249 	}
1250 	vfree(counters);
1251 	xt_table_unlock(t);
1252 	return ret;
1253 
1254  put_module:
1255 	module_put(t->me);
1256 	xt_table_unlock(t);
1257  free_newinfo_counters_untrans:
1258 	vfree(counters);
1259  out:
1260 	return ret;
1261 }
1262 
1263 static int
1264 do_replace(struct net *net, const void __user *user, unsigned int len)
1265 {
1266 	int ret;
1267 	struct ip6t_replace tmp;
1268 	struct xt_table_info *newinfo;
1269 	void *loc_cpu_entry;
1270 	struct ip6t_entry *iter;
1271 
1272 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1273 		return -EFAULT;
1274 
1275 	/* overflow check */
1276 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1277 		return -ENOMEM;
1278 	if (tmp.num_counters == 0)
1279 		return -EINVAL;
1280 
1281 	tmp.name[sizeof(tmp.name)-1] = 0;
1282 
1283 	newinfo = xt_alloc_table_info(tmp.size);
1284 	if (!newinfo)
1285 		return -ENOMEM;
1286 
1287 	/* choose the copy that is on our node/cpu */
1288 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1289 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1290 			   tmp.size) != 0) {
1291 		ret = -EFAULT;
1292 		goto free_newinfo;
1293 	}
1294 
1295 	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1296 	if (ret != 0)
1297 		goto free_newinfo;
1298 
1299 	duprintf("ip_tables: Translated table\n");
1300 
1301 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1302 			   tmp.num_counters, tmp.counters);
1303 	if (ret)
1304 		goto free_newinfo_untrans;
1305 	return 0;
1306 
1307  free_newinfo_untrans:
1308 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1309 		cleanup_entry(iter, net);
1310  free_newinfo:
1311 	xt_free_table_info(newinfo);
1312 	return ret;
1313 }
1314 
1315 static int
1316 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1317 		int compat)
1318 {
1319 	unsigned int i, curcpu;
1320 	struct xt_counters_info tmp;
1321 	struct xt_counters *paddc;
1322 	unsigned int num_counters;
1323 	char *name;
1324 	int size;
1325 	void *ptmp;
1326 	struct xt_table *t;
1327 	const struct xt_table_info *private;
1328 	int ret = 0;
1329 	const void *loc_cpu_entry;
1330 	struct ip6t_entry *iter;
1331 	unsigned int addend;
1332 #ifdef CONFIG_COMPAT
1333 	struct compat_xt_counters_info compat_tmp;
1334 
1335 	if (compat) {
1336 		ptmp = &compat_tmp;
1337 		size = sizeof(struct compat_xt_counters_info);
1338 	} else
1339 #endif
1340 	{
1341 		ptmp = &tmp;
1342 		size = sizeof(struct xt_counters_info);
1343 	}
1344 
1345 	if (copy_from_user(ptmp, user, size) != 0)
1346 		return -EFAULT;
1347 
1348 #ifdef CONFIG_COMPAT
1349 	if (compat) {
1350 		num_counters = compat_tmp.num_counters;
1351 		name = compat_tmp.name;
1352 	} else
1353 #endif
1354 	{
1355 		num_counters = tmp.num_counters;
1356 		name = tmp.name;
1357 	}
1358 
1359 	if (len != size + num_counters * sizeof(struct xt_counters))
1360 		return -EINVAL;
1361 
1362 	paddc = vmalloc(len - size);
1363 	if (!paddc)
1364 		return -ENOMEM;
1365 
1366 	if (copy_from_user(paddc, user + size, len - size) != 0) {
1367 		ret = -EFAULT;
1368 		goto free;
1369 	}
1370 
1371 	t = xt_find_table_lock(net, AF_INET6, name);
1372 	if (IS_ERR_OR_NULL(t)) {
1373 		ret = t ? PTR_ERR(t) : -ENOENT;
1374 		goto free;
1375 	}
1376 
1377 
1378 	local_bh_disable();
1379 	private = t->private;
1380 	if (private->number != num_counters) {
1381 		ret = -EINVAL;
1382 		goto unlock_up_free;
1383 	}
1384 
1385 	i = 0;
1386 	/* Choose the copy that is on our node */
1387 	curcpu = smp_processor_id();
1388 	addend = xt_write_recseq_begin();
1389 	loc_cpu_entry = private->entries[curcpu];
1390 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1391 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1392 		++i;
1393 	}
1394 	xt_write_recseq_end(addend);
1395 
1396  unlock_up_free:
1397 	local_bh_enable();
1398 	xt_table_unlock(t);
1399 	module_put(t->me);
1400  free:
1401 	vfree(paddc);
1402 
1403 	return ret;
1404 }
1405 
1406 #ifdef CONFIG_COMPAT
1407 struct compat_ip6t_replace {
1408 	char			name[XT_TABLE_MAXNAMELEN];
1409 	u32			valid_hooks;
1410 	u32			num_entries;
1411 	u32			size;
1412 	u32			hook_entry[NF_INET_NUMHOOKS];
1413 	u32			underflow[NF_INET_NUMHOOKS];
1414 	u32			num_counters;
1415 	compat_uptr_t		counters;	/* struct xt_counters * */
1416 	struct compat_ip6t_entry entries[0];
1417 };
1418 
1419 static int
1420 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1421 			  unsigned int *size, struct xt_counters *counters,
1422 			  unsigned int i)
1423 {
1424 	struct xt_entry_target *t;
1425 	struct compat_ip6t_entry __user *ce;
1426 	u_int16_t target_offset, next_offset;
1427 	compat_uint_t origsize;
1428 	const struct xt_entry_match *ematch;
1429 	int ret = 0;
1430 
1431 	origsize = *size;
1432 	ce = (struct compat_ip6t_entry __user *)*dstptr;
1433 	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1434 	    copy_to_user(&ce->counters, &counters[i],
1435 	    sizeof(counters[i])) != 0)
1436 		return -EFAULT;
1437 
1438 	*dstptr += sizeof(struct compat_ip6t_entry);
1439 	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1440 
1441 	xt_ematch_foreach(ematch, e) {
1442 		ret = xt_compat_match_to_user(ematch, dstptr, size);
1443 		if (ret != 0)
1444 			return ret;
1445 	}
1446 	target_offset = e->target_offset - (origsize - *size);
1447 	t = ip6t_get_target(e);
1448 	ret = xt_compat_target_to_user(t, dstptr, size);
1449 	if (ret)
1450 		return ret;
1451 	next_offset = e->next_offset - (origsize - *size);
1452 	if (put_user(target_offset, &ce->target_offset) != 0 ||
1453 	    put_user(next_offset, &ce->next_offset) != 0)
1454 		return -EFAULT;
1455 	return 0;
1456 }
1457 
1458 static int
1459 compat_find_calc_match(struct xt_entry_match *m,
1460 		       const char *name,
1461 		       const struct ip6t_ip6 *ipv6,
1462 		       unsigned int hookmask,
1463 		       int *size)
1464 {
1465 	struct xt_match *match;
1466 
1467 	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1468 				      m->u.user.revision);
1469 	if (IS_ERR(match)) {
1470 		duprintf("compat_check_calc_match: `%s' not found\n",
1471 			 m->u.user.name);
1472 		return PTR_ERR(match);
1473 	}
1474 	m->u.kernel.match = match;
1475 	*size += xt_compat_match_offset(match);
1476 	return 0;
1477 }
1478 
1479 static void compat_release_entry(struct compat_ip6t_entry *e)
1480 {
1481 	struct xt_entry_target *t;
1482 	struct xt_entry_match *ematch;
1483 
1484 	/* Cleanup all matches */
1485 	xt_ematch_foreach(ematch, e)
1486 		module_put(ematch->u.kernel.match->me);
1487 	t = compat_ip6t_get_target(e);
1488 	module_put(t->u.kernel.target->me);
1489 }
1490 
1491 static int
1492 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1493 				  struct xt_table_info *newinfo,
1494 				  unsigned int *size,
1495 				  const unsigned char *base,
1496 				  const unsigned char *limit,
1497 				  const unsigned int *hook_entries,
1498 				  const unsigned int *underflows,
1499 				  const char *name)
1500 {
1501 	struct xt_entry_match *ematch;
1502 	struct xt_entry_target *t;
1503 	struct xt_target *target;
1504 	unsigned int entry_offset;
1505 	unsigned int j;
1506 	int ret, off, h;
1507 
1508 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
1509 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1510 	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1511 		duprintf("Bad offset %p, limit = %p\n", e, limit);
1512 		return -EINVAL;
1513 	}
1514 
1515 	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1516 			     sizeof(struct compat_xt_entry_target)) {
1517 		duprintf("checking: element %p size %u\n",
1518 			 e, e->next_offset);
1519 		return -EINVAL;
1520 	}
1521 
1522 	/* For purposes of check_entry casting the compat entry is fine */
1523 	ret = check_entry((struct ip6t_entry *)e, name);
1524 	if (ret)
1525 		return ret;
1526 
1527 	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1528 	entry_offset = (void *)e - (void *)base;
1529 	j = 0;
1530 	xt_ematch_foreach(ematch, e) {
1531 		ret = compat_find_calc_match(ematch, name,
1532 					     &e->ipv6, e->comefrom, &off);
1533 		if (ret != 0)
1534 			goto release_matches;
1535 		++j;
1536 	}
1537 
1538 	t = compat_ip6t_get_target(e);
1539 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1540 					t->u.user.revision);
1541 	if (IS_ERR(target)) {
1542 		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1543 			 t->u.user.name);
1544 		ret = PTR_ERR(target);
1545 		goto release_matches;
1546 	}
1547 	t->u.kernel.target = target;
1548 
1549 	off += xt_compat_target_offset(target);
1550 	*size += off;
1551 	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1552 	if (ret)
1553 		goto out;
1554 
1555 	/* Check hooks & underflows */
1556 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1557 		if ((unsigned char *)e - base == hook_entries[h])
1558 			newinfo->hook_entry[h] = hook_entries[h];
1559 		if ((unsigned char *)e - base == underflows[h])
1560 			newinfo->underflow[h] = underflows[h];
1561 	}
1562 
1563 	/* Clear counters and comefrom */
1564 	memset(&e->counters, 0, sizeof(e->counters));
1565 	e->comefrom = 0;
1566 	return 0;
1567 
1568 out:
1569 	module_put(t->u.kernel.target->me);
1570 release_matches:
1571 	xt_ematch_foreach(ematch, e) {
1572 		if (j-- == 0)
1573 			break;
1574 		module_put(ematch->u.kernel.match->me);
1575 	}
1576 	return ret;
1577 }
1578 
1579 static int
1580 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1581 			    unsigned int *size, const char *name,
1582 			    struct xt_table_info *newinfo, unsigned char *base)
1583 {
1584 	struct xt_entry_target *t;
1585 	struct ip6t_entry *de;
1586 	unsigned int origsize;
1587 	int ret, h;
1588 	struct xt_entry_match *ematch;
1589 
1590 	ret = 0;
1591 	origsize = *size;
1592 	de = (struct ip6t_entry *)*dstptr;
1593 	memcpy(de, e, sizeof(struct ip6t_entry));
1594 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
1595 
1596 	*dstptr += sizeof(struct ip6t_entry);
1597 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1598 
1599 	xt_ematch_foreach(ematch, e) {
1600 		ret = xt_compat_match_from_user(ematch, dstptr, size);
1601 		if (ret != 0)
1602 			return ret;
1603 	}
1604 	de->target_offset = e->target_offset - (origsize - *size);
1605 	t = compat_ip6t_get_target(e);
1606 	xt_compat_target_from_user(t, dstptr, size);
1607 
1608 	de->next_offset = e->next_offset - (origsize - *size);
1609 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1610 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
1611 			newinfo->hook_entry[h] -= origsize - *size;
1612 		if ((unsigned char *)de - base < newinfo->underflow[h])
1613 			newinfo->underflow[h] -= origsize - *size;
1614 	}
1615 	return ret;
1616 }
1617 
1618 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1619 			      const char *name)
1620 {
1621 	unsigned int j;
1622 	int ret = 0;
1623 	struct xt_mtchk_param mtpar;
1624 	struct xt_entry_match *ematch;
1625 
1626 	j = 0;
1627 	mtpar.net	= net;
1628 	mtpar.table     = name;
1629 	mtpar.entryinfo = &e->ipv6;
1630 	mtpar.hook_mask = e->comefrom;
1631 	mtpar.family    = NFPROTO_IPV6;
1632 	xt_ematch_foreach(ematch, e) {
1633 		ret = check_match(ematch, &mtpar);
1634 		if (ret != 0)
1635 			goto cleanup_matches;
1636 		++j;
1637 	}
1638 
1639 	ret = check_target(e, net, name);
1640 	if (ret)
1641 		goto cleanup_matches;
1642 	return 0;
1643 
1644  cleanup_matches:
1645 	xt_ematch_foreach(ematch, e) {
1646 		if (j-- == 0)
1647 			break;
1648 		cleanup_match(ematch, net);
1649 	}
1650 	return ret;
1651 }
1652 
1653 static int
1654 translate_compat_table(struct net *net,
1655 		       const char *name,
1656 		       unsigned int valid_hooks,
1657 		       struct xt_table_info **pinfo,
1658 		       void **pentry0,
1659 		       unsigned int total_size,
1660 		       unsigned int number,
1661 		       unsigned int *hook_entries,
1662 		       unsigned int *underflows)
1663 {
1664 	unsigned int i, j;
1665 	struct xt_table_info *newinfo, *info;
1666 	void *pos, *entry0, *entry1;
1667 	struct compat_ip6t_entry *iter0;
1668 	struct ip6t_entry *iter1;
1669 	unsigned int size;
1670 	int ret = 0;
1671 
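	/* Two passes: validate the compat (32-bit) entries and record their
	 * size deltas under xt_compat_lock, then rewrite everything into a
	 * native-sized table and run the normal checks on that copy. */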
1672 	info = *pinfo;
1673 	entry0 = *pentry0;
1674 	size = total_size;
1675 	info->number = number;
1676 
1677 	/* Init all hooks to impossible value. */
1678 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1679 		info->hook_entry[i] = 0xFFFFFFFF;
1680 		info->underflow[i] = 0xFFFFFFFF;
1681 	}
1682 
1683 	duprintf("translate_compat_table: size %u\n", info->size);
1684 	j = 0;
1685 	xt_compat_lock(AF_INET6);
1686 	xt_compat_init_offsets(AF_INET6, number);
1687 	/* Walk through entries, checking offsets. */
1688 	xt_entry_foreach(iter0, entry0, total_size) {
1689 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1690 							entry0,
1691 							entry0 + total_size,
1692 							hook_entries,
1693 							underflows,
1694 							name);
1695 		if (ret != 0)
1696 			goto out_unlock;
1697 		++j;
1698 	}
1699 
1700 	ret = -EINVAL;
1701 	if (j != number) {
1702 		duprintf("translate_compat_table: %u not %u entries\n",
1703 			 j, number);
1704 		goto out_unlock;
1705 	}
1706 
1707 	/* Check hooks all assigned */
1708 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1709 		/* Only hooks which are valid */
1710 		if (!(valid_hooks & (1 << i)))
1711 			continue;
1712 		if (info->hook_entry[i] == 0xFFFFFFFF) {
1713 			duprintf("Invalid hook entry %u %u\n",
1714 				 i, hook_entries[i]);
1715 			goto out_unlock;
1716 		}
1717 		if (info->underflow[i] == 0xFFFFFFFF) {
1718 			duprintf("Invalid underflow %u %u\n",
1719 				 i, underflows[i]);
1720 			goto out_unlock;
1721 		}
1722 	}
1723 
1724 	ret = -ENOMEM;
1725 	newinfo = xt_alloc_table_info(size);
1726 	if (!newinfo)
1727 		goto out_unlock;
1728 
1729 	newinfo->number = number;
1730 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1731 		newinfo->hook_entry[i] = info->hook_entry[i];
1732 		newinfo->underflow[i] = info->underflow[i];
1733 	}
1734 	entry1 = newinfo->entries[raw_smp_processor_id()];
1735 	pos = entry1;
1736 	size = total_size;
1737 	xt_entry_foreach(iter0, entry0, total_size) {
1738 		ret = compat_copy_entry_from_user(iter0, &pos, &size,
1739 						  name, newinfo, entry1);
1740 		if (ret != 0)
1741 			break;
1742 	}
1743 	xt_compat_flush_offsets(AF_INET6);
1744 	xt_compat_unlock(AF_INET6);
1745 	if (ret)
1746 		goto free_newinfo;
1747 
1748 	ret = -ELOOP;
1749 	if (!mark_source_chains(newinfo, valid_hooks, entry1))
1750 		goto free_newinfo;
1751 
1752 	i = 0;
1753 	xt_entry_foreach(iter1, entry1, newinfo->size) {
1754 		ret = compat_check_entry(iter1, net, name);
1755 		if (ret != 0)
1756 			break;
1757 		++i;
1758 		if (strcmp(ip6t_get_target(iter1)->u.user.name,
1759 		    XT_ERROR_TARGET) == 0)
1760 			++newinfo->stacksize;
1761 	}
1762 	if (ret) {
1763 		/*
1764 		 * The first i matches need cleanup_entry (calls ->destroy)
1765 		 * because they had called ->check already. The other j-i
1766 		 * entries need only release.
1767 		 */
1768 		int skip = i;
1769 		j -= i;
1770 		xt_entry_foreach(iter0, entry0, newinfo->size) {
1771 			if (skip-- > 0)
1772 				continue;
1773 			if (j-- == 0)
1774 				break;
1775 			compat_release_entry(iter0);
1776 		}
1777 		xt_entry_foreach(iter1, entry1, newinfo->size) {
1778 			if (i-- == 0)
1779 				break;
1780 			cleanup_entry(iter1, net);
1781 		}
1782 		xt_free_table_info(newinfo);
1783 		return ret;
1784 	}
1785 
1786 	/* And one copy for every other CPU */
1787 	for_each_possible_cpu(i)
1788 		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1789 			memcpy(newinfo->entries[i], entry1, newinfo->size);
1790 
1791 	*pinfo = newinfo;
1792 	*pentry0 = entry1;
1793 	xt_free_table_info(info);
1794 	return 0;
1795 
1796 free_newinfo:
1797 	xt_free_table_info(newinfo);
1798 out:
1799 	xt_entry_foreach(iter0, entry0, total_size) {
1800 		if (j-- == 0)
1801 			break;
1802 		compat_release_entry(iter0);
1803 	}
1804 	return ret;
1805 out_unlock:
1806 	xt_compat_flush_offsets(AF_INET6);
1807 	xt_compat_unlock(AF_INET6);
1808 	goto out;
1809 }
1810 
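/*
 * Compat counterpart of do_replace(): copy a compat_ip6t_replace header
 * and the rule blob from 32-bit userspace, convert the entries to native
 * layout via translate_compat_table() and swap the table in with
 * __do_replace().
 */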
1811 static int
1812 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1813 {
1814 	int ret;
1815 	struct compat_ip6t_replace tmp;
1816 	struct xt_table_info *newinfo;
1817 	void *loc_cpu_entry;
1818 	struct ip6t_entry *iter;
1819 
1820 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1821 		return -EFAULT;
1822 
1823 	/* overflow check */
1824 	if (tmp.size >= INT_MAX / num_possible_cpus())
1825 		return -ENOMEM;
1826 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1827 		return -ENOMEM;
1828 	if (tmp.num_counters == 0)
1829 		return -EINVAL;
1830 
1831 	tmp.name[sizeof(tmp.name)-1] = 0;
1832 
1833 	newinfo = xt_alloc_table_info(tmp.size);
1834 	if (!newinfo)
1835 		return -ENOMEM;
1836 
1837 	/* choose the copy that is on our node/cpu */
1838 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1839 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1840 			   tmp.size) != 0) {
1841 		ret = -EFAULT;
1842 		goto free_newinfo;
1843 	}
1844 
1845 	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1846 				     &newinfo, &loc_cpu_entry, tmp.size,
1847 				     tmp.num_entries, tmp.hook_entry,
1848 				     tmp.underflow);
1849 	if (ret != 0)
1850 		goto free_newinfo;
1851 
1852 	duprintf("compat_do_replace: Translated table\n");
1853 
1854 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1855 			   tmp.num_counters, compat_ptr(tmp.counters));
1856 	if (ret)
1857 		goto free_newinfo_untrans;
1858 	return 0;
1859 
1860  free_newinfo_untrans:
1861 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1862 		cleanup_entry(iter, net);
1863  free_newinfo:
1864 	xt_free_table_info(newinfo);
1865 	return ret;
1866 }
1867 
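/*
 * Compat setsockopt() handler: only table replacement and counter
 * addition are accepted, and only with CAP_NET_ADMIN in the socket's
 * user namespace.
 */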
1868 static int
1869 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1870 		       unsigned int len)
1871 {
1872 	int ret;
1873 
1874 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1875 		return -EPERM;
1876 
1877 	switch (cmd) {
1878 	case IP6T_SO_SET_REPLACE:
1879 		ret = compat_do_replace(sock_net(sk), user, len);
1880 		break;
1881 
1882 	case IP6T_SO_SET_ADD_COUNTERS:
1883 		ret = do_add_counters(sock_net(sk), user, len, 1);
1884 		break;
1885 
1886 	default:
1887 		duprintf("compat_do_ip6t_set_ctl: unknown request %i\n", cmd);
1888 		ret = -EINVAL;
1889 	}
1890 
1891 	return ret;
1892 }
1893 
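/* Layout of an IP6T_SO_GET_ENTRIES request as issued by 32-bit userspace. */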
1894 struct compat_ip6t_get_entries {
1895 	char name[XT_TABLE_MAXNAMELEN];
1896 	compat_uint_t size;
1897 	struct compat_ip6t_entry entrytable[0];
1898 };
1899 
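/*
 * Copy the current ruleset back to userspace in compat layout, together
 * with the counters aggregated by alloc_counters().
 */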
1900 static int
1901 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1902 			    void __user *userptr)
1903 {
1904 	struct xt_counters *counters;
1905 	const struct xt_table_info *private = table->private;
1906 	void __user *pos;
1907 	unsigned int size;
1908 	int ret = 0;
1909 	const void *loc_cpu_entry;
1910 	unsigned int i = 0;
1911 	struct ip6t_entry *iter;
1912 
1913 	counters = alloc_counters(table);
1914 	if (IS_ERR(counters))
1915 		return PTR_ERR(counters);
1916 
1917 	/* choose the copy that is on our node/cpu, ...
1918 	 * This choice is lazy (because current thread is
1919 	 * allowed to migrate to another cpu)
1920 	 */
1921 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
1922 	pos = userptr;
1923 	size = total_size;
1924 	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1925 		ret = compat_copy_entry_to_user(iter, &pos,
1926 						&size, counters, i++);
1927 		if (ret != 0)
1928 			break;
1929 	}
1930 
1931 	vfree(counters);
1932 	return ret;
1933 }
1934 
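/* Compat handler for IP6T_SO_GET_ENTRIES: validate the requested size
 * against compat_table_info() before copying the entries out.
 */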
1935 static int
1936 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1937 		   int *len)
1938 {
1939 	int ret;
1940 	struct compat_ip6t_get_entries get;
1941 	struct xt_table *t;
1942 
1943 	if (*len < sizeof(get)) {
1944 		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1945 		return -EINVAL;
1946 	}
1947 
1948 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1949 		return -EFAULT;
1950 
1951 	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1952 		duprintf("compat_get_entries: %u != %zu\n",
1953 			 *len, sizeof(get) + get.size);
1954 		return -EINVAL;
1955 	}
1956 
1957 	xt_compat_lock(AF_INET6);
1958 	t = xt_find_table_lock(net, AF_INET6, get.name);
1959 	if (!IS_ERR_OR_NULL(t)) {
1960 		const struct xt_table_info *private = t->private;
1961 		struct xt_table_info info;
1962 		duprintf("t->private->number = %u\n", private->number);
1963 		ret = compat_table_info(private, &info);
1964 		if (!ret && get.size == info.size) {
1965 			ret = compat_copy_entries_to_user(private->size,
1966 							  t, uptr->entrytable);
1967 		} else if (!ret) {
1968 			duprintf("compat_get_entries: I've got %u not %u!\n",
1969 				 private->size, get.size);
1970 			ret = -EAGAIN;
1971 		}
1972 		xt_compat_flush_offsets(AF_INET6);
1973 		module_put(t->me);
1974 		xt_table_unlock(t);
1975 	} else
1976 		ret = t ? PTR_ERR(t) : -ENOENT;
1977 
1978 	xt_compat_unlock(AF_INET6);
1979 	return ret;
1980 }
1981 
1982 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1983 
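/*
 * Compat getsockopt() handler: GET_INFO and GET_ENTRIES need layout
 * conversion; everything else is handed to the native do_ip6t_get_ctl().
 */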
1984 static int
1985 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1986 {
1987 	int ret;
1988 
1989 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1990 		return -EPERM;
1991 
1992 	switch (cmd) {
1993 	case IP6T_SO_GET_INFO:
1994 		ret = get_info(sock_net(sk), user, len, 1);
1995 		break;
1996 	case IP6T_SO_GET_ENTRIES:
1997 		ret = compat_get_entries(sock_net(sk), user, len);
1998 		break;
1999 	default:
2000 		ret = do_ip6t_get_ctl(sk, cmd, user, len);
2001 	}
2002 	return ret;
2003 }
2004 #endif
2005 
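/* Native setsockopt() handler: table replacement and counter addition. */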
2006 static int
2007 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2008 {
2009 	int ret;
2010 
2011 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2012 		return -EPERM;
2013 
2014 	switch (cmd) {
2015 	case IP6T_SO_SET_REPLACE:
2016 		ret = do_replace(sock_net(sk), user, len);
2017 		break;
2018 
2019 	case IP6T_SO_SET_ADD_COUNTERS:
2020 		ret = do_add_counters(sock_net(sk), user, len, 0);
2021 		break;
2022 
2023 	default:
2024 		duprintf("do_ip6t_set_ctl:  unknown request %i\n", cmd);
2025 		ret = -EINVAL;
2026 	}
2027 
2028 	return ret;
2029 }
2030 
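/*
 * Native getsockopt() handler: table info, the rule blob, and match/target
 * revision queries (loading "ip6t_<name>" modules on demand).
 */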
2031 static int
2032 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2033 {
2034 	int ret;
2035 
2036 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2037 		return -EPERM;
2038 
2039 	switch (cmd) {
2040 	case IP6T_SO_GET_INFO:
2041 		ret = get_info(sock_net(sk), user, len, 0);
2042 		break;
2043 
2044 	case IP6T_SO_GET_ENTRIES:
2045 		ret = get_entries(sock_net(sk), user, len);
2046 		break;
2047 
2048 	case IP6T_SO_GET_REVISION_MATCH:
2049 	case IP6T_SO_GET_REVISION_TARGET: {
2050 		struct xt_get_revision rev;
2051 		int target;
2052 
2053 		if (*len != sizeof(rev)) {
2054 			ret = -EINVAL;
2055 			break;
2056 		}
2057 		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2058 			ret = -EFAULT;
2059 			break;
2060 		}
2061 		rev.name[sizeof(rev.name)-1] = 0;
2062 
2063 		if (cmd == IP6T_SO_GET_REVISION_TARGET)
2064 			target = 1;
2065 		else
2066 			target = 0;
2067 
2068 		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2069 							 rev.revision,
2070 							 target, &ret),
2071 					"ip6t_%s", rev.name);
2072 		break;
2073 	}
2074 
2075 	default:
2076 		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2077 		ret = -EINVAL;
2078 	}
2079 
2080 	return ret;
2081 }
2082 
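/*
 * Translate the initial ruleset in @repl and register it with the xtables
 * core.  The per-table modules typically build @repl from their pernet
 * init hook; a rough, illustrative sketch (names as used by the filter
 * table module, not defined in this file):
 *
 *	repl = ip6t_alloc_initial_table(&packet_filter);
 *	net->ipv6.ip6table_filter =
 *		ip6t_register_table(net, &packet_filter, repl);
 *	kfree(repl);
 */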
2083 struct xt_table *ip6t_register_table(struct net *net,
2084 				     const struct xt_table *table,
2085 				     const struct ip6t_replace *repl)
2086 {
2087 	int ret;
2088 	struct xt_table_info *newinfo;
2089 	struct xt_table_info bootstrap = {0};
2090 	void *loc_cpu_entry;
2091 	struct xt_table *new_table;
2092 
2093 	newinfo = xt_alloc_table_info(repl->size);
2094 	if (!newinfo) {
2095 		ret = -ENOMEM;
2096 		goto out;
2097 	}
2098 
2099 	/* choose the copy on our node/cpu, but don't care about preemption */
2100 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2101 	memcpy(loc_cpu_entry, repl->entries, repl->size);
2102 
2103 	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2104 	if (ret != 0)
2105 		goto out_free;
2106 
2107 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
2108 	if (IS_ERR(new_table)) {
2109 		ret = PTR_ERR(new_table);
2110 		goto out_free;
2111 	}
2112 	return new_table;
2113 
2114 out_free:
2115 	xt_free_table_info(newinfo);
2116 out:
2117 	return ERR_PTR(ret);
2118 }
2119 
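/*
 * Tear down @table: run cleanup_entry() on every rule, drop the module
 * reference held for user-installed rulesets and free the table info.
 */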
2120 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2121 {
2122 	struct xt_table_info *private;
2123 	void *loc_cpu_entry;
2124 	struct module *table_owner = table->me;
2125 	struct ip6t_entry *iter;
2126 
2127 	private = xt_unregister_table(table);
2128 
2129 	/* Decrease module usage counts and free resources */
2130 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
2131 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
2132 		cleanup_entry(iter, net);
2133 	if (private->number > private->initial_entries)
2134 		module_put(table_owner);
2135 	xt_free_table_info(private);
2136 }
2137 
2138 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2139 static inline bool
2140 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2141 		     u_int8_t type, u_int8_t code,
2142 		     bool invert)
2143 {
2144 	return (type == test_type && code >= min_code && code <= max_code)
2145 		^ invert;
2146 }
2147 
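/*
 * Match function of the built-in "icmp6" match: compare the ICMPv6
 * type/code of a non-fragment packet against the configured range.
 */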
2148 static bool
2149 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2150 {
2151 	const struct icmp6hdr *ic;
2152 	struct icmp6hdr _icmph;
2153 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2154 
2155 	/* Must not be a fragment. */
2156 	if (par->fragoff != 0)
2157 		return false;
2158 
2159 	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2160 	if (ic == NULL) {
2161 		/* We've been asked to examine this packet, and we
2162 		 * can't.  Hence, no choice but to drop.
2163 		 */
2164 		duprintf("Dropping evil ICMP tinygram.\n");
2165 		par->hotdrop = true;
2166 		return false;
2167 	}
2168 
2169 	return icmp6_type_code_match(icmpinfo->type,
2170 				     icmpinfo->code[0],
2171 				     icmpinfo->code[1],
2172 				     ic->icmp6_type, ic->icmp6_code,
2173 				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
2174 }
2175 
2176 /* Called when user tries to insert an entry of this type. */
2177 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2178 {
2179 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2180 
2181 	/* Must specify no unknown invflags */
2182 	return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2183 }
2184 
2185 /* The built-in targets: standard (NULL) and error. */
2186 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2187 	{
2188 		.name             = XT_STANDARD_TARGET,
2189 		.targetsize       = sizeof(int),
2190 		.family           = NFPROTO_IPV6,
2191 #ifdef CONFIG_COMPAT
2192 		.compatsize       = sizeof(compat_int_t),
2193 		.compat_from_user = compat_standard_from_user,
2194 		.compat_to_user   = compat_standard_to_user,
2195 #endif
2196 	},
2197 	{
2198 		.name             = XT_ERROR_TARGET,
2199 		.target           = ip6t_error,
2200 		.targetsize       = XT_FUNCTION_MAXNAMELEN,
2201 		.family           = NFPROTO_IPV6,
2202 	},
2203 };
2204 
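/* get/setsockopt ranges and handlers registered with nf_register_sockopt(). */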
2205 static struct nf_sockopt_ops ip6t_sockopts = {
2206 	.pf		= PF_INET6,
2207 	.set_optmin	= IP6T_BASE_CTL,
2208 	.set_optmax	= IP6T_SO_SET_MAX+1,
2209 	.set		= do_ip6t_set_ctl,
2210 #ifdef CONFIG_COMPAT
2211 	.compat_set	= compat_do_ip6t_set_ctl,
2212 #endif
2213 	.get_optmin	= IP6T_BASE_CTL,
2214 	.get_optmax	= IP6T_SO_GET_MAX+1,
2215 	.get		= do_ip6t_get_ctl,
2216 #ifdef CONFIG_COMPAT
2217 	.compat_get	= compat_do_ip6t_get_ctl,
2218 #endif
2219 	.owner		= THIS_MODULE,
2220 };
2221 
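/* The built-in matches: just the "icmp6" type/code match. */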
2222 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2223 	{
2224 		.name       = "icmp6",
2225 		.match      = icmp6_match,
2226 		.matchsize  = sizeof(struct ip6t_icmp),
2227 		.checkentry = icmp6_checkentry,
2228 		.proto      = IPPROTO_ICMPV6,
2229 		.family     = NFPROTO_IPV6,
2230 	},
2231 };
2232 
2233 static int __net_init ip6_tables_net_init(struct net *net)
2234 {
2235 	return xt_proto_init(net, NFPROTO_IPV6);
2236 }
2237 
2238 static void __net_exit ip6_tables_net_exit(struct net *net)
2239 {
2240 	xt_proto_fini(net, NFPROTO_IPV6);
2241 }
2242 
2243 static struct pernet_operations ip6_tables_net_ops = {
2244 	.init = ip6_tables_net_init,
2245 	.exit = ip6_tables_net_exit,
2246 };
2247 
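/*
 * Module init: register the per-net xtables state, then the built-in
 * targets and matches, and finally the sockopt interface; unwind in
 * reverse order on failure.
 */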
2248 static int __init ip6_tables_init(void)
2249 {
2250 	int ret;
2251 
2252 	ret = register_pernet_subsys(&ip6_tables_net_ops);
2253 	if (ret < 0)
2254 		goto err1;
2255 
2256 	/* No one else will be downing sem now, so we won't sleep */
2257 	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2258 	if (ret < 0)
2259 		goto err2;
2260 	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2261 	if (ret < 0)
2262 		goto err4;
2263 
2264 	/* Register setsockopt */
2265 	ret = nf_register_sockopt(&ip6t_sockopts);
2266 	if (ret < 0)
2267 		goto err5;
2268 
2269 	pr_info("(C) 2000-2006 Netfilter Core Team\n");
2270 	return 0;
2271 
2272 err5:
2273 	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2274 err4:
2275 	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2276 err2:
2277 	unregister_pernet_subsys(&ip6_tables_net_ops);
2278 err1:
2279 	return ret;
2280 }
2281 
2282 static void __exit ip6_tables_fini(void)
2283 {
2284 	nf_unregister_sockopt(&ip6t_sockopts);
2285 
2286 	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2287 	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2288 	unregister_pernet_subsys(&ip6_tables_net_ops);
2289 }
2290 
2291 EXPORT_SYMBOL(ip6t_register_table);
2292 EXPORT_SYMBOL(ip6t_unregister_table);
2293 EXPORT_SYMBOL(ip6t_do_table);
2294 
2295 module_init(ip6_tables_init);
2296 module_exit(ip6_tables_fini);
2297