1/*
2 *  ebtables
3 *
4 *  Author:
5 *  Bart De Schuymer		<bdschuym@pandora.be>
6 *
7 *  ebtables.c,v 2.0, July, 2002
8 *
9 *  This code is strongly inspired by the iptables code which is
10 *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11 *
12 *  This program is free software; you can redistribute it and/or
13 *  modify it under the terms of the GNU General Public License
14 *  as published by the Free Software Foundation; either version
15 *  2 of the License, or (at your option) any later version.
16 */
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18#include <linux/kmod.h>
19#include <linux/module.h>
20#include <linux/vmalloc.h>
21#include <linux/netfilter/x_tables.h>
22#include <linux/netfilter_bridge/ebtables.h>
23#include <linux/spinlock.h>
24#include <linux/mutex.h>
25#include <linux/slab.h>
26#include <asm/uaccess.h>
27#include <linux/smp.h>
28#include <linux/cpumask.h>
29#include <linux/audit.h>
30#include <net/sock.h>
31/* needed for logical [in,out]-dev filtering */
32#include "../br_private.h"
33
/* Log a complaint about a malformed/inconsistent ruleset; swap in the
 * empty definition below to compile these messages out.
 */
#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
					 "report to author: "format, ## args)
/* #define BUGPRINT(format, args...) */

/*
 * Each cpu has its own set of counters, so there is no need for write_lock in
 * the softirq
 * For reading or updating the counters, the user context needs to
 * get a write_lock
 */

/* The size of each set of counters is altered to get cache alignment */
#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
/* Byte offset of entry n's counter inside one cpu's counter area */
#define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
/* Start of the per-cpu counter area for 'cpu', given base c and n entries */
#define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
   COUNTER_OFFSET(n) * cpu))
50
51
52
/* serializes table lookup/replacement and other user-context operations */
static DEFINE_MUTEX(ebt_mutex);
54
55#ifdef CONFIG_COMPAT
56static void ebt_standard_compat_from_user(void *dst, const void *src)
57{
58	int v = *(compat_int_t *)src;
59
60	if (v >= 0)
61		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
62	memcpy(dst, &v, sizeof(v));
63}
64
65static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66{
67	compat_int_t cv = *(int *)src;
68
69	if (cv >= 0)
70		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
71	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
72}
73#endif
74
75
/* The built-in "standard" target: its targinfo is just an int verdict
 * (ACCEPT/DROP/CONTINUE/RETURN or a jump offset); it has no target()
 * function, which ebt_do_table() uses to recognize it on the fast path.
 */
static struct xt_target ebt_standard_target = {
	.name       = "standard",
	.revision   = 0,
	.family     = NFPROTO_BRIDGE,
	.targetsize = sizeof(int),
#ifdef CONFIG_COMPAT
	.compatsize = sizeof(compat_int_t),
	.compat_from_user = ebt_standard_compat_from_user,
	.compat_to_user =  ebt_standard_compat_to_user,
#endif
};
87
/* Run one watcher extension on the skb.  Watchers only observe (e.g. log);
 * their return value is ignored and 0 is always returned so that
 * EBT_WATCHER_ITERATE() keeps going.
 */
static inline int
ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
	       struct xt_action_param *par)
{
	par->target   = w->u.watcher;
	par->targinfo = w->data;
	w->u.watcher->target(skb, par);
	/* watchers don't give a verdict */
	return 0;
}
98
/* Run one match extension on the skb.  Returns EBT_MATCH (0) on match so
 * EBT_MATCH_ITERATE() continues, EBT_NOMATCH (nonzero) to stop iteration
 * and skip this rule.
 */
static inline int
ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
	     struct xt_action_param *par)
{
	par->match     = m->u.match;
	par->matchinfo = m->data;
	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
}
107
108static inline int
109ebt_dev_check(const char *entry, const struct net_device *device)
110{
111	int i = 0;
112	const char *devname;
113
114	if (*entry == '\0')
115		return 0;
116	if (!device)
117		return 1;
118	devname = device->name;
119	/* 1 is the wildcard token */
120	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
121		i++;
122	return devname[i] != entry[i] && entry[i] != 1;
123}
124
/* XOR the test result with the rule's inversion flag ('!' in userspace) */
#define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
/* process standard matches */
static inline int
ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
                const struct net_device *in, const struct net_device *out)
{
	const struct ethhdr *h = eth_hdr(skb);
	const struct net_bridge_port *p;
	__be16 ethproto;
	int verdict, i;

	/* a VLAN-tagged frame is matched against 802.1Q, not the inner proto */
	if (skb_vlan_tag_present(skb))
		ethproto = htons(ETH_P_8021Q);
	else
		ethproto = h->h_proto;

	/* returns 1 on any failed (non-)match, i.e. "rule does not apply" */
	if (e->bitmask & EBT_802_3) {
		if (FWINV2(eth_proto_is_802_3(ethproto), EBT_IPROTO))
			return 1;
	} else if (!(e->bitmask & EBT_NOPROTO) &&
	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
		return 1;

	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
		return 1;
	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
		return 1;
	/* rcu_read_lock()ed by nf_hook_slow */
	if (in && (p = br_port_get_rcu(in)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
		return 1;
	if (out && (p = br_port_get_rcu(out)) != NULL &&
	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
		return 1;

	/* masked MAC compare: accumulate differing bits, match iff all zero */
	if (e->bitmask & EBT_SOURCEMAC) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
			   e->sourcemsk[i];
		if (FWINV2(verdict != 0, EBT_ISOURCE) )
			return 1;
	}
	if (e->bitmask & EBT_DESTMAC) {
		verdict = 0;
		for (i = 0; i < 6; i++)
			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
			   e->destmsk[i];
		if (FWINV2(verdict != 0, EBT_IDEST) )
			return 1;
	}
	return 0;
}
178
179static inline
180struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
181{
182	return (void *)entry + entry->next_offset;
183}
184
185/* Do some firewalling */
186unsigned int ebt_do_table(struct sk_buff *skb,
187			  const struct nf_hook_state *state,
188			  struct ebt_table *table)
189{
190	unsigned int hook = state->hook;
191	int i, nentries;
192	struct ebt_entry *point;
193	struct ebt_counter *counter_base, *cb_base;
194	const struct ebt_entry_target *t;
195	int verdict, sp = 0;
196	struct ebt_chainstack *cs;
197	struct ebt_entries *chaininfo;
198	const char *base;
199	const struct ebt_table_info *private;
200	struct xt_action_param acpar;
201
202	acpar.family  = NFPROTO_BRIDGE;
203	acpar.net     = state->net;
204	acpar.in      = state->in;
205	acpar.out     = state->out;
206	acpar.hotdrop = false;
207	acpar.hooknum = hook;
208
209	read_lock_bh(&table->lock);
210	private = table->private;
211	cb_base = COUNTER_BASE(private->counters, private->nentries,
212	   smp_processor_id());
213	if (private->chainstack)
214		cs = private->chainstack[smp_processor_id()];
215	else
216		cs = NULL;
217	chaininfo = private->hook_entry[hook];
218	nentries = private->hook_entry[hook]->nentries;
219	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
220	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
221	/* base for chain jumps */
222	base = private->entries;
223	i = 0;
224	while (i < nentries) {
225		if (ebt_basic_match(point, skb, state->in, state->out))
226			goto letscontinue;
227
228		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
229			goto letscontinue;
230		if (acpar.hotdrop) {
231			read_unlock_bh(&table->lock);
232			return NF_DROP;
233		}
234
235		/* increase counter */
236		(*(counter_base + i)).pcnt++;
237		(*(counter_base + i)).bcnt += skb->len;
238
239		/* these should only watch: not modify, nor tell us
240		   what to do with the packet */
241		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
242
243		t = (struct ebt_entry_target *)
244		   (((char *)point) + point->target_offset);
245		/* standard target */
246		if (!t->u.target->target)
247			verdict = ((struct ebt_standard_target *)t)->verdict;
248		else {
249			acpar.target   = t->u.target;
250			acpar.targinfo = t->data;
251			verdict = t->u.target->target(skb, &acpar);
252		}
253		if (verdict == EBT_ACCEPT) {
254			read_unlock_bh(&table->lock);
255			return NF_ACCEPT;
256		}
257		if (verdict == EBT_DROP) {
258			read_unlock_bh(&table->lock);
259			return NF_DROP;
260		}
261		if (verdict == EBT_RETURN) {
262letsreturn:
263#ifdef CONFIG_NETFILTER_DEBUG
264			if (sp == 0) {
265				BUGPRINT("RETURN on base chain");
266				/* act like this is EBT_CONTINUE */
267				goto letscontinue;
268			}
269#endif
270			sp--;
271			/* put all the local variables right */
272			i = cs[sp].n;
273			chaininfo = cs[sp].chaininfo;
274			nentries = chaininfo->nentries;
275			point = cs[sp].e;
276			counter_base = cb_base +
277			   chaininfo->counter_offset;
278			continue;
279		}
280		if (verdict == EBT_CONTINUE)
281			goto letscontinue;
282#ifdef CONFIG_NETFILTER_DEBUG
283		if (verdict < 0) {
284			BUGPRINT("bogus standard verdict\n");
285			read_unlock_bh(&table->lock);
286			return NF_DROP;
287		}
288#endif
289		/* jump to a udc */
290		cs[sp].n = i + 1;
291		cs[sp].chaininfo = chaininfo;
292		cs[sp].e = ebt_next_entry(point);
293		i = 0;
294		chaininfo = (struct ebt_entries *) (base + verdict);
295#ifdef CONFIG_NETFILTER_DEBUG
296		if (chaininfo->distinguisher) {
297			BUGPRINT("jump to non-chain\n");
298			read_unlock_bh(&table->lock);
299			return NF_DROP;
300		}
301#endif
302		nentries = chaininfo->nentries;
303		point = (struct ebt_entry *)chaininfo->data;
304		counter_base = cb_base + chaininfo->counter_offset;
305		sp++;
306		continue;
307letscontinue:
308		point = ebt_next_entry(point);
309		i++;
310	}
311
312	/* I actually like this :) */
313	if (chaininfo->policy == EBT_RETURN)
314		goto letsreturn;
315	if (chaininfo->policy == EBT_ACCEPT) {
316		read_unlock_bh(&table->lock);
317		return NF_ACCEPT;
318	}
319	read_unlock_bh(&table->lock);
320	return NF_DROP;
321}
322
323/* If it succeeds, returns element and locks mutex */
324static inline void *
325find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
326   struct mutex *mutex)
327{
328	struct {
329		struct list_head list;
330		char name[EBT_FUNCTION_MAXNAMELEN];
331	} *e;
332
333	mutex_lock(mutex);
334	list_for_each_entry(e, head, list) {
335		if (strcmp(e->name, name) == 0)
336			return e;
337	}
338	*error = -ENOENT;
339	mutex_unlock(mutex);
340	return NULL;
341}
342
/* Like find_inlist_lock_noload(), but on a miss tries to load the module
 * named "<prefix><name>" via request_module() and then looks up again.
 */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
   int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
351
/* Find an ebtables table by name in this netns, auto-loading the
 * "ebtable_<name>" module if needed.  On success the ebt_mutex passed in
 * is held; see find_inlist_lock_noload() for the locking contract.
 */
static inline struct ebt_table *
find_table_lock(struct net *net, const char *name, int *error,
		struct mutex *mutex)
{
	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
				"ebtable_", error, mutex);
}
359
/* Validate one match of an entry: bounds-check it against the entry's
 * watchers_offset, resolve the match extension by name (taking a module
 * reference), and run xt_check_match() on it.  On success *cnt is bumped
 * so the caller can clean up exactly the matches that were set up.
 */
static inline int
ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
		unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_match *match;
	/* bytes remaining between this match and the watchers area */
	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
	int ret;

	if (left < sizeof(struct ebt_entry_match) ||
	    left - sizeof(struct ebt_entry_match) < m->match_size)
		return -EINVAL;

	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
	if (IS_ERR(match))
		return PTR_ERR(match);
	m->u.match = match;

	par->match     = match;
	par->matchinfo = m->data;
	ret = xt_check_match(par, m->match_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		/* drop the reference taken by xt_request_find_match() */
		module_put(match->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
390
/* Validate one watcher of an entry: bounds-check it against the entry's
 * target_offset, resolve the watcher (an xt_target) by name with a module
 * reference, and run xt_check_target().  *cnt counts successfully
 * checked watchers for error-path cleanup.
 */
static inline int
ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
		  unsigned int *cnt)
{
	const struct ebt_entry *e = par->entryinfo;
	struct xt_target *watcher;
	/* bytes remaining between this watcher and the target area */
	size_t left = ((char *)e + e->target_offset) - (char *)w;
	int ret;

	if (left < sizeof(struct ebt_entry_watcher) ||
	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
		return -EINVAL;

	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
	if (IS_ERR(watcher))
		return PTR_ERR(watcher);
	w->u.watcher = watcher;

	par->target   = watcher;
	par->targinfo = w->data;
	ret = xt_check_target(par, w->watcher_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		/* drop the reference taken by xt_request_find_target() */
		module_put(watcher->me);
		return ret;
	}

	(*cnt)++;
	return 0;
}
421
/* Walk the userspace-supplied entries blob and translate repl's userspace
 * hook_entry pointers into kernel pointers inside newinfo->entries.
 * Also sanity-checks that every offset step stays inside entries_size and
 * that every valid hook ends up with a chain header.  Returns 0 or a
 * negative errno.
 */
static int ebt_verify_pointers(const struct ebt_replace *repl,
			       struct ebt_table_info *newinfo)
{
	unsigned int limit = repl->entries_size;
	unsigned int valid_hooks = repl->valid_hooks;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		newinfo->hook_entry[i] = NULL;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	while (offset < limit) {
		size_t left = limit - offset;
		struct ebt_entry *e = (void *)newinfo->entries + offset;

		if (left < sizeof(unsigned int))
			break;

		/* does this offset correspond to one of the userspace
		 * hook_entry pointers?  Then it starts a base chain. */
		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
			if ((valid_hooks & (1 << i)) == 0)
				continue;
			if ((char __user *)repl->hook_entry[i] ==
			     repl->entries + offset)
				break;
		}

		/* chain header (struct ebt_entries) vs. rule (ebt_entry):
		 * distinguished by the EBT_ENTRY_OR_ENTRIES bit in bitmask */
		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
			if (e->bitmask != 0) {
				/* we make userspace set this right,
				   so there is no misunderstanding */
				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
					 "in distinguisher\n");
				return -EINVAL;
			}
			if (i != NF_BR_NUMHOOKS)
				newinfo->hook_entry[i] = (struct ebt_entries *)e;
			if (left < sizeof(struct ebt_entries))
				break;
			offset += sizeof(struct ebt_entries);
		} else {
			if (left < sizeof(struct ebt_entry))
				break;
			if (left < e->next_offset)
				break;
			/* next_offset must at least cover the entry header,
			 * otherwise the walk could loop forever */
			if (e->next_offset < sizeof(struct ebt_entry))
				return -EINVAL;
			offset += e->next_offset;
		}
	}
	if (offset != limit) {
		BUGPRINT("entries_size too small\n");
		return -EINVAL;
	}

	/* check if all valid hooks have a chain */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i] &&
		   (valid_hooks & (1 << i))) {
			BUGPRINT("Valid hook without chain\n");
			return -EINVAL;
		}
	}
	return 0;
}
489
490/*
491 * this one is very careful, as it is the first function
492 * to parse the userspace data
493 */
494static inline int
495ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
496   const struct ebt_table_info *newinfo,
497   unsigned int *n, unsigned int *cnt,
498   unsigned int *totalcnt, unsigned int *udc_cnt)
499{
500	int i;
501
502	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
503		if ((void *)e == (void *)newinfo->hook_entry[i])
504			break;
505	}
506	/* beginning of a new chain
507	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
508	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
509		/* this checks if the previous chain has as many entries
510		   as it said it has */
511		if (*n != *cnt) {
512			BUGPRINT("nentries does not equal the nr of entries "
513				 "in the chain\n");
514			return -EINVAL;
515		}
516		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
517		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
518			/* only RETURN from udc */
519			if (i != NF_BR_NUMHOOKS ||
520			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
521				BUGPRINT("bad policy\n");
522				return -EINVAL;
523			}
524		}
525		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
526			(*udc_cnt)++;
527		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
528			BUGPRINT("counter_offset != totalcnt");
529			return -EINVAL;
530		}
531		*n = ((struct ebt_entries *)e)->nentries;
532		*cnt = 0;
533		return 0;
534	}
535	/* a plain old entry, heh */
536	if (sizeof(struct ebt_entry) > e->watchers_offset ||
537	   e->watchers_offset > e->target_offset ||
538	   e->target_offset >= e->next_offset) {
539		BUGPRINT("entry offsets not in right order\n");
540		return -EINVAL;
541	}
542	/* this is not checked anywhere else */
543	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
544		BUGPRINT("target size too small\n");
545		return -EINVAL;
546	}
547	(*cnt)++;
548	(*totalcnt)++;
549	return 0;
550}
551
/* Per-udc bookkeeping used while checking for chain loops */
struct ebt_cl_stack
{
	struct ebt_chainstack cs;	/* chain header + resume position */
	int from;			/* index of the calling chain, -1 for base */
	unsigned int hookmask;		/* base hooks this udc is reachable from */
};
558
559/*
560 * we need these positions to check that the jumps to a different part of the
561 * entries is a jump to the beginning of a new chain.
562 */
563static inline int
564ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
565   unsigned int *n, struct ebt_cl_stack *udc)
566{
567	int i;
568
569	/* we're only interested in chain starts */
570	if (e->bitmask)
571		return 0;
572	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
573		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
574			break;
575	}
576	/* only care about udc */
577	if (i != NF_BR_NUMHOOKS)
578		return 0;
579
580	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
581	/* these initialisations are depended on later in check_chainloops() */
582	udc[*n].cs.n = 0;
583	udc[*n].hookmask = 0;
584
585	(*n)++;
586	return 0;
587}
588
/* Tear down one match: run its destructor and drop the module reference.
 * When i is non-NULL only the first *i matches are cleaned (partial
 * cleanup after a failed check); returning 1 stops the iteration.
 */
static inline int
ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
{
	struct xt_mtdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.net       = net;
	par.match     = m->u.match;
	par.matchinfo = m->data;
	par.family    = NFPROTO_BRIDGE;
	if (par.match->destroy != NULL)
		par.match->destroy(&par);
	module_put(par.match->me);
	return 0;
}
606
/* Tear down one watcher: run its destructor and drop the module
 * reference.  Non-NULL i limits cleanup to the first *i watchers, as in
 * ebt_cleanup_match().
 */
static inline int
ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
{
	struct xt_tgdtor_param par;

	if (i && (*i)-- == 0)
		return 1;

	par.net      = net;
	par.target   = w->u.watcher;
	par.targinfo = w->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
624
/* Undo ebt_check_entry() for one rule: clean all its watchers and
 * matches, then destroy its target and drop the target module reference.
 * Chain headers (bitmask == 0) are skipped; a non-NULL cnt limits the
 * cleanup to the first *cnt rules.
 */
static inline int
ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
{
	struct xt_tgdtor_param par;
	struct ebt_entry_target *t;

	if (e->bitmask == 0)
		return 0;
	/* we're done */
	if (cnt && (*cnt)-- == 0)
		return 1;
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);

	par.net      = net;
	par.target   = t->u.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_BRIDGE;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	return 0;
}
649
/* Fully validate one rule: flag sanity, determine which hook(s) can reach
 * it (directly or via user-defined chains), then check all its matches,
 * watchers and finally its target, taking module references as it goes.
 * On any failure everything set up so far is torn down again.  *cnt is
 * bumped per fully-checked rule so translate_table() can clean up.
 */
static inline int
ebt_check_entry(struct ebt_entry *e, struct net *net,
   const struct ebt_table_info *newinfo,
   const char *name, unsigned int *cnt,
   struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
{
	struct ebt_entry_target *t;
	struct xt_target *target;
	unsigned int i, j, hook = 0, hookmask = 0;
	size_t gap;
	int ret;
	struct xt_mtchk_param mtpar;
	struct xt_tgchk_param tgpar;

	/* don't mess with the struct ebt_entries */
	if (e->bitmask == 0)
		return 0;

	if (e->bitmask & ~EBT_F_MASK) {
		BUGPRINT("Unknown flag for bitmask\n");
		return -EINVAL;
	}
	if (e->invflags & ~EBT_INV_MASK) {
		BUGPRINT("Unknown flag for inv bitmask\n");
		return -EINVAL;
	}
	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
		BUGPRINT("NOPROTO & 802_3 not allowed\n");
		return -EINVAL;
	}
	/* what hook do we belong to? */
	/* chains are laid out in hook order, so the last hook_entry that
	 * starts before this rule is the chain containing it */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		if (!newinfo->hook_entry[i])
			continue;
		if ((char *)newinfo->hook_entry[i] < (char *)e)
			hook = i;
		else
			break;
	}
	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
	   a base chain */
	if (i < NF_BR_NUMHOOKS)
		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
	else {
		/* rule lives in a udc: inherit the reachability mask that
		 * check_chainloops() computed for that chain */
		for (i = 0; i < udc_cnt; i++)
			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
				break;
		if (i == 0)
			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
		else
			hookmask = cl_s[i - 1].hookmask;
	}
	i = 0;

	mtpar.net	= tgpar.net       = net;
	mtpar.table     = tgpar.table     = name;
	mtpar.entryinfo = tgpar.entryinfo = e;
	mtpar.hook_mask = tgpar.hook_mask = hookmask;
	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
	if (ret != 0)
		goto cleanup_matches;
	j = 0;
	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
	if (ret != 0)
		goto cleanup_watchers;
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
	/* bytes available for the target, including its header */
	gap = e->next_offset - e->target_offset;

	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_watchers;
	}

	t->u.target = target;
	if (t->u.target == &ebt_standard_target) {
		if (gap < sizeof(struct ebt_standard_target)) {
			BUGPRINT("Standard target size too big\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
		if (((struct ebt_standard_target *)t)->verdict <
		   -NUM_STANDARD_TARGETS) {
			BUGPRINT("Invalid standard target\n");
			ret = -EFAULT;
			goto cleanup_watchers;
		}
	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
		/* declared target data doesn't fit in the entry */
		module_put(t->u.target->me);
		ret = -EFAULT;
		goto cleanup_watchers;
	}

	tgpar.target   = target;
	tgpar.targinfo = t->data;
	ret = xt_check_target(&tgpar, t->target_size,
	      e->ethproto, e->invflags & EBT_IPROTO);
	if (ret < 0) {
		module_put(target->me);
		goto cleanup_watchers;
	}
	(*cnt)++;
	return 0;
cleanup_watchers:
	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
cleanup_matches:
	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
	return ret;
}
760
761/*
762 * checks for loops and sets the hook mask for udc
763 * the hook mask for udc tells us from which base chains the udc can be
764 * accessed. This mask is a parameter to the check() functions of the extensions
765 */
/*
 * checks for loops and sets the hook mask for udc
 * the hook mask for udc tells us from which base chains the udc can be
 * accessed. This mask is a parameter to the check() functions of the extensions
 *
 * Iterative depth-first walk starting from the base chain for 'hooknr':
 * following a jump pushes state into cl_s[] (cs.n != 0 marks "on the
 * current path", which is how a loop is detected); reaching the end of a
 * udc pops back via the 'from' index.  Returns 0 if clean, -1 on a bad
 * jump destination or a loop.
 */
static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
   unsigned int udc_cnt, unsigned int hooknr, char *base)
{
	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
	const struct ebt_entry_target *t;

	while (pos < nentries || chain_nr != -1) {
		/* end of udc, go back one 'recursion' step */
		if (pos == nentries) {
			/* put back values of the time when this chain was called */
			e = cl_s[chain_nr].cs.e;
			if (cl_s[chain_nr].from != -1)
				nentries =
				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
			else
				nentries = chain->nentries;
			pos = cl_s[chain_nr].cs.n;
			/* make sure we won't see a loop that isn't one */
			cl_s[chain_nr].cs.n = 0;
			chain_nr = cl_s[chain_nr].from;
			if (pos == nentries)
				continue;
		}
		t = (struct ebt_entry_target *)
		   (((char *)e) + e->target_offset);
		/* only standard targets can jump; anything else is irrelevant
		 * for loop detection */
		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
			goto letscontinue;
		if (e->target_offset + sizeof(struct ebt_standard_target) >
		   e->next_offset) {
			BUGPRINT("Standard target size too big\n");
			return -1;
		}
		verdict = ((struct ebt_standard_target *)t)->verdict;
		if (verdict >= 0) { /* jump to another chain */
			struct ebt_entries *hlp2 =
			   (struct ebt_entries *)(base + verdict);
			/* the destination must be a known udc header */
			for (i = 0; i < udc_cnt; i++)
				if (hlp2 == cl_s[i].cs.chaininfo)
					break;
			/* bad destination or loop */
			if (i == udc_cnt) {
				BUGPRINT("bad destination\n");
				return -1;
			}
			/* cs.n != 0 means the chain is already on the current
			 * walk path: jumping into it again is a loop */
			if (cl_s[i].cs.n) {
				BUGPRINT("loop\n");
				return -1;
			}
			/* already fully verified from this hook: skip */
			if (cl_s[i].hookmask & (1 << hooknr))
				goto letscontinue;
			/* this can't be 0, so the loop test is correct */
			cl_s[i].cs.n = pos + 1;
			pos = 0;
			cl_s[i].cs.e = ebt_next_entry(e);
			e = (struct ebt_entry *)(hlp2->data);
			nentries = hlp2->nentries;
			cl_s[i].from = chain_nr;
			chain_nr = i;
			/* this udc is accessible from the base chain for hooknr */
			cl_s[i].hookmask |= (1 << hooknr);
			continue;
		}
letscontinue:
		e = ebt_next_entry(e);
		pos++;
	}
	return 0;
}
835
836/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
837static int translate_table(struct net *net, const char *name,
838			   struct ebt_table_info *newinfo)
839{
840	unsigned int i, j, k, udc_cnt;
841	int ret;
842	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
843
844	i = 0;
845	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
846		i++;
847	if (i == NF_BR_NUMHOOKS) {
848		BUGPRINT("No valid hooks specified\n");
849		return -EINVAL;
850	}
851	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
852		BUGPRINT("Chains don't start at beginning\n");
853		return -EINVAL;
854	}
855	/* make sure chains are ordered after each other in same order
856	   as their corresponding hooks */
857	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
858		if (!newinfo->hook_entry[j])
859			continue;
860		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
861			BUGPRINT("Hook order must be followed\n");
862			return -EINVAL;
863		}
864		i = j;
865	}
866
867	/* do some early checkings and initialize some things */
868	i = 0; /* holds the expected nr. of entries for the chain */
869	j = 0; /* holds the up to now counted entries for the chain */
870	k = 0; /* holds the total nr. of entries, should equal
871		  newinfo->nentries afterwards */
872	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
873	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
874	   ebt_check_entry_size_and_hooks, newinfo,
875	   &i, &j, &k, &udc_cnt);
876
877	if (ret != 0)
878		return ret;
879
880	if (i != j) {
881		BUGPRINT("nentries does not equal the nr of entries in the "
882			 "(last) chain\n");
883		return -EINVAL;
884	}
885	if (k != newinfo->nentries) {
886		BUGPRINT("Total nentries is wrong\n");
887		return -EINVAL;
888	}
889
890	/* get the location of the udc, put them in an array
891	   while we're at it, allocate the chainstack */
892	if (udc_cnt) {
893		/* this will get free'd in do_replace()/ebt_register_table()
894		   if an error occurs */
895		newinfo->chainstack =
896			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
897		if (!newinfo->chainstack)
898			return -ENOMEM;
899		for_each_possible_cpu(i) {
900			newinfo->chainstack[i] =
901			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
902			if (!newinfo->chainstack[i]) {
903				while (i)
904					vfree(newinfo->chainstack[--i]);
905				vfree(newinfo->chainstack);
906				newinfo->chainstack = NULL;
907				return -ENOMEM;
908			}
909		}
910
911		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
912		if (!cl_s)
913			return -ENOMEM;
914		i = 0; /* the i'th udc */
915		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
916		   ebt_get_udc_positions, newinfo, &i, cl_s);
917		/* sanity check */
918		if (i != udc_cnt) {
919			BUGPRINT("i != udc_cnt\n");
920			vfree(cl_s);
921			return -EFAULT;
922		}
923	}
924
925	/* Check for loops */
926	for (i = 0; i < NF_BR_NUMHOOKS; i++)
927		if (newinfo->hook_entry[i])
928			if (check_chainloops(newinfo->hook_entry[i],
929			   cl_s, udc_cnt, i, newinfo->entries)) {
930				vfree(cl_s);
931				return -EINVAL;
932			}
933
934	/* we now know the following (along with E=mc��):
935	   - the nr of entries in each chain is right
936	   - the size of the allocated space is right
937	   - all valid hooks have a corresponding chain
938	   - there are no loops
939	   - wrong data can still be on the level of a single entry
940	   - could be there are jumps to places that are not the
941	     beginning of a chain. This can only occur in chains that
942	     are not accessible from any base chains, so we don't care. */
943
944	/* used to know what we need to clean up if something goes wrong */
945	i = 0;
946	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
947	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
948	if (ret != 0) {
949		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
950				  ebt_cleanup_entry, net, &i);
951	}
952	vfree(cl_s);
953	return ret;
954}
955
956/* called under write_lock */
957static void get_counters(const struct ebt_counter *oldcounters,
958   struct ebt_counter *counters, unsigned int nentries)
959{
960	int i, cpu;
961	struct ebt_counter *counter_base;
962
963	/* counters of cpu 0 */
964	memcpy(counters, oldcounters,
965	       sizeof(struct ebt_counter) * nentries);
966
967	/* add other counters to those of cpu 0 */
968	for_each_possible_cpu(cpu) {
969		if (cpu == 0)
970			continue;
971		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
972		for (i = 0; i < nentries; i++) {
973			counters[i].pcnt += counter_base[i].pcnt;
974			counters[i].bcnt += counter_base[i].bcnt;
975		}
976	}
977}
978
/* Second half of a table replacement: verify and translate the new blob,
 * look up the live table, atomically swap in the new ebt_table_info under
 * the table write lock (snapshotting the old counters for userspace),
 * then free the old table data and emit an audit record.  On any failure
 * everything allocated for the new table is released again.
 */
static int do_replace_finish(struct net *net, struct ebt_replace *repl,
			      struct ebt_table_info *newinfo)
{
	int ret, i;
	struct ebt_counter *counterstmp = NULL;
	/* used to be able to unlock earlier */
	struct ebt_table_info *table;
	struct ebt_table *t;

	/* the user wants counters back
	   the check on the size is done later, when we have the lock */
	if (repl->num_counters) {
		unsigned long size = repl->num_counters * sizeof(*counterstmp);
		counterstmp = vmalloc(size);
		if (!counterstmp)
			return -ENOMEM;
	}

	newinfo->chainstack = NULL;
	ret = ebt_verify_pointers(repl, newinfo);
	if (ret != 0)
		goto free_counterstmp;

	ret = translate_table(net, repl->name, newinfo);

	if (ret != 0)
		goto free_counterstmp;

	/* on success ebt_mutex is held until we swap and unlock below */
	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
	if (!t) {
		ret = -ENOENT;
		goto free_iterate;
	}

	/* the table doesn't like it */
	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
		goto free_unlock;

	if (repl->num_counters && repl->num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr. of counters requested\n");
		ret = -EINVAL;
		goto free_unlock;
	}

	/* we have the mutex lock, so no danger in reading this pointer */
	table = t->private;
	/* make sure the table can only be rmmod'ed if it contains no rules */
	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
		ret = -ENOENT;
		goto free_unlock;
	} else if (table->nentries && !newinfo->nentries)
		module_put(t->me);
	/* we need an atomic snapshot of the counters */
	write_lock_bh(&t->lock);
	if (repl->num_counters)
		get_counters(t->private->counters, counterstmp,
		   t->private->nentries);

	/* the actual swap: readers see either the old or the new table */
	t->private = newinfo;
	write_unlock_bh(&t->lock);
	mutex_unlock(&ebt_mutex);
	/* so, a user can change the chains while having messed up her counter
	   allocation. Only reason why this is done is because this way the lock
	   is held only once, while this doesn't bring the kernel into a
	   dangerous state. */
	if (repl->num_counters &&
	   copy_to_user(repl->counters, counterstmp,
	   repl->num_counters * sizeof(struct ebt_counter))) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
	}

	/* decrease module count and free resources */
	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
			  ebt_cleanup_entry, net, NULL);

	vfree(table->entries);
	if (table->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->chainstack[i]);
		vfree(table->chainstack);
	}
	vfree(table);

	vfree(counterstmp);

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 repl->name, AF_BRIDGE, repl->nentries);
			audit_log_end(ab);
		}
	}
#endif
	return ret;

free_unlock:
	mutex_unlock(&ebt_mutex);
free_iterate:
	/* undo translate_table(): destroy every checked entry */
	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
			  ebt_cleanup_entry, net, NULL);
free_counterstmp:
	vfree(counterstmp);
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	return ret;
}
1095
/* replace the table */
/* setsockopt(EBT_SO_SET_ENTRIES) handler: validate and copy the new
 * ruleset blob from userspace, then let do_replace_finish() swap it
 * into the named table. On success, ownership of newinfo transfers to
 * the table; on failure everything allocated here is freed again.
 */
static int do_replace(struct net *net, const void __user *user,
		      unsigned int len)
{
	int ret, countersize;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* the request is the fixed header immediately followed by the
	 * entries blob -- nothing more, nothing less
	 */
	if (len != sizeof(tmp) + tmp.entries_size) {
		BUGPRINT("Wrong len argument\n");
		return -EINVAL;
	}

	if (tmp.entries_size == 0) {
		BUGPRINT("Entries_size never zero\n");
		return -EINVAL;
	}
	/* overflow check */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* don't trust userspace to NUL-terminate the table name */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	/* per-cpu counter sets live directly behind the info header */
	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		BUGPRINT("Couldn't copy entries from userspace\n");
		ret = -EFAULT;
		goto free_entries;
	}

	/* on success the table owns newinfo; only free it on failure */
	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
}
1154
/* Register @input_table in @net's bridge table list. A deep copy of
 * the table struct and its template entries blob is made, so callers
 * may pass (module-)static data. Returns the registered table, or an
 * ERR_PTR() value on failure.
 */
struct ebt_table *
ebt_register_table(struct net *net, const struct ebt_table *input_table)
{
	struct ebt_table_info *newinfo;
	struct ebt_table *t, *table;
	struct ebt_replace_kernel *repl;
	int ret, i, countersize;
	void *p;

	/* the template must carry entries, must not carry counters, and
	 * must not already have runtime data attached
	 */
	if (input_table == NULL || (repl = input_table->table) == NULL ||
	    repl->entries == NULL || repl->entries_size == 0 ||
	    repl->counters != NULL || input_table->private != NULL) {
		BUGPRINT("Bad table data for ebt_register_table!!!\n");
		return ERR_PTR(-EINVAL);
	}

	/* Don't add one table to multiple lists. */
	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	/* per-cpu counter sets live directly behind the info header */
	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	ret = -ENOMEM;
	if (!newinfo)
		goto free_newinfo;

	p = vmalloc(repl->entries_size);
	if (!p)
		goto free_newinfo;

	memcpy(p, repl->entries, repl->entries_size);
	newinfo->entries = p;

	newinfo->entries_size = repl->entries_size;
	newinfo->nentries = repl->nentries;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	/* fill in newinfo and parse the entries */
	newinfo->chainstack = NULL;
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		/* rebase the template's hook pointers into our copy */
		if ((repl->valid_hooks & (1 << i)) == 0)
			newinfo->hook_entry[i] = NULL;
		else
			newinfo->hook_entry[i] = p +
				((char *)repl->hook_entry[i] - repl->entries);
	}
	ret = translate_table(net, repl->name, newinfo);
	if (ret != 0) {
		BUGPRINT("Translate_table failed\n");
		goto free_chainstack;
	}

	if (table->check && table->check(newinfo, table->valid_hooks)) {
		BUGPRINT("The table doesn't like its own initial data, lol\n");
		ret = -EINVAL;
		goto free_chainstack;
	}

	table->private = newinfo;
	rwlock_init(&table->lock);
	mutex_lock(&ebt_mutex);
	/* refuse duplicate table names within this netns */
	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			BUGPRINT("Table name already exists\n");
			goto free_unlock;
		}
	}

	/* Hold a reference count if the chains aren't empty */
	if (newinfo->nentries && !try_module_get(table->me)) {
		ret = -ENOENT;
		goto free_unlock;
	}
	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
	mutex_unlock(&ebt_mutex);
	return table;
free_unlock:
	mutex_unlock(&ebt_mutex);
free_chainstack:
	/* can be initialized in translate_table() */
	if (newinfo->chainstack) {
		for_each_possible_cpu(i)
			vfree(newinfo->chainstack[i]);
		vfree(newinfo->chainstack);
	}
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
free_table:
	kfree(table);
out:
	return ERR_PTR(ret);
}
1253
/* Remove @table from @net's table list and release everything the
 * matching ebt_register_table() call allocated: entry cleanup via
 * ebt_cleanup_entry, the entries blob, the chainstacks, the private
 * info and the table struct itself.
 */
void ebt_unregister_table(struct net *net, struct ebt_table *table)
{
	int i;

	if (!table) {
		BUGPRINT("Request to unregister NULL table!!!\n");
		return;
	}
	mutex_lock(&ebt_mutex);
	list_del(&table->list);
	mutex_unlock(&ebt_mutex);
	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
			  ebt_cleanup_entry, net, NULL);
	/* drop the module reference held for non-empty tables */
	if (table->private->nentries)
		module_put(table->me);
	vfree(table->private->entries);
	if (table->private->chainstack) {
		for_each_possible_cpu(i)
			vfree(table->private->chainstack[i]);
		vfree(table->private->chainstack);
	}
	vfree(table->private);
	kfree(table);
}
1278
/* userspace just supplied us with counters */
/* Add the user-supplied counter values onto the first cpu's counter
 * set of table @name. @num_counters must match the table's rule count
 * exactly. Runs under ebt_mutex (taken via find_table_lock) and takes
 * the table write lock for the actual addition.
 */
static int do_update_counters(struct net *net, const char *name,
				struct ebt_counter __user *counters,
				unsigned int num_counters,
				const void __user *user, unsigned int len)
{
	int i, ret;
	struct ebt_counter *tmp;
	struct ebt_table *t;

	if (num_counters == 0)
		return -EINVAL;

	tmp = vmalloc(num_counters * sizeof(*tmp));
	if (!tmp)
		return -ENOMEM;

	/* returns with ebt_mutex held on success */
	t = find_table_lock(net, name, &ret, &ebt_mutex);
	if (!t)
		goto free_tmp;

	/* exactly one counter per rule */
	if (num_counters != t->private->nentries) {
		BUGPRINT("Wrong nr of counters\n");
		ret = -EINVAL;
		goto unlock_mutex;
	}

	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
		ret = -EFAULT;
		goto unlock_mutex;
	}

	/* we want an atomic add of the counters */
	write_lock_bh(&t->lock);

	/* we add to the counters of the first cpu */
	for (i = 0; i < num_counters; i++) {
		t->private->counters[i].pcnt += tmp[i].pcnt;
		t->private->counters[i].bcnt += tmp[i].bcnt;
	}

	write_unlock_bh(&t->lock);
	ret = 0;
unlock_mutex:
	mutex_unlock(&ebt_mutex);
free_tmp:
	vfree(tmp);
	return ret;
}
1328
1329static int update_counters(struct net *net, const void __user *user,
1330			    unsigned int len)
1331{
1332	struct ebt_replace hlp;
1333
1334	if (copy_from_user(&hlp, user, sizeof(hlp)))
1335		return -EFAULT;
1336
1337	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1338		return -EINVAL;
1339
1340	return do_update_counters(net, hlp.name, hlp.counters,
1341				hlp.num_counters, user, len);
1342}
1343
1344static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1345    const char *base, char __user *ubase)
1346{
1347	char __user *hlp = ubase + ((char *)m - base);
1348	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1349
1350	/* ebtables expects 32 bytes long names but xt_match names are 29 bytes
1351	   long. Copy 29 bytes and fill remaining bytes with zeroes. */
1352	strlcpy(name, m->u.match->name, sizeof(name));
1353	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1354		return -EFAULT;
1355	return 0;
1356}
1357
1358static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1359    const char *base, char __user *ubase)
1360{
1361	char __user *hlp = ubase + ((char *)w - base);
1362	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1363
1364	strlcpy(name, w->u.watcher->name, sizeof(name));
1365	if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
1366		return -EFAULT;
1367	return 0;
1368}
1369
1370static inline int
1371ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1372{
1373	int ret;
1374	char __user *hlp;
1375	const struct ebt_entry_target *t;
1376	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1377
1378	if (e->bitmask == 0)
1379		return 0;
1380
1381	hlp = ubase + (((char *)e + e->target_offset) - base);
1382	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1383
1384	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1385	if (ret != 0)
1386		return ret;
1387	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1388	if (ret != 0)
1389		return ret;
1390	strlcpy(name, t->u.target->name, sizeof(name));
1391	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1392		return -EFAULT;
1393	return 0;
1394}
1395
/* Take an atomic snapshot of @nentries summed counters from
 * @oldcounters and copy it to @user. @num_counters == 0 means the
 * caller does not want counters; otherwise it must equal @nentries.
 */
static int copy_counters_to_user(struct ebt_table *t,
				  const struct ebt_counter *oldcounters,
				  void __user *user, unsigned int num_counters,
				  unsigned int nentries)
{
	struct ebt_counter *counterstmp;
	int ret = 0;

	/* userspace might not need the counters */
	if (num_counters == 0)
		return 0;

	if (num_counters != nentries) {
		BUGPRINT("Num_counters wrong\n");
		return -EINVAL;
	}

	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
	if (!counterstmp)
		return -ENOMEM;

	/* write lock so the snapshot is consistent wrt. the softirq path */
	write_lock_bh(&t->lock);
	get_counters(oldcounters, counterstmp, nentries);
	write_unlock_bh(&t->lock);

	if (copy_to_user(user, counterstmp,
	   nentries * sizeof(struct ebt_counter)))
		ret = -EFAULT;
	vfree(counterstmp);
	return ret;
}
1427
/* called with ebt_mutex locked */
/* Dump the table's entries (and optionally counters) into the
 * userspace buffer described by the ebt_replace header at @user.
 * EBT_SO_GET_ENTRIES dumps the live ruleset (t->private), the INIT
 * variant dumps the registration template (t->table).
 */
static int copy_everything_to_user(struct ebt_table *t, void __user *user,
    const int *len, int cmd)
{
	struct ebt_replace tmp;
	const struct ebt_counter *oldcounters;
	unsigned int entries_size, nentries;
	int ret;
	char *entries;

	if (cmd == EBT_SO_GET_ENTRIES) {
		entries_size = t->private->entries_size;
		nentries = t->private->nentries;
		entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		entries_size = t->table->entries_size;
		nentries = t->table->nentries;
		entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* the buffer must fit header + entries (+ counters, if wanted) */
	if (*len != sizeof(struct ebt_replace) + entries_size +
	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
		return -EINVAL;

	if (tmp.nentries != nentries) {
		BUGPRINT("Nentries wrong\n");
		return -EINVAL;
	}

	if (tmp.entries_size != entries_size) {
		BUGPRINT("Wrong size\n");
		return -EINVAL;
	}

	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
					tmp.num_counters, nentries);
	if (ret)
		return ret;

	if (copy_to_user(tmp.entries, entries, entries_size)) {
		BUGPRINT("Couldn't copy entries to userspace\n");
		return -EFAULT;
	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
	   ebt_make_names, entries, tmp.entries);
}
1480
1481static int do_ebt_set_ctl(struct sock *sk,
1482	int cmd, void __user *user, unsigned int len)
1483{
1484	int ret;
1485	struct net *net = sock_net(sk);
1486
1487	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1488		return -EPERM;
1489
1490	switch (cmd) {
1491	case EBT_SO_SET_ENTRIES:
1492		ret = do_replace(net, user, len);
1493		break;
1494	case EBT_SO_SET_COUNTERS:
1495		ret = update_counters(net, user, len);
1496		break;
1497	default:
1498		ret = -EINVAL;
1499	}
1500	return ret;
1501}
1502
/* getsockopt() entry point: table info and entry dumps. The *_INIT_*
 * commands report the pristine registration template (t->table)
 * instead of the live ruleset (t->private).
 */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;
	struct ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* returns with ebt_mutex held on success; every branch below
	 * must drop it again
	 */
	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	switch (cmd) {
	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)) {
			ret = -EINVAL;
			mutex_unlock(&ebt_mutex);
			break;
		}
		if (cmd == EBT_SO_GET_INFO) {
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
		} else {
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		}
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0) {
			BUGPRINT("c2u Didn't work\n");
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;

	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
		break;

	default:
		mutex_unlock(&ebt_mutex);
		ret = -EINVAL;
	}

	return ret;
}
1559
1560#ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */

/* Mirror of struct ebt_replace as laid out by a 32-bit process: user
 * pointers shrink to compat_uptr_t, the leading fields keep their
 * native layout (compat_copy_ebt_replace_from_user relies on that).
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
1575
/* struct ebt_entry_match, _target and _watcher have same layout */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];	/* extension name (from userspace) */
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;	/* size of the ->data payload */
	compat_uint_t data[0];		/* variable-length payload */
};
1585
1586/* account for possible padding between match_size and ->data */
1587static int ebt_compat_entry_padsize(void)
1588{
1589	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1590			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1591	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1592			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1593}
1594
1595static int ebt_compat_match_offset(const struct xt_match *match,
1596				   unsigned int userlen)
1597{
1598	/*
1599	 * ebt_among needs special handling. The kernel .matchsize is
1600	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1601	 * value is expected.
1602	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1603	 */
1604	if (unlikely(match->matchsize == -1))
1605		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1606	return xt_compat_match_offset(match);
1607}
1608
1609static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1610				unsigned int *size)
1611{
1612	const struct xt_match *match = m->u.match;
1613	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1614	int off = ebt_compat_match_offset(match, m->match_size);
1615	compat_uint_t msize = m->match_size - off;
1616
1617	BUG_ON(off >= m->match_size);
1618
1619	if (copy_to_user(cm->u.name, match->name,
1620	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1621		return -EFAULT;
1622
1623	if (match->compat_to_user) {
1624		if (match->compat_to_user(cm->data, m->data))
1625			return -EFAULT;
1626	} else if (copy_to_user(cm->data, m->data, msize))
1627			return -EFAULT;
1628
1629	*size -= ebt_compat_entry_padsize() + off;
1630	*dstptr = cm->data;
1631	*dstptr += msize;
1632	return 0;
1633}
1634
1635static int compat_target_to_user(struct ebt_entry_target *t,
1636				 void __user **dstptr,
1637				 unsigned int *size)
1638{
1639	const struct xt_target *target = t->u.target;
1640	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1641	int off = xt_compat_target_offset(target);
1642	compat_uint_t tsize = t->target_size - off;
1643
1644	BUG_ON(off >= t->target_size);
1645
1646	if (copy_to_user(cm->u.name, target->name,
1647	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1648		return -EFAULT;
1649
1650	if (target->compat_to_user) {
1651		if (target->compat_to_user(cm->data, t->data))
1652			return -EFAULT;
1653	} else if (copy_to_user(cm->data, t->data, tsize))
1654		return -EFAULT;
1655
1656	*size -= ebt_compat_entry_padsize() + off;
1657	*dstptr = cm->data;
1658	*dstptr += tsize;
1659	return 0;
1660}
1661
/* watchers share the target struct layout, so reuse the target copier */
static int compat_watcher_to_user(struct ebt_entry_watcher *w,
				  void __user **dstptr,
				  unsigned int *size)
{
	return compat_target_to_user((struct ebt_entry_target *)w,
							dstptr, size);
}
1669
/* Translate one kernel ebt_entry (or ebt_entries chain header) into
 * the 32-bit layout at *dstptr, advancing *dstptr and shrinking *size.
 * The watchers/target/next offsets are rewritten to account for the
 * smaller compat match/watcher/target blobs.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	/* bitmask == 0: chain header, identical layout in both ABIs,
	 * copied verbatim
	 */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	/* each offset shrinks by however much the preceding blobs shrank */
	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* patch the already-copied entry header with the compat offsets */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1725
1726static int compat_calc_match(struct ebt_entry_match *m, int *off)
1727{
1728	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1729	*off += ebt_compat_entry_padsize();
1730	return 0;
1731}
1732
1733static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1734{
1735	*off += xt_compat_target_offset(w->u.watcher);
1736	*off += ebt_compat_entry_padsize();
1737	return 0;
1738}
1739
/* Compute how much smaller one kernel entry becomes in the compat
 * layout, record the per-entry delta via xt_compat_add_offset() and
 * shrink newinfo->entries_size plus any affected hook offsets.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers (bitmask == 0) have identical layouts */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	/* total shrinkage: every match, every watcher, plus the target */
	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): 'base - hookptr' is a pointer difference
		 * cast back to a pointer -- presumably the intent is to
		 * adjust only hooks located after this entry; verify
		 * against upstream before touching this comparison.
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
					newinfo->hook_entry[i] + off,
					newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1782
1783
/* Compute the 32-bit view of @info: reset the xt compat offset table
 * and walk all entries with compat_calc_entry(), which shrinks
 * newinfo->entries_size and the hook offsets accordingly.
 */
static int compat_table_info(const struct ebt_table_info *info,
			     struct compat_ebt_replace *newinfo)
{
	unsigned int size = info->entries_size;
	const void *entries = info->entries;

	/* start from the native size; compat_calc_entry subtracts deltas */
	newinfo->entries_size = size;

	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
							entries, newinfo);
}
1796
/* Dump table entries and counters to a 32-bit process, translating
 * every entry with compat_copy_entry_to_user.
 * NOTE(review): presumably runs under ebt_mutex like
 * copy_everything_to_user -- the caller is outside this chunk; confirm.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	/* live ruleset for GET_ENTRIES, registration template otherwise */
	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	/* compute the (smaller) compat entries_size into repl */
	memcpy(&repl, &tmp, sizeof(repl));
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	/* the user buffer must fit header + compat entries (+ counters) */
	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1852
/* bookkeeping for the two-pass 32->64 bit entry translation: the first
 * pass (buf_kern_start == NULL) only computes sizes, the second pass
 * writes the translated blob into the kernel buffer
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1859
1860static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1861{
1862	state->buf_kern_offset += sz;
1863	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1864}
1865
1866static int ebt_buf_add(struct ebt_entries_buf_state *state,
1867		       void *data, unsigned int sz)
1868{
1869	if (state->buf_kern_start == NULL)
1870		goto count_only;
1871
1872	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1873
1874	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1875
1876 count_only:
1877	state->buf_user_offset += sz;
1878	return ebt_buf_count(state, sz);
1879}
1880
1881static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1882{
1883	char *b = state->buf_kern_start;
1884
1885	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1886
1887	if (b != NULL && sz > 0)
1888		memset(b + state->buf_kern_offset, 0, sz);
1889	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1890	return ebt_buf_count(state, sz);
1891}
1892
/* which kind of extension a compat_ebt_entry_mwt blob describes */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1898
/* Translate one 32-bit match/watcher/target blob to the kernel layout
 * at the current write position of @state (or only account for its
 * size during the sizing pass). Returns the number of bytes the blob's
 * payload occupies kernel-side (match_size + off), or a negative error.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0;
	unsigned int size_kern, match_size = mwt->match_size;

	/* take a NUL-terminated copy of the user-supplied name */
	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* ebt_among registers with matchsize == -1 (variable size) */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;

	default:
		return -EINVAL;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	/* zero the XT_ALIGN padding behind the kernel payload.
	 * NOTE(review): these BUG_ONs fire on bookkeeping-derived sizes;
	 * consider converting to a graceful error like upstream did.
	 */
	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
1968
1969/*
1970 * return size of all matches, watchers or target, including necessary
1971 * alignment and padding.
1972 */
1973static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1974			unsigned int size_left, enum compat_mwt type,
1975			struct ebt_entries_buf_state *state, const void *base)
1976{
1977	int growth = 0;
1978	char *buf;
1979
1980	if (size_left == 0)
1981		return 0;
1982
1983	buf = (char *) match32;
1984
1985	while (size_left >= sizeof(*match32)) {
1986		struct ebt_entry_match *match_kern;
1987		int ret;
1988
1989		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1990		if (match_kern) {
1991			char *tmp;
1992			tmp = state->buf_kern_start + state->buf_kern_offset;
1993			match_kern = (struct ebt_entry_match *) tmp;
1994		}
1995		ret = ebt_buf_add(state, buf, sizeof(*match32));
1996		if (ret < 0)
1997			return ret;
1998		size_left -= sizeof(*match32);
1999
2000		/* add padding before match->data (if any) */
2001		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
2002		if (ret < 0)
2003			return ret;
2004
2005		if (match32->match_size > size_left)
2006			return -EINVAL;
2007
2008		size_left -= match32->match_size;
2009
2010		ret = compat_mtw_from_user(match32, type, state, base);
2011		if (ret < 0)
2012			return ret;
2013
2014		BUG_ON(ret < match32->match_size);
2015		growth += ret - match32->match_size;
2016		growth += ebt_compat_entry_padsize();
2017
2018		buf += sizeof(*match32);
2019		buf += match32->match_size;
2020
2021		if (match_kern)
2022			match_kern->match_size = ret;
2023
2024		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2025		match32 = (struct compat_ebt_entry_mwt *) buf;
2026	}
2027
2028	return growth;
2029}
2030
/* called for all ebt_entry structures. */
/* Translate (or size, on the first pass) one 32-bit entry: copy the
 * invariant part of ebt_entry, then its matches, watchers and target,
 * patching the three offsets to account for kernel-side growth.
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	/* bitmask == 0: chain header, identical layout in both ABIs */
	if (!entry->bitmask) {
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		/* remember where the three offset fields land in the kernel
		 * copy; they are patched below once the growth is known
		 */
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		/* i doubles as the enum compat_mwt for this section:
		 * 0 = matches, 1 = watchers, 2 = target
		 */
		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	/* sizing pass: record this entry's growth for later adjustment */
	if (state->buf_kern_start == NULL) {
		unsigned int offset = buf_start - (char *) base;

		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
		if (ret < 0)
			return ret;
	}

	/* bytes consumed from the userspace blob by this entry */
	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
2117
2118/*
2119 * repl->entries_size is the size of the ebt_entry blob in userspace.
2120 * It might need more memory when copied to a 64 bit kernel in case
2121 * userspace is 32-bit. So, first task: find out how much memory is needed.
2122 *
2123 * Called before validation is performed.
2124 */
2125static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2126				struct ebt_entries_buf_state *state)
2127{
2128	unsigned int size_remaining = size_user;
2129	int ret;
2130
2131	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2132					&size_remaining, state);
2133	if (ret < 0)
2134		return ret;
2135
2136	WARN_ON(size_remaining);
2137	return state->buf_kern_offset;
2138}
2139
2140
/* Convert a struct compat_ebt_replace supplied by a 32-bit process
 * into a native struct ebt_replace, widening the embedded pointers.
 * Applies the same length and overflow checks as do_replace().
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					    void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow checks, mirroring do_replace() */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* the leading fields (name .. entries_size) share their layout */
	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
2176
2177static int compat_do_replace(struct net *net, void __user *user,
2178			     unsigned int len)
2179{
2180	int ret, i, countersize, size64;
2181	struct ebt_table_info *newinfo;
2182	struct ebt_replace tmp;
2183	struct ebt_entries_buf_state state;
2184	void *entries_tmp;
2185
2186	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2187	if (ret) {
2188		/* try real handler in case userland supplied needed padding */
2189		if (ret == -EINVAL && do_replace(net, user, len) == 0)
2190			ret = 0;
2191		return ret;
2192	}
2193
2194	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2195	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2196	if (!newinfo)
2197		return -ENOMEM;
2198
2199	if (countersize)
2200		memset(newinfo->counters, 0, countersize);
2201
2202	memset(&state, 0, sizeof(state));
2203
2204	newinfo->entries = vmalloc(tmp.entries_size);
2205	if (!newinfo->entries) {
2206		ret = -ENOMEM;
2207		goto free_newinfo;
2208	}
2209	if (copy_from_user(
2210	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2211		ret = -EFAULT;
2212		goto free_entries;
2213	}
2214
2215	entries_tmp = newinfo->entries;
2216
2217	xt_compat_lock(NFPROTO_BRIDGE);
2218
2219	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2220	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2221	if (ret < 0)
2222		goto out_unlock;
2223
2224	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2225		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2226		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2227
2228	size64 = ret;
2229	newinfo->entries = vmalloc(size64);
2230	if (!newinfo->entries) {
2231		vfree(entries_tmp);
2232		ret = -ENOMEM;
2233		goto out_unlock;
2234	}
2235
2236	memset(&state, 0, sizeof(state));
2237	state.buf_kern_start = newinfo->entries;
2238	state.buf_kern_len = size64;
2239
2240	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2241	BUG_ON(ret < 0);	/* parses same data again */
2242
2243	vfree(entries_tmp);
2244	tmp.entries_size = size64;
2245
2246	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2247		char __user *usrptr;
2248		if (tmp.hook_entry[i]) {
2249			unsigned int delta;
2250			usrptr = (char __user *) tmp.hook_entry[i];
2251			delta = usrptr - tmp.entries;
2252			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2253			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2254		}
2255	}
2256
2257	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2258	xt_compat_unlock(NFPROTO_BRIDGE);
2259
2260	ret = do_replace_finish(net, &tmp, newinfo);
2261	if (ret == 0)
2262		return ret;
2263free_entries:
2264	vfree(newinfo->entries);
2265free_newinfo:
2266	vfree(newinfo);
2267	return ret;
2268out_unlock:
2269	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2270	xt_compat_unlock(NFPROTO_BRIDGE);
2271	goto free_entries;
2272}
2273
2274static int compat_update_counters(struct net *net, void __user *user,
2275				  unsigned int len)
2276{
2277	struct compat_ebt_replace hlp;
2278
2279	if (copy_from_user(&hlp, user, sizeof(hlp)))
2280		return -EFAULT;
2281
2282	/* try real handler in case userland supplied needed padding */
2283	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2284		return update_counters(net, user, len);
2285
2286	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2287					hlp.num_counters, user, len);
2288}
2289
2290static int compat_do_ebt_set_ctl(struct sock *sk,
2291		int cmd, void __user *user, unsigned int len)
2292{
2293	int ret;
2294	struct net *net = sock_net(sk);
2295
2296	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2297		return -EPERM;
2298
2299	switch (cmd) {
2300	case EBT_SO_SET_ENTRIES:
2301		ret = compat_do_replace(net, user, len);
2302		break;
2303	case EBT_SO_SET_COUNTERS:
2304		ret = compat_update_counters(net, user, len);
2305		break;
2306	default:
2307		ret = -EINVAL;
2308  }
2309	return ret;
2310}
2311
/* getsockopt entry point for 32 bit userland (CONFIG_COMPAT).
 * Requires CAP_NET_ADMIN in the socket's user namespace.
 *
 * INFO commands return a struct compat_ebt_replace header; the ENTRIES
 * commands return the rule blob itself.  On success find_table_lock()
 * leaves ebt_mutex held; it is dropped together with the xt compat lock
 * at the common exit below.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
		void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;
	struct net *net = sock_net(sk);

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding:
	 * for the INFO commands *len must match the compat layout exactly,
	 * otherwise assume a native-sized caller.
	 */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success this takes ebt_mutex; on failure ret holds the errno */
	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		/* fills in the remaining compat-layout fields from the
		 * live table -- presumably recomputes entries_size for the
		 * 32 bit representation; confirm against compat_table_info()
		 */
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		/* *len == sizeof(tmp) here, guaranteed by the check above */
		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		/* initial (boot-time) table description */
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2386#endif
2387
/* sockopt registration record for the ebtables control interface.
 * Note: registered on PF_INET, with separate compat_* handlers serving
 * 32 bit userland when CONFIG_COMPAT is enabled.
 */
static struct nf_sockopt_ops ebt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2404
2405static int __init ebtables_init(void)
2406{
2407	int ret;
2408
2409	ret = xt_register_target(&ebt_standard_target);
2410	if (ret < 0)
2411		return ret;
2412	ret = nf_register_sockopt(&ebt_sockopts);
2413	if (ret < 0) {
2414		xt_unregister_target(&ebt_standard_target);
2415		return ret;
2416	}
2417
2418	printk(KERN_INFO "Ebtables v2.0 registered\n");
2419	return 0;
2420}
2421
/* Module exit: tear down in reverse registration order. */
static void __exit ebtables_fini(void)
{
	nf_unregister_sockopt(&ebt_sockopts);
	xt_unregister_target(&ebt_standard_target);
	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
}
2428
/* Table (un)registration and the traversal entry point are exported for
 * use by other kernel modules.
 */
EXPORT_SYMBOL(ebt_register_table);
EXPORT_SYMBOL(ebt_unregister_table);
EXPORT_SYMBOL(ebt_do_table);
module_init(ebtables_init);
module_exit(ebtables_fini);
MODULE_LICENSE("GPL");
2435