5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
/* Debug helper: reports internal consistency failures (bad rule blobs from
 * userspace, impossible states) to the kernel log. The commented-out variant
 * below is the "compiled out" form. */
33 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args)
35 /* #define BUGPRINT(format, args...) */
38 * Each cpu has its own set of counters, so there is no need for write_lock in
40 * For reading or updating the counters, the user context needs to
/* Per-CPU counter layout helpers: each CPU's counter array is padded to a
 * cache-line multiple (SMP_ALIGN) so CPUs do not false-share counter lines.
 * COUNTER_BASE(c, n, cpu) yields CPU `cpu`'s slice of an n-entry array at c. */
44 /* The size of each set of counters is altered to get cache alignment */
45 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
46 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
47 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
48 COUNTER_OFFSET(n) * cpu))
/* Serialises table registration/replacement and the table list walk. */
52 static DEFINE_MUTEX(ebt_mutex);
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
57 int v = *(compat_int_t *)src;
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66 compat_int_t cv = *(int *)src;
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
75 static struct xt_target ebt_standard_target = {
78 .family = NFPROTO_BRIDGE,
79 .targetsize = sizeof(int),
81 .compatsize = sizeof(compat_int_t),
82 .compat_from_user = ebt_standard_compat_from_user,
83 .compat_to_user = ebt_standard_compat_to_user,
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 struct xt_action_param *par)
91 par->target = w->u.watcher;
92 par->targinfo = w->data;
93 w->u.watcher->target(skb, par);
94 /* watchers don't give a verdict */
99 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
100 struct xt_action_param *par)
102 par->match = m->u.match;
103 par->matchinfo = m->data;
104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
108 ebt_dev_check(const char *entry, const struct net_device *device)
117 devname = device->name;
118 /* 1 is the wildcard token */
119 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
121 return (devname[i] != entry[i] && entry[i] != 1);
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
125 /* process standard matches */
127 ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
128 const struct net_device *in, const struct net_device *out)
132 if (e->bitmask & EBT_802_3) {
133 if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
135 } else if (!(e->bitmask & EBT_NOPROTO) &&
136 FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
139 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
141 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
143 /* rcu_read_lock()ed by nf_hook_slow */
144 if (in && br_port_exists(in) &&
145 FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
148 if (out && br_port_exists(out) &&
149 FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
153 if (e->bitmask & EBT_SOURCEMAC) {
155 for (i = 0; i < 6; i++)
156 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
158 if (FWINV2(verdict != 0, EBT_ISOURCE) )
161 if (e->bitmask & EBT_DESTMAC) {
163 for (i = 0; i < 6; i++)
164 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
166 if (FWINV2(verdict != 0, EBT_IDEST) )
173 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
175 return (void *)entry + entry->next_offset;
178 /* Do some firewalling */
/* Main packet-path entry: walks the chain for `hook`, runs matches/watchers/
 * targets per rule and returns an NF_* style verdict.  Runs under the
 * table's read lock; per-CPU counters are selected via COUNTER_BASE.
 * NOTE(review): this extract has dropped many lines (declarations of
 * i/nentries/base/sp, goto labels, returns, closing braces) — verify against
 * the canonical ebtables.c before compiling. */
179 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
180 const struct net_device *in, const struct net_device *out,
181 struct ebt_table *table)
184 struct ebt_entry *point;
185 struct ebt_counter *counter_base, *cb_base;
186 const struct ebt_entry_target *t;
/* chainstack records return points for user-defined-chain jumps */
188 struct ebt_chainstack *cs;
189 struct ebt_entries *chaininfo;
191 const struct ebt_table_info *private;
192 struct xt_action_param acpar;
194 acpar.family = NFPROTO_BRIDGE;
197 acpar.hotdrop = false;
198 acpar.hooknum = hook;
200 read_lock_bh(&table->lock);
201 private = table->private;
/* this CPU's slice of the counter array */
202 cb_base = COUNTER_BASE(private->counters, private->nentries,
204 if (private->chainstack)
205 cs = private->chainstack[smp_processor_id()];
208 chaininfo = private->hook_entry[hook];
209 nentries = private->hook_entry[hook]->nentries;
210 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
211 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
212 /* base for chain jumps */
213 base = private->entries;
215 while (i < nentries) {
/* rule doesn't match -> next rule */
216 if (ebt_basic_match(point, eth_hdr(skb), in, out))
219 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
222 read_unlock_bh(&table->lock);
226 /* increase counter */
227 (*(counter_base + i)).pcnt++;
228 (*(counter_base + i)).bcnt += skb->len;
230 /* these should only watch: not modify, nor tell us
231 what to do with the packet */
232 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
234 t = (struct ebt_entry_target *)
235 (((char *)point) + point->target_offset);
236 /* standard target */
/* NULL target fn marks the built-in standard target: verdict is inline */
237 if (!t->u.target->target)
238 verdict = ((struct ebt_standard_target *)t)->verdict;
240 acpar.target = t->u.target;
241 acpar.targinfo = t->data;
242 verdict = t->u.target->target(skb, &acpar);
244 if (verdict == EBT_ACCEPT) {
245 read_unlock_bh(&table->lock);
248 if (verdict == EBT_DROP) {
249 read_unlock_bh(&table->lock);
/* RETURN pops the chainstack back to the calling chain */
252 if (verdict == EBT_RETURN) {
254 #ifdef CONFIG_NETFILTER_DEBUG
256 BUGPRINT("RETURN on base chain");
257 /* act like this is EBT_CONTINUE */
262 /* put all the local variables right */
264 chaininfo = cs[sp].chaininfo;
265 nentries = chaininfo->nentries;
267 counter_base = cb_base +
268 chaininfo->counter_offset;
271 if (verdict == EBT_CONTINUE)
273 #ifdef CONFIG_NETFILTER_DEBUG
275 BUGPRINT("bogus standard verdict\n");
276 read_unlock_bh(&table->lock);
/* verdict >= 0: jump to the chain at entries+verdict; push return point */
282 cs[sp].chaininfo = chaininfo;
283 cs[sp].e = ebt_next_entry(point);
285 chaininfo = (struct ebt_entries *) (base + verdict);
286 #ifdef CONFIG_NETFILTER_DEBUG
287 if (chaininfo->distinguisher) {
288 BUGPRINT("jump to non-chain\n");
289 read_unlock_bh(&table->lock);
293 nentries = chaininfo->nentries;
294 point = (struct ebt_entry *)chaininfo->data;
295 counter_base = cb_base + chaininfo->counter_offset;
299 point = ebt_next_entry(point);
/* end of chain: apply the chain policy */
303 /* I actually like this :) */
304 if (chaininfo->policy == EBT_RETURN)
306 if (chaininfo->policy == EBT_ACCEPT) {
307 read_unlock_bh(&table->lock);
310 read_unlock_bh(&table->lock);
314 /* If it succeeds, returns element and locks mutex */
/* Searches `head` for an element whose ->name matches, taking `mutex`
 * interruptibly first.  On interrupt, *error is set and the lookup aborts.
 * NOTE(review): the extract interleaves the helper-struct declaration
 * (list/name fields, presumably a generic header all table-like objects
 * share) with the function body — lines are missing; verify upstream. */
316 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
320 struct list_head list;
321 char name[EBT_FUNCTION_MAXNAMELEN];
324 *error = mutex_lock_interruptible(mutex);
328 list_for_each_entry(e, head, list) {
329 if (strcmp(e->name, name) == 0)
/* As find_inlist_lock_noload(), but on a miss first requests the module
 * "<prefix><name>" and retries — so e.g. looking up table "filter" can
 * autoload ebtable_filter.  Returns the element with the mutex held, or
 * NULL with *error set.
 */
static void *
find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
		 int *error, struct mutex *mutex)
{
	return try_then_request_module(
			find_inlist_lock_noload(head, name, error, mutex),
			"%s%s", prefix, name);
}
346 static inline struct ebt_table *
347 find_table_lock(struct net *net, const char *name, int *error,
350 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
351 "ebtable_", error, mutex);
355 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
358 const struct ebt_entry *e = par->entryinfo;
359 struct xt_match *match;
360 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
363 if (left < sizeof(struct ebt_entry_match) ||
364 left - sizeof(struct ebt_entry_match) < m->match_size)
367 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
369 return PTR_ERR(match);
373 par->matchinfo = m->data;
374 ret = xt_check_match(par, m->match_size,
375 e->ethproto, e->invflags & EBT_IPROTO);
377 module_put(match->me);
386 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
389 const struct ebt_entry *e = par->entryinfo;
390 struct xt_target *watcher;
391 size_t left = ((char *)e + e->target_offset) - (char *)w;
394 if (left < sizeof(struct ebt_entry_watcher) ||
395 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
398 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
400 return PTR_ERR(watcher);
401 w->u.watcher = watcher;
403 par->target = watcher;
404 par->targinfo = w->data;
405 ret = xt_check_target(par, w->watcher_size,
406 e->ethproto, e->invflags & EBT_IPROTO);
408 module_put(watcher->me);
/* First-pass validation of a userspace-supplied rule blob: walks the entries
 * area, checks that each chain header coincides with a declared hook entry
 * pointer, that sizes/offsets stay inside the blob, and records the kernel
 * addresses of the hook chains in newinfo->hook_entry[].
 * NOTE(review): the extract dropped the error `return -EINVAL` lines and
 * several closing braces — verify against the canonical source. */
416 static int ebt_verify_pointers(const struct ebt_replace *repl,
417 struct ebt_table_info *newinfo)
419 unsigned int limit = repl->entries_size;
420 unsigned int valid_hooks = repl->valid_hooks;
421 unsigned int offset = 0;
424 for (i = 0; i < NF_BR_NUMHOOKS; i++)
425 newinfo->hook_entry[i] = NULL;
427 newinfo->entries_size = repl->entries_size;
428 newinfo->nentries = repl->nentries;
430 while (offset < limit) {
431 size_t left = limit - offset;
432 struct ebt_entry *e = (void *)newinfo->entries + offset;
/* need at least the bitmask word to classify this position */
434 if (left < sizeof(unsigned int))
/* does this offset line up with one of the declared hook chains? */
437 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
438 if ((valid_hooks & (1 << i)) == 0)
440 if ((char __user *)repl->hook_entry[i] ==
441 repl->entries + offset)
/* chain header (distinguisher) vs. ordinary rule entry */
445 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
446 if (e->bitmask != 0) {
447 /* we make userspace set this right,
448 so there is no misunderstanding */
449 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
450 "in distinguisher\n");
453 if (i != NF_BR_NUMHOOKS)
454 newinfo->hook_entry[i] = (struct ebt_entries *)e;
455 if (left < sizeof(struct ebt_entries))
457 offset += sizeof(struct ebt_entries);
459 if (left < sizeof(struct ebt_entry))
461 if (left < e->next_offset)
463 if (e->next_offset < sizeof(struct ebt_entry))
465 offset += e->next_offset;
/* the walk must consume the blob exactly */
468 if (offset != limit) {
469 BUGPRINT("entries_size too small\n");
473 /* check if all valid hooks have a chain */
474 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
475 if (!newinfo->hook_entry[i] &&
476 (valid_hooks & (1 << i))) {
477 BUGPRINT("Valid hook without chain\n");
485 * this one is very careful, as it is the first function
486 * to parse the userspace data
/* Per-position callback for EBT_ENTRY_ITERATE: for chain headers it checks
 * the policy and counter_offset and loads the expected entry count into *n;
 * for plain rules it sanity-checks the internal offsets.  *cnt counts rules
 * seen in the current chain, *totalcnt across all chains, *udc_cnt counts
 * user-defined chains.  NOTE(review): error returns and brace lines were
 * dropped by the extract. */
489 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
490 const struct ebt_table_info *newinfo,
491 unsigned int *n, unsigned int *cnt,
492 unsigned int *totalcnt, unsigned int *udc_cnt)
496 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
497 if ((void *)e == (void *)newinfo->hook_entry[i])
500 /* beginning of a new chain
501 if i == NF_BR_NUMHOOKS it must be a user defined chain */
502 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
503 /* this checks if the previous chain has as many entries
506 BUGPRINT("nentries does not equal the nr of entries "
/* base-chain policy must be DROP or ACCEPT; udc may also use RETURN */
510 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
511 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
512 /* only RETURN from udc */
513 if (i != NF_BR_NUMHOOKS ||
514 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
515 BUGPRINT("bad policy\n");
519 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
521 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
522 BUGPRINT("counter_offset != totalcnt");
525 *n = ((struct ebt_entries *)e)->nentries;
529 /* a plain old entry, heh */
/* the sub-areas (matches < watchers < target) must be in order and fit */
530 if (sizeof(struct ebt_entry) > e->watchers_offset ||
531 e->watchers_offset > e->target_offset ||
532 e->target_offset >= e->next_offset) {
533 BUGPRINT("entry offsets not in right order\n");
536 /* this is not checked anywhere else */
537 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
538 BUGPRINT("target size too small\n");
/* Members of struct ebt_cl_stack (per user-defined chain, used by the loop
 * checker): the saved traversal state and the set of base chains from which
 * this udc is reachable.  NOTE(review): the struct's opening line and its
 * other members are missing from this extract. */
548 struct ebt_chainstack cs;
550 unsigned int hookmask;
554 * we need these positions to check that the jumps to a different part of the
555 * entries is a jump to the beginning of a new chain.
558 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
559 unsigned int *n, struct ebt_cl_stack *udc)
563 /* we're only interested in chain starts */
566 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
567 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
570 /* only care about udc */
571 if (i != NF_BR_NUMHOOKS)
574 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
575 /* these initialisations are depended on later in check_chainloops() */
577 udc[*n].hookmask = 0;
584 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
586 struct xt_mtdtor_param par;
588 if (i && (*i)-- == 0)
592 par.match = m->u.match;
593 par.matchinfo = m->data;
594 par.family = NFPROTO_BRIDGE;
595 if (par.match->destroy != NULL)
596 par.match->destroy(&par);
597 module_put(par.match->me);
602 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
604 struct xt_tgdtor_param par;
606 if (i && (*i)-- == 0)
610 par.target = w->u.watcher;
611 par.targinfo = w->data;
612 par.family = NFPROTO_BRIDGE;
613 if (par.target->destroy != NULL)
614 par.target->destroy(&par);
615 module_put(par.target->me);
620 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
622 struct xt_tgdtor_param par;
623 struct ebt_entry_target *t;
628 if (cnt && (*cnt)-- == 0)
630 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
631 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
632 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
635 par.target = t->u.target;
636 par.targinfo = t->data;
637 par.family = NFPROTO_BRIDGE;
638 if (par.target->destroy != NULL)
639 par.target->destroy(&par);
640 module_put(par.target->me);
/* Full per-rule validation pass: flag checks, hook-membership computation,
 * then checkentry for all matches, watchers and the target.  On any failure
 * the already-checked extensions are unwound via the cleanup_* labels.
 * NOTE(review): the extract dropped the `ret`/`gap` declarations, several
 * returns and closing braces — verify against the canonical source. */
645 ebt_check_entry(struct ebt_entry *e, struct net *net,
646 const struct ebt_table_info *newinfo,
647 const char *name, unsigned int *cnt,
648 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
650 struct ebt_entry_target *t;
651 struct xt_target *target;
652 unsigned int i, j, hook = 0, hookmask = 0;
655 struct xt_mtchk_param mtpar;
656 struct xt_tgchk_param tgpar;
658 /* don't mess with the struct ebt_entries */
662 if (e->bitmask & ~EBT_F_MASK) {
663 BUGPRINT("Unknown flag for bitmask\n");
666 if (e->invflags & ~EBT_INV_MASK) {
667 BUGPRINT("Unknown flag for inv bitmask\n");
670 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
671 BUGPRINT("NOPROTO & 802_3 not allowed\n");
674 /* what hook do we belong to? */
675 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
676 if (!newinfo->hook_entry[i])
678 if ((char *)newinfo->hook_entry[i] < (char *)e)
683 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
685 if (i < NF_BR_NUMHOOKS)
686 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
/* rule lives in a user-defined chain: inherit that chain's hookmask */
688 for (i = 0; i < udc_cnt; i++)
689 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
692 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
694 hookmask = cl_s[i - 1].hookmask;
698 mtpar.net = tgpar.net = net;
699 mtpar.table = tgpar.table = name;
700 mtpar.entryinfo = tgpar.entryinfo = e;
701 mtpar.hook_mask = tgpar.hook_mask = hookmask;
702 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
703 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
705 goto cleanup_matches;
707 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
709 goto cleanup_watchers;
710 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
/* gap = space available for the target record */
711 gap = e->next_offset - e->target_offset;
713 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
714 if (IS_ERR(target)) {
715 ret = PTR_ERR(target);
716 goto cleanup_watchers;
719 t->u.target = target;
720 if (t->u.target == &ebt_standard_target) {
721 if (gap < sizeof(struct ebt_standard_target)) {
722 BUGPRINT("Standard target size too big\n");
724 goto cleanup_watchers;
726 if (((struct ebt_standard_target *)t)->verdict <
727 -NUM_STANDARD_TARGETS) {
728 BUGPRINT("Invalid standard target\n");
730 goto cleanup_watchers;
732 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
733 module_put(t->u.target->me);
735 goto cleanup_watchers;
738 tgpar.target = target;
739 tgpar.targinfo = t->data;
740 ret = xt_check_target(&tgpar, t->target_size,
741 e->ethproto, e->invflags & EBT_IPROTO);
743 module_put(target->me);
744 goto cleanup_watchers;
/* error unwinding: undo the j watchers, then the i matches checked so far */
749 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
751 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
756 * checks for loops and sets the hook mask for udc
757 * the hook mask for udc tells us from which base chains the udc can be
758 * accessed. This mask is a parameter to the check() functions of the extensions
/* Iterative depth-first walk of chain jumps starting from a base chain;
 * cl_s[] doubles as the explicit recursion stack (cs.n != 0 marks a chain
 * on the current path, i.e. a loop).  NOTE(review): several returns and
 * brace lines were dropped by the extract. */
760 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
761 unsigned int udc_cnt, unsigned int hooknr, char *base)
763 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
764 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
765 const struct ebt_entry_target *t;
767 while (pos < nentries || chain_nr != -1) {
768 /* end of udc, go back one 'recursion' step */
769 if (pos == nentries) {
770 /* put back values of the time when this chain was called */
771 e = cl_s[chain_nr].cs.e;
772 if (cl_s[chain_nr].from != -1)
774 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
776 nentries = chain->nentries;
777 pos = cl_s[chain_nr].cs.n;
778 /* make sure we won't see a loop that isn't one */
779 cl_s[chain_nr].cs.n = 0;
780 chain_nr = cl_s[chain_nr].from;
/* only standard targets can jump; anything else just advances */
784 t = (struct ebt_entry_target *)
785 (((char *)e) + e->target_offset);
786 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
788 if (e->target_offset + sizeof(struct ebt_standard_target) >
790 BUGPRINT("Standard target size too big\n");
793 verdict = ((struct ebt_standard_target *)t)->verdict;
794 if (verdict >= 0) { /* jump to another chain */
795 struct ebt_entries *hlp2 =
796 (struct ebt_entries *)(base + verdict);
797 for (i = 0; i < udc_cnt; i++)
798 if (hlp2 == cl_s[i].cs.chaininfo)
800 /* bad destination or loop */
802 BUGPRINT("bad destination\n");
/* already reached from this base hook: subtree was checked, skip descent */
809 if (cl_s[i].hookmask & (1 << hooknr))
811 /* this can't be 0, so the loop test is correct */
812 cl_s[i].cs.n = pos + 1;
814 cl_s[i].cs.e = ebt_next_entry(e);
815 e = (struct ebt_entry *)(hlp2->data);
816 nentries = hlp2->nentries;
817 cl_s[i].from = chain_nr;
819 /* this udc is accessible from the base chain for hooknr */
820 cl_s[i].hookmask |= (1 << hooknr);
824 e = ebt_next_entry(e);
830 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
/* Orchestrates the whole validation of a table blob whose hook pointers have
 * already been resolved: ordering checks, per-entry size/hook checks,
 * chainstack + udc allocation, loop detection, then per-rule extension
 * checks (with rollback on failure).  NOTE(review): error returns, some
 * assignments (e.g. the `i = 0` before the first while) and closing braces
 * were dropped by the extract. */
831 static int translate_table(struct net *net, const char *name,
832 struct ebt_table_info *newinfo)
834 unsigned int i, j, k, udc_cnt;
836 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
/* find the first valid hook; its chain must start the entries blob */
839 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
841 if (i == NF_BR_NUMHOOKS) {
842 BUGPRINT("No valid hooks specified\n");
845 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
846 BUGPRINT("Chains don't start at beginning\n");
849 /* make sure chains are ordered after each other in same order
850 as their corresponding hooks */
851 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
852 if (!newinfo->hook_entry[j])
854 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
855 BUGPRINT("Hook order must be followed\n");
861 /* do some early checkings and initialize some things */
862 i = 0; /* holds the expected nr. of entries for the chain */
863 j = 0; /* holds the up to now counted entries for the chain */
864 k = 0; /* holds the total nr. of entries, should equal
865 newinfo->nentries afterwards */
866 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
867 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
868 ebt_check_entry_size_and_hooks, newinfo,
869 &i, &j, &k, &udc_cnt);
875 BUGPRINT("nentries does not equal the nr of entries in the "
879 if (k != newinfo->nentries) {
880 BUGPRINT("Total nentries is wrong\n");
884 /* get the location of the udc, put them in an array
885 while we're at it, allocate the chainstack */
887 /* this will get free'd in do_replace()/ebt_register_table()
888 if an error occurs */
889 newinfo->chainstack =
890 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
891 if (!newinfo->chainstack)
893 for_each_possible_cpu(i) {
894 newinfo->chainstack[i] =
895 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
896 if (!newinfo->chainstack[i]) {
/* partial-allocation rollback: free what was already allocated */
898 vfree(newinfo->chainstack[--i]);
899 vfree(newinfo->chainstack);
900 newinfo->chainstack = NULL;
905 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
908 i = 0; /* the i'th udc */
909 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
910 ebt_get_udc_positions, newinfo, &i, cl_s);
913 BUGPRINT("i != udc_cnt\n");
919 /* Check for loops */
920 for (i = 0; i < NF_BR_NUMHOOKS; i++)
921 if (newinfo->hook_entry[i])
922 if (check_chainloops(newinfo->hook_entry[i],
923 cl_s, udc_cnt, i, newinfo->entries)) {
928 /* we now know the following (along with E=mc²):
929 - the nr of entries in each chain is right
930 - the size of the allocated space is right
931 - all valid hooks have a corresponding chain
933 - wrong data can still be on the level of a single entry
934 - could be there are jumps to places that are not the
935 beginning of a chain. This can only occur in chains that
936 are not accessible from any base chains, so we don't care. */
938 /* used to know what we need to clean up if something goes wrong */
940 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
941 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
/* on failure, unwind only the i entries that were checked successfully */
943 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
944 ebt_cleanup_entry, net, &i);
950 /* called under write_lock */
951 static void get_counters(const struct ebt_counter *oldcounters,
952 struct ebt_counter *counters, unsigned int nentries)
955 struct ebt_counter *counter_base;
957 /* counters of cpu 0 */
958 memcpy(counters, oldcounters,
959 sizeof(struct ebt_counter) * nentries);
961 /* add other counters to those of cpu 0 */
962 for_each_possible_cpu(cpu) {
965 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
966 for (i = 0; i < nentries; i++) {
967 counters[i].pcnt += counter_base[i].pcnt;
968 counters[i].bcnt += counter_base[i].bcnt;
/* Second half of a table replace: validates the new blob, snapshots the old
 * counters under the write lock, swaps t->private, then tears down the old
 * table outside the lock.  Module refcounting keeps a non-empty table's
 * module pinned.  NOTE(review): goto labels, returns and brace lines were
 * dropped by the extract — verify against the canonical source. */
973 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
974 struct ebt_table_info *newinfo)
977 struct ebt_counter *counterstmp = NULL;
978 /* used to be able to unlock earlier */
979 struct ebt_table_info *table;
982 /* the user wants counters back
983 the check on the size is done later, when we have the lock */
984 if (repl->num_counters) {
985 unsigned long size = repl->num_counters * sizeof(*counterstmp);
986 counterstmp = vmalloc(size);
991 newinfo->chainstack = NULL;
992 ret = ebt_verify_pointers(repl, newinfo);
994 goto free_counterstmp;
996 ret = translate_table(net, repl->name, newinfo);
999 goto free_counterstmp;
1001 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1007 /* the table doesn't like it */
1008 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1011 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1012 BUGPRINT("Wrong nr. of counters requested\n");
1017 /* we have the mutex lock, so no danger in reading this pointer */
1019 /* make sure the table can only be rmmod'ed if it contains no rules */
1020 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1023 } else if (table->nentries && !newinfo->nentries)
1025 /* we need an atomic snapshot of the counters */
1026 write_lock_bh(&t->lock);
1027 if (repl->num_counters)
1028 get_counters(t->private->counters, counterstmp,
1029 t->private->nentries);
/* the actual swap: packet path sees the new table from here on */
1031 t->private = newinfo;
1032 write_unlock_bh(&t->lock);
1033 mutex_unlock(&ebt_mutex);
1034 /* so, a user can change the chains while having messed up her counter
1035 allocation. Only reason why this is done is because this way the lock
1036 is held only once, while this doesn't bring the kernel into a
1038 if (repl->num_counters &&
1039 copy_to_user(repl->counters, counterstmp,
1040 repl->num_counters * sizeof(struct ebt_counter))) {
1046 /* decrease module count and free resources */
1047 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1048 ebt_cleanup_entry, net, NULL);
1050 vfree(table->entries);
1051 if (table->chainstack) {
1052 for_each_possible_cpu(i)
1053 vfree(table->chainstack[i]);
1054 vfree(table->chainstack);
/* error path: free the new (never-installed) table's resources */
1062 mutex_unlock(&ebt_mutex);
1064 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1065 ebt_cleanup_entry, net, NULL);
1068 /* can be initialized in translate_table() */
1069 if (newinfo->chainstack) {
1070 for_each_possible_cpu(i)
1071 vfree(newinfo->chainstack[i]);
1072 vfree(newinfo->chainstack);
1077 /* replace the table */
/* EBT_SO_SET_ENTRIES handler: copies the ebt_replace header and the entries
 * blob from userspace (with size/overflow checks), allocates the new
 * table_info + zeroed per-CPU counters, then hands off to
 * do_replace_finish().  NOTE(review): error returns, the len parameter and
 * the copy_from_user call for the entries were partially dropped by the
 * extract. */
1078 static int do_replace(struct net *net, const void __user *user,
1081 int ret, countersize;
1082 struct ebt_table_info *newinfo;
1083 struct ebt_replace tmp;
1085 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1088 if (len != sizeof(tmp) + tmp.entries_size) {
1089 BUGPRINT("Wrong len argument\n");
1093 if (tmp.entries_size == 0) {
1094 BUGPRINT("Entries_size never zero\n");
1097 /* overflow check */
/* reject nentries/num_counters large enough to overflow the vmalloc sizes */
1098 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1099 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1101 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1104 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1105 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1110 memset(newinfo->counters, 0, countersize);
1112 newinfo->entries = vmalloc(tmp.entries_size);
1113 if (!newinfo->entries) {
1118 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1119 BUGPRINT("Couldn't copy entries from userspace\n");
1124 ret = do_replace_finish(net, &tmp, newinfo);
1128 vfree(newinfo->entries);
/* Register a kernel-provided table (e.g. ebtable_filter) in this netns:
 * duplicates the template, copies its initial entries, validates them via
 * translate_table(), then links the table into net->xt.tables under
 * ebt_mutex.  Returns the registered table or ERR_PTR().
 * NOTE(review): returns, goto labels and braces were dropped by the
 * extract. */
1135 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1137 struct ebt_table_info *newinfo;
1138 struct ebt_table *t, *table;
1139 struct ebt_replace_kernel *repl;
1140 int ret, i, countersize;
1143 if (input_table == NULL || (repl = input_table->table) == NULL ||
1144 repl->entries == 0 || repl->entries_size == 0 ||
1145 repl->counters != NULL || input_table->private != NULL) {
1146 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1147 return ERR_PTR(-EINVAL);
1150 /* Don't add one table to multiple lists. */
1151 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1157 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1158 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1163 p = vmalloc(repl->entries_size);
1167 memcpy(p, repl->entries, repl->entries_size);
1168 newinfo->entries = p;
1170 newinfo->entries_size = repl->entries_size;
1171 newinfo->nentries = repl->nentries;
1174 memset(newinfo->counters, 0, countersize);
1176 /* fill in newinfo and parse the entries */
1177 newinfo->chainstack = NULL;
/* rebase the template's hook pointers onto our private copy of the blob */
1178 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1179 if ((repl->valid_hooks & (1 << i)) == 0)
1180 newinfo->hook_entry[i] = NULL;
1182 newinfo->hook_entry[i] = p +
1183 ((char *)repl->hook_entry[i] - repl->entries);
1185 ret = translate_table(net, repl->name, newinfo);
1187 BUGPRINT("Translate_table failed\n");
1188 goto free_chainstack;
1191 if (table->check && table->check(newinfo, table->valid_hooks)) {
1192 BUGPRINT("The table doesn't like its own initial data, lol\n");
/* NOTE(review): returning here appears to leak table/newinfo/entries
 * instead of going through the free_* path — confirm against upstream */
1193 return ERR_PTR(-EINVAL);
1196 table->private = newinfo;
1197 rwlock_init(&table->lock);
1198 ret = mutex_lock_interruptible(&ebt_mutex);
1200 goto free_chainstack;
1202 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1203 if (strcmp(t->name, table->name) == 0) {
1205 BUGPRINT("Table name already exists\n");
1210 /* Hold a reference count if the chains aren't empty */
1211 if (newinfo->nentries && !try_module_get(table->me)) {
1215 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1216 mutex_unlock(&ebt_mutex);
/* error unwinding: release the mutex, then free everything allocated */
1219 mutex_unlock(&ebt_mutex);
1221 if (newinfo->chainstack) {
1222 for_each_possible_cpu(i)
1223 vfree(newinfo->chainstack[i]);
1224 vfree(newinfo->chainstack);
1226 vfree(newinfo->entries);
1232 return ERR_PTR(ret);
1235 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1240 BUGPRINT("Request to unregister NULL table!!!\n");
1243 mutex_lock(&ebt_mutex);
1244 list_del(&table->list);
1245 mutex_unlock(&ebt_mutex);
1246 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1247 ebt_cleanup_entry, net, NULL);
1248 if (table->private->nentries)
1249 module_put(table->me);
1250 vfree(table->private->entries);
1251 if (table->private->chainstack) {
1252 for_each_possible_cpu(i)
1253 vfree(table->private->chainstack[i]);
1254 vfree(table->private->chainstack);
1256 vfree(table->private);
1260 /* userspace just supplied us with counters */
/* EBT_SO_SET_COUNTERS worker: copies a counter array from userspace and adds
 * it into CPU 0's slice of the live table's counters under the write lock.
 * NOTE(review): locals (ret, i), error returns, goto labels and vfree were
 * dropped by the extract. */
1261 static int do_update_counters(struct net *net, const char *name,
1262 struct ebt_counter __user *counters,
1263 unsigned int num_counters,
1264 const void __user *user, unsigned int len)
1267 struct ebt_counter *tmp;
1268 struct ebt_table *t;
1270 if (num_counters == 0)
1273 tmp = vmalloc(num_counters * sizeof(*tmp));
/* returns with ebt_mutex held on success */
1277 t = find_table_lock(net, name, &ret, &ebt_mutex);
1281 if (num_counters != t->private->nentries) {
1282 BUGPRINT("Wrong nr of counters\n");
1287 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1292 /* we want an atomic add of the counters */
1293 write_lock_bh(&t->lock);
1295 /* we add to the counters of the first cpu */
1296 for (i = 0; i < num_counters; i++) {
1297 t->private->counters[i].pcnt += tmp[i].pcnt;
1298 t->private->counters[i].bcnt += tmp[i].bcnt;
1301 write_unlock_bh(&t->lock);
1304 mutex_unlock(&ebt_mutex);
1310 static int update_counters(struct net *net, const void __user *user,
1313 struct ebt_replace hlp;
1315 if (copy_from_user(&hlp, user, sizeof(hlp)))
1318 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1321 return do_update_counters(net, hlp.name, hlp.counters,
1322 hlp.num_counters, user, len);
1325 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1326 const char *base, char __user *ubase)
1328 char __user *hlp = ubase + ((char *)m - base);
1329 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1334 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1335 const char *base, char __user *ubase)
1337 char __user *hlp = ubase + ((char *)w - base);
1338 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1344 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1348 const struct ebt_entry_target *t;
1350 if (e->bitmask == 0)
1353 hlp = ubase + (((char *)e + e->target_offset) - base);
1354 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1356 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1359 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1362 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
1367 static int copy_counters_to_user(struct ebt_table *t,
1368 const struct ebt_counter *oldcounters,
1369 void __user *user, unsigned int num_counters,
1370 unsigned int nentries)
1372 struct ebt_counter *counterstmp;
1375 /* userspace might not need the counters */
1376 if (num_counters == 0)
1379 if (num_counters != nentries) {
1380 BUGPRINT("Num_counters wrong\n");
1384 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1388 write_lock_bh(&t->lock);
1389 get_counters(oldcounters, counterstmp, nentries);
1390 write_unlock_bh(&t->lock);
1392 if (copy_to_user(user, counterstmp,
1393 nentries * sizeof(struct ebt_counter)))
1399 /* called with ebt_mutex locked */
/* GET_ENTRIES/GET_INIT_ENTRIES handler: copies either the live rules
 * (t->private) or the registration-time template (t->table) plus optional
 * counters to userspace, then rewrites extension pointers back to names.
 * NOTE(review): locals (ret, entries), error returns and braces were
 * dropped by the extract. */
1400 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1401 const int *len, int cmd)
1403 struct ebt_replace tmp;
1404 const struct ebt_counter *oldcounters;
1405 unsigned int entries_size, nentries;
1409 if (cmd == EBT_SO_GET_ENTRIES) {
1410 entries_size = t->private->entries_size;
1411 nentries = t->private->nentries;
1412 entries = t->private->entries;
1413 oldcounters = t->private->counters;
/* else: the initial (template) table contents */
1415 entries_size = t->table->entries_size;
1416 nentries = t->table->nentries;
1417 entries = t->table->entries;
1418 oldcounters = t->table->counters;
1421 if (copy_from_user(&tmp, user, sizeof(tmp)))
/* userspace must have sized its buffer exactly */
1424 if (*len != sizeof(struct ebt_replace) + entries_size +
1425 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1428 if (tmp.nentries != nentries) {
1429 BUGPRINT("Nentries wrong\n");
1433 if (tmp.entries_size != entries_size) {
1434 BUGPRINT("Wrong size\n");
1438 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1439 tmp.num_counters, nentries);
1443 if (copy_to_user(tmp.entries, entries, entries_size)) {
1444 BUGPRINT("Couldn't copy entries to userspace\n");
1447 /* set the match/watcher/target names right */
1448 return EBT_ENTRY_ITERATE(entries, entries_size,
1449 ebt_make_names, entries, tmp.entries);
/* setsockopt() entry point for native (non-compat) ebtables commands.
 * CAP_NET_ADMIN is required; dispatches to table replace or counter
 * update in the socket's network namespace.
 */
static int do_ebt_set_ctl(struct sock *sk,
	int cmd, void __user *user, unsigned int len)

	if (!capable(CAP_NET_ADMIN))
	case EBT_SO_SET_ENTRIES:
		ret = do_replace(sock_net(sk), user, len);
	case EBT_SO_SET_COUNTERS:
		ret = update_counters(sock_net(sk), user, len);
/* getsockopt() entry point for native ebtables queries.  Looks up the
 * named table (taking ebt_mutex via find_table_lock) and answers INFO /
 * INIT_INFO directly, or defers to copy_everything_to_user() for the
 * ENTRIES variants.  ebt_mutex is dropped on every exit path.
 */
static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
	struct ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))

	if (copy_from_user(&tmp, user, sizeof(tmp)))

	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);

	case EBT_SO_GET_INFO:
	case EBT_SO_GET_INIT_INFO:
		if (*len != sizeof(struct ebt_replace)){
			mutex_unlock(&ebt_mutex);
		if (cmd == EBT_SO_GET_INFO) {
			/* live table state */
			tmp.nentries = t->private->nentries;
			tmp.entries_size = t->private->entries_size;
			tmp.valid_hooks = t->valid_hooks;
			/* else: initial table as registered */
			tmp.nentries = t->table->nentries;
			tmp.entries_size = t->table->entries_size;
			tmp.valid_hooks = t->table->valid_hooks;
		mutex_unlock(&ebt_mutex);
		if (copy_to_user(user, &tmp, *len) != 0){
			BUGPRINT("c2u Didn't work\n");
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		ret = copy_everything_to_user(t, user, len, cmd);
		mutex_unlock(&ebt_mutex);
	mutex_unlock(&ebt_mutex);
#ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions. */
/* 32-bit layout of struct ebt_replace: all pointers become compat_uptr_t
 * and are converted with compat_ptr() before use.
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
/* struct ebt_entry_match, _target and _watcher have same layout */
/* Generic 32-bit match/watcher/target header followed by the extension
 * payload (flexible array).
 */
struct compat_ebt_entry_mwt {
	char name[EBT_FUNCTION_MAXNAMELEN];
	compat_uint_t match_size;
	compat_uint_t data[0];
/* account for possible padding between match_size and ->data */
/* Difference between the 64-bit and compat header sizes; added as
 * zero padding when translating 32-bit blobs to kernel layout.
 */
static int ebt_compat_entry_padsize(void)
	/* compile-time guarantee the result is never negative */
	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
		     COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
/* Size delta between the compat and kernel representation of a match's
 * payload, for the given userspace payload length.
 */
static int ebt_compat_match_offset(const struct xt_match *match,
				   unsigned int userlen)
	/*
	 * ebt_among needs special handling. The kernel .matchsize is
	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
	 * value is expected.
	 * Example: userspace sends 4500, ebt_among.c wants 4504.
	 */
	if (unlikely(match->matchsize == -1))
		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
	return xt_compat_match_offset(match);
/* Convert one kernel-side match to 32-bit layout at *dstptr, advancing
 * the destination cursor and shrinking *size by the padding + offset
 * that the compat representation saves.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	/* userspace sees the smaller, compat payload size */
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
	/* let the extension shrink its own payload if it knows how */
	if (match->compat_to_user) {
		if (match->compat_to_user(cm->data, m->data))
	} else if (copy_to_user(cm->data, m->data, msize))

	*size -= ebt_compat_entry_padsize() + off;
/* Convert one kernel-side target (or watcher, same layout) to 32-bit
 * layout at *dstptr; mirrors compat_match_to_user() but uses the fixed
 * xt_compat_target_offset() delta.
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
	/* extension-provided shrink hook, else raw copy */
	if (target->compat_to_user) {
		if (target->compat_to_user(cm->data, t->data))
	} else if (copy_to_user(cm->data, t->data, tsize))

	*size -= ebt_compat_entry_padsize() + off;
/* Watchers share struct layout with targets; reuse the target path. */
static int compat_watcher_to_user(struct ebt_entry_watcher *w,
				  void __user **dstptr,
	return compat_target_to_user((struct ebt_entry_target *)w,
/* Copy one entry (or ebt_entries chain header) to 32-bit userspace,
 * converting all matches/watchers/target and patching the entry's
 * watchers/target/next offsets to account for the shrunken payloads.
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;

	/* bitmask == 0: this is a chain header, copied verbatim */
	if (e->bitmask == 0) {
		if (*size < sizeof(struct ebt_entries))
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);

	if (*size < sizeof(*ce))

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
	*dstptr += sizeof(*ce);

	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	/* (origsize - *size) is how much this entry shrank so far */
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
	ret = compat_target_to_user(t, dstptr, size);
	next_offset = e->next_offset - (origsize - *size);

	/* fix up the offsets in the already-copied entry header */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	   put_user(target_offset, &ce->target_offset) ||
	   put_user(next_offset, &ce->next_offset))

	*size -= sizeof(*ce);
/* Accumulate the compat-vs-kernel size delta for one match into *off. */
static int compat_calc_match(struct ebt_entry_match *m, int *off)
	*off += ebt_compat_match_offset(m->u.match, m->match_size);
	*off += ebt_compat_entry_padsize();
/* Accumulate the compat-vs-kernel size delta for one watcher into *off. */
static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
	*off += xt_compat_target_offset(w->u.watcher);
	*off += ebt_compat_entry_padsize();
/* Compute how much smaller one entry becomes in compat layout, record
 * the per-entry delta with xt_compat_add_offset() (used later for jump
 * fixups), shrink newinfo->entries_size and adjust any hook entry
 * points that lie after this entry.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     struct compat_ebt_replace *newinfo)
	const struct ebt_entry_target *t;
	unsigned int entry_offset;

	/* chain headers have no extensions, nothing shrinks */
	if (e->bitmask == 0)

	entry_offset = (void *)e - base;
	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): 'base - hookptr' (pointer difference cast
		 * back to a pointer) looks suspicious; verify against
		 * upstream whether this comparison is intended. */
		if (info->hook_entry[i] &&
		   (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
				 newinfo->hook_entry[i] + off,
				 newinfo->hook_entry[i]);
/* Walk a whole table and rewrite newinfo's entries_size / hook offsets
 * to the values a 32-bit userspace would see.
 */
static int compat_table_info(const struct ebt_table_info *info,
			     struct compat_ebt_replace *newinfo)
	unsigned int size = info->entries_size;
	const void *entries = info->entries;

	/* start from the kernel size; compat_calc_entry() subtracts */
	newinfo->entries_size = size;

	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
/* Compat counterpart of copy_everything_to_user(): validate the user's
 * compat_ebt_replace header against the compat-sized table, copy the
 * counters, then stream each entry through compat_copy_entry_to_user().
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		/* live table */
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
		/* else: initial table */
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;

	if (copy_from_user(&tmp, user, sizeof(tmp)))

	if (tmp.nentries != tinfo.nentries ||
	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))

	memcpy(&repl, &tmp, sizeof(repl));
	/* compute the compat sizes the user's *len must match */
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
		ret = compat_table_info(&tinfo, &repl);

	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
/* Cursor state for the two-pass compat->kernel translation: pass one
 * runs with buf_kern_start == NULL and only sizes the result, pass two
 * writes into the allocated kernel buffer.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
/* Advance the kernel-side cursor; the '>= sz' check catches u32
 * wraparound of buf_kern_offset and rejects it with -EINVAL.
 */
static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
	state->buf_kern_offset += sz;
	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
/* Append sz bytes of data to the translation buffer.  In the sizing
 * pass (buf_kern_start == NULL) nothing is written, only counted.
 */
static int ebt_buf_add(struct ebt_entries_buf_state *state,
		       void *data, unsigned int sz)
	if (state->buf_kern_start == NULL)

	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);

	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);

	state->buf_user_offset += sz;
	return ebt_buf_count(state, sz);
/* Append sz zero bytes of kernel-only padding (the 64-bit layout is
 * larger than the compat one); the userspace read cursor stays put.
 */
static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
	char *b = state->buf_kern_start;

	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);

	if (b != NULL && sz > 0)
		memset(b + state->buf_kern_offset, 0, sz);
	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
	return ebt_buf_count(state, sz);
/* Translate one 32-bit match/watcher/target payload into its 64-bit
 * kernel form.  Looks up (and, via request_module, possibly loads) the
 * extension by name, converts or copies the payload, records the size
 * delta for later jump fixups, and zero-fills alignment padding.
 * Returns bytes consumed from the userspace blob, or negative errno.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	int off, pad = 0, ret = 0;
	unsigned int size_kern, entry_offset, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	/* dst stays NULL in the sizing pass */
	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	entry_offset = (unsigned char *) mwt - base;
	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
				memcpy(dst, mwt->data, match_size);

		size_kern = match->matchsize;
		/* ebt_among registers matchsize == -1; use userland size */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		off = xt_compat_target_offset(wt);
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			/* NOTE(review): raw copy uses match_size, not the
			 * target's kernel size; confirm padding handling
			 * against upstream. */
				memcpy(dst, mwt->data, match_size);

		size_kern = wt->targetsize;

	/* record this item's size delta for later offset/jump fixups */
	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
				   off + ebt_compat_entry_padsize());

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	/* bytes consumed from the userspace blob */
	return off + match_size;
/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
 */
/* Walks the run of compat mwt headers in [match32, match32+size_left),
 * translating each via compat_mtw_from_user() and patching the
 * kernel-side match_size to the grown value.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
	buf = (char *) match32;

	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;

		/* remember where the kernel-side header lands so its
		 * match_size can be fixed up after translation */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;

		ret = ebt_buf_add(state, buf, sizeof(*match32));
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());

		if (match32->match_size > size_left)

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);

		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

			match_kern->match_size = ret;
	/* a target is always last; leftover bytes indicate corruption */
	WARN_ON(type == EBT_COMPAT_TARGET && size_left);
	match32 = (struct compat_ebt_entry_mwt *) buf;
/* Iterate the compat watcher headers of entry e (from watchers_offset up
 * to target_offset), invoking fn on each; bails out if the walk does not
 * land exactly on target_offset.
 */
#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
	struct compat_ebt_entry_mwt *__watcher; \
	for (__i = e->watchers_offset; \
	     __i < (e)->target_offset; \
	     __i += __watcher->watcher_size + \
	     sizeof(struct compat_ebt_entry_mwt)) { \
		__watcher = (void *)(e) + __i; \
		__ret = fn(__watcher , ## args); \
	if (__i != (e)->target_offset) \
/* Iterate the compat match headers of entry e (from the end of the
 * ebt_entry header up to watchers_offset), invoking fn on each; bails
 * out if the walk does not land exactly on watchers_offset.
 */
#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
	struct compat_ebt_entry_mwt *__match; \
	for (__i = sizeof(struct ebt_entry); \
	     __i < (e)->watchers_offset; \
	     __i += __match->match_size + \
	     sizeof(struct compat_ebt_entry_mwt)) { \
		__match = (void *)(e) + __i; \
		__ret = fn(__match , ## args); \
	if (__i != (e)->watchers_offset) \
/* called for all ebt_entry structures. */
/* Translate one entry: copy the fixed ebt_entry prefix, then each of the
 * match / watcher / target sections via ebt_size_mwt(), finally patch the
 * entry's four offsets to the grown 64-bit layout.
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;

	if (*total < sizeof(struct ebt_entries))

	/* bitmask == 0: chain header, copied unchanged */
	if (!entry->bitmask) {
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			  offsetof(struct ebt_entry, watchers_offset));

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
	       sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		/* second pass: offsets will be rewritten in place here */
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	ret = ebt_buf_add(state, &offsets[1],
			  sizeof(offsets) - sizeof(offsets[0]));
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (offsets_update && new_offset) {
			pr_debug("change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;

	/* startoff becomes the number of user bytes this entry consumed */
	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
/*
 * repl->entries_size is the size of the ebt_entry blob in userspace.
 * It might need more memory when copied to a 64 bit kernel in case
 * userspace is 32-bit. So, first task: find out how much memory is needed.
 *
 * Called before validation is performed.
 */
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
			       struct ebt_entries_buf_state *state)
	unsigned int size_remaining = size_user;

	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
					&size_remaining, state);

	/* the walk must have consumed the entire userspace blob */
	WARN_ON(size_remaining);
	return state->buf_kern_offset;
/* Read a compat_ebt_replace header from userspace and expand it into a
 * native struct ebt_replace, converting all compat_uptr_t fields with
 * compat_ptr().  Also bounds-checks nentries/num_counters so later
 * counter-buffer size computations cannot overflow.
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					     void __user *user, unsigned int len)
	struct compat_ebt_replace tmp;

	if (len < sizeof(tmp))

	if (copy_from_user(&tmp, user, sizeof(tmp)))

	if (len != sizeof(tmp) + tmp.entries_size)

	if (tmp.entries_size == 0)

	/* overflow check for the per-cpu counter allocation */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))

	/* the fields before hook_entry have identical layout */
	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
/* Compat table replace: read the compat header, copy the 32-bit entry
 * blob, size it (pass 1), allocate the 64-bit buffer, translate (pass 2),
 * convert the hook entry pointers via the recorded jump deltas, then
 * finish through the common do_replace_finish() path.
 */
static int compat_do_replace(struct net *net, void __user *user,
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	/* try real handler in case userland supplied needed padding */
	if (ret == -EINVAL && do_replace(net, user, len) == 0)

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
	entries_tmp = newinfo->entries;

	xt_compat_lock(NFPROTO_BRIDGE);

	/* pass 1: sizing only (state.buf_kern_start is NULL) */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {

	/* pass 2: translate into the freshly sized 64-bit buffer */
	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0);	/* parses same data again */

	tmp.entries_size = size64;

	/* shift each hook entry point by its accumulated size delta */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	ret = do_replace_finish(net, &tmp, newinfo);
	vfree(newinfo->entries);
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
/* Compat counter update: validate the compat header length, falling back
 * to the native handler if userspace already supplied native padding.
 */
static int compat_update_counters(struct net *net, void __user *user,
	struct compat_ebt_replace hlp;

	if (copy_from_user(&hlp, user, sizeof(hlp)))

	/* try real handler in case userland supplied needed padding */
	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
		return update_counters(net, user, len);

	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
					hlp.num_counters, user, len);
/* Compat setsockopt() entry point; mirrors do_ebt_set_ctl() but routes
 * to the 32-bit-aware replace / counter handlers.
 */
static int compat_do_ebt_set_ctl(struct sock *sk,
		int cmd, void __user *user, unsigned int len)

	if (!capable(CAP_NET_ADMIN))
	case EBT_SO_SET_ENTRIES:
		ret = compat_do_replace(sock_net(sk), user, len);
	case EBT_SO_SET_COUNTERS:
		ret = compat_update_counters(sock_net(sk), user, len);
/* Compat getsockopt() entry point.  INFO answers are recomputed through
 * compat_table_info() so sizes match the 32-bit view; for ENTRIES the
 * native handler is tried first (a 32-bit binary built with 64-bit
 * padding succeeds there) before falling back to the compat copier.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
				 void __user *user, int *len)
	struct compat_ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))

	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);

	xt_compat_lock(NFPROTO_BRIDGE);
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		ret = compat_table_info(t->private, &tmp);
		tmp.valid_hooks = t->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
	case EBT_SO_GET_INIT_INFO:
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
		ret = compat_copy_everything_to_user(t, user, len, cmd);
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
/* Sockopt registration: native get/set handlers plus the compat pair
 * under CONFIG_COMPAT, covering the EBT_BASE_CTL command range.
 */
static struct nf_sockopt_ops ebt_sockopts =
	.set_optmin = EBT_BASE_CTL,
	.set_optmax = EBT_SO_SET_MAX + 1,
	.set = do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set = compat_do_ebt_set_ctl,
	.get_optmin = EBT_BASE_CTL,
	.get_optmax = EBT_SO_GET_MAX + 1,
	.get = do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get = compat_do_ebt_get_ctl,
	.owner = THIS_MODULE,
/* Module init: register the standard target, then the sockopt handlers;
 * the target is unregistered again if sockopt registration fails.
 */
static int __init ebtables_init(void)
	ret = xt_register_target(&ebt_standard_target);
	ret = nf_register_sockopt(&ebt_sockopts);
		xt_unregister_target(&ebt_standard_target);
	printk(KERN_INFO "Ebtables v2.0 registered\n");
/* Module exit: tear down in reverse registration order. */
static void __exit ebtables_fini(void)
	nf_unregister_sockopt(&ebt_sockopts);
	xt_unregister_target(&ebt_standard_target);
	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2442 EXPORT_SYMBOL(ebt_register_table);
2443 EXPORT_SYMBOL(ebt_unregister_table);
2444 EXPORT_SYMBOL(ebt_do_table);
2445 module_init(ebtables_init);
2446 module_exit(ebtables_fini);
2447 MODULE_LICENSE("GPL");