5 * Bart De Schuymer <bdschuym@pandora.be>
7 * ebtables.c,v 2.0, July, 2002
9 * This code is strongly inspired by the iptables code which is
10 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
33 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args)
35 /* #define BUGPRINT(format, args...) */
38 * Each cpu has its own set of counters, so there is no need for write_lock in
40 * For reading or updating the counters, the user context needs to
44 /* The size of each set of counters is altered to get cache alignment */
45 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
46 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
47 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
48 COUNTER_OFFSET(n) * cpu))
52 static DEFINE_MUTEX(ebt_mutex);
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
57 int v = *(compat_int_t *)src;
60 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 memcpy(dst, &v, sizeof(v));
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66 compat_int_t cv = *(int *)src;
69 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
75 static struct xt_target ebt_standard_target = {
78 .family = NFPROTO_BRIDGE,
79 .targetsize = sizeof(int),
81 .compatsize = sizeof(compat_int_t),
82 .compat_from_user = ebt_standard_compat_from_user,
83 .compat_to_user = ebt_standard_compat_to_user,
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 struct xt_action_param *par)
91 par->target = w->u.watcher;
92 par->targinfo = w->data;
93 w->u.watcher->target(skb, par);
94 /* watchers don't give a verdict */
99 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
100 struct xt_action_param *par)
102 par->match = m->u.match;
103 par->matchinfo = m->data;
104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
108 ebt_dev_check(const char *entry, const struct net_device *device)
117 devname = device->name;
118 /* 1 is the wildcard token */
119 while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
121 return (devname[i] != entry[i] && entry[i] != 1);
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
125 /* process standard matches */
127 ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
128 const struct net_device *in, const struct net_device *out)
132 if (e->bitmask & EBT_802_3) {
133 if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO))
135 } else if (!(e->bitmask & EBT_NOPROTO) &&
136 FWINV2(e->ethproto != h->h_proto, EBT_IPROTO))
139 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
141 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
143 if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
144 e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
146 if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
147 e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
150 if (e->bitmask & EBT_SOURCEMAC) {
152 for (i = 0; i < 6; i++)
153 verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
155 if (FWINV2(verdict != 0, EBT_ISOURCE) )
158 if (e->bitmask & EBT_DESTMAC) {
160 for (i = 0; i < 6; i++)
161 verdict |= (h->h_dest[i] ^ e->destmac[i]) &
163 if (FWINV2(verdict != 0, EBT_IDEST) )
170 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
172 return (void *)entry + entry->next_offset;
175 /* Do some firewalling */
176 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
177 const struct net_device *in, const struct net_device *out,
178 struct ebt_table *table)
181 struct ebt_entry *point;
182 struct ebt_counter *counter_base, *cb_base;
183 const struct ebt_entry_target *t;
185 struct ebt_chainstack *cs;
186 struct ebt_entries *chaininfo;
188 const struct ebt_table_info *private;
189 bool hotdrop = false;
190 struct xt_action_param acpar;
192 acpar.family = NFPROTO_BRIDGE;
195 acpar.hotdrop = &hotdrop;
196 acpar.hooknum = hook;
198 read_lock_bh(&table->lock);
199 private = table->private;
200 cb_base = COUNTER_BASE(private->counters, private->nentries,
202 if (private->chainstack)
203 cs = private->chainstack[smp_processor_id()];
206 chaininfo = private->hook_entry[hook];
207 nentries = private->hook_entry[hook]->nentries;
208 point = (struct ebt_entry *)(private->hook_entry[hook]->data);
209 counter_base = cb_base + private->hook_entry[hook]->counter_offset;
210 /* base for chain jumps */
211 base = private->entries;
213 while (i < nentries) {
214 if (ebt_basic_match(point, eth_hdr(skb), in, out))
217 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
220 read_unlock_bh(&table->lock);
224 /* increase counter */
225 (*(counter_base + i)).pcnt++;
226 (*(counter_base + i)).bcnt += skb->len;
228 /* these should only watch: not modify, nor tell us
229 what to do with the packet */
230 EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
232 t = (struct ebt_entry_target *)
233 (((char *)point) + point->target_offset);
234 /* standard target */
235 if (!t->u.target->target)
236 verdict = ((struct ebt_standard_target *)t)->verdict;
238 acpar.target = t->u.target;
239 acpar.targinfo = t->data;
240 verdict = t->u.target->target(skb, &acpar);
242 if (verdict == EBT_ACCEPT) {
243 read_unlock_bh(&table->lock);
246 if (verdict == EBT_DROP) {
247 read_unlock_bh(&table->lock);
250 if (verdict == EBT_RETURN) {
252 #ifdef CONFIG_NETFILTER_DEBUG
254 BUGPRINT("RETURN on base chain");
255 /* act like this is EBT_CONTINUE */
260 /* put all the local variables right */
262 chaininfo = cs[sp].chaininfo;
263 nentries = chaininfo->nentries;
265 counter_base = cb_base +
266 chaininfo->counter_offset;
269 if (verdict == EBT_CONTINUE)
271 #ifdef CONFIG_NETFILTER_DEBUG
273 BUGPRINT("bogus standard verdict\n");
274 read_unlock_bh(&table->lock);
280 cs[sp].chaininfo = chaininfo;
281 cs[sp].e = ebt_next_entry(point);
283 chaininfo = (struct ebt_entries *) (base + verdict);
284 #ifdef CONFIG_NETFILTER_DEBUG
285 if (chaininfo->distinguisher) {
286 BUGPRINT("jump to non-chain\n");
287 read_unlock_bh(&table->lock);
291 nentries = chaininfo->nentries;
292 point = (struct ebt_entry *)chaininfo->data;
293 counter_base = cb_base + chaininfo->counter_offset;
297 point = ebt_next_entry(point);
301 /* I actually like this :) */
302 if (chaininfo->policy == EBT_RETURN)
304 if (chaininfo->policy == EBT_ACCEPT) {
305 read_unlock_bh(&table->lock);
308 read_unlock_bh(&table->lock);
312 /* If it succeeds, returns element and locks mutex */
314 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
318 struct list_head list;
319 char name[EBT_FUNCTION_MAXNAMELEN];
322 *error = mutex_lock_interruptible(mutex);
326 list_for_each_entry(e, head, list) {
327 if (strcmp(e->name, name) == 0)
336 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
337 int *error, struct mutex *mutex)
339 return try_then_request_module(
340 find_inlist_lock_noload(head, name, error, mutex),
341 "%s%s", prefix, name);
344 static inline struct ebt_table *
345 find_table_lock(struct net *net, const char *name, int *error,
348 return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
349 "ebtable_", error, mutex);
353 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
356 const struct ebt_entry *e = par->entryinfo;
357 struct xt_match *match;
358 size_t left = ((char *)e + e->watchers_offset) - (char *)m;
361 if (left < sizeof(struct ebt_entry_match) ||
362 left - sizeof(struct ebt_entry_match) < m->match_size)
365 match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
367 return PTR_ERR(match);
371 par->matchinfo = m->data;
372 ret = xt_check_match(par, m->match_size,
373 e->ethproto, e->invflags & EBT_IPROTO);
375 module_put(match->me);
384 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
387 const struct ebt_entry *e = par->entryinfo;
388 struct xt_target *watcher;
389 size_t left = ((char *)e + e->target_offset) - (char *)w;
392 if (left < sizeof(struct ebt_entry_watcher) ||
393 left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
396 watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
398 return PTR_ERR(watcher);
399 w->u.watcher = watcher;
401 par->target = watcher;
402 par->targinfo = w->data;
403 ret = xt_check_target(par, w->watcher_size,
404 e->ethproto, e->invflags & EBT_IPROTO);
406 module_put(watcher->me);
414 static int ebt_verify_pointers(const struct ebt_replace *repl,
415 struct ebt_table_info *newinfo)
417 unsigned int limit = repl->entries_size;
418 unsigned int valid_hooks = repl->valid_hooks;
419 unsigned int offset = 0;
422 for (i = 0; i < NF_BR_NUMHOOKS; i++)
423 newinfo->hook_entry[i] = NULL;
425 newinfo->entries_size = repl->entries_size;
426 newinfo->nentries = repl->nentries;
428 while (offset < limit) {
429 size_t left = limit - offset;
430 struct ebt_entry *e = (void *)newinfo->entries + offset;
432 if (left < sizeof(unsigned int))
435 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
436 if ((valid_hooks & (1 << i)) == 0)
438 if ((char __user *)repl->hook_entry[i] ==
439 repl->entries + offset)
443 if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
444 if (e->bitmask != 0) {
445 /* we make userspace set this right,
446 so there is no misunderstanding */
447 BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
448 "in distinguisher\n");
451 if (i != NF_BR_NUMHOOKS)
452 newinfo->hook_entry[i] = (struct ebt_entries *)e;
453 if (left < sizeof(struct ebt_entries))
455 offset += sizeof(struct ebt_entries);
457 if (left < sizeof(struct ebt_entry))
459 if (left < e->next_offset)
461 if (e->next_offset < sizeof(struct ebt_entry))
463 offset += e->next_offset;
466 if (offset != limit) {
467 BUGPRINT("entries_size too small\n");
471 /* check if all valid hooks have a chain */
472 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
473 if (!newinfo->hook_entry[i] &&
474 (valid_hooks & (1 << i))) {
475 BUGPRINT("Valid hook without chain\n");
483 * this one is very careful, as it is the first function
484 * to parse the userspace data
487 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
488 const struct ebt_table_info *newinfo,
489 unsigned int *n, unsigned int *cnt,
490 unsigned int *totalcnt, unsigned int *udc_cnt)
494 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
495 if ((void *)e == (void *)newinfo->hook_entry[i])
498 /* beginning of a new chain
499 if i == NF_BR_NUMHOOKS it must be a user defined chain */
500 if (i != NF_BR_NUMHOOKS || !e->bitmask) {
501 /* this checks if the previous chain has as many entries
504 BUGPRINT("nentries does not equal the nr of entries "
508 if (((struct ebt_entries *)e)->policy != EBT_DROP &&
509 ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
510 /* only RETURN from udc */
511 if (i != NF_BR_NUMHOOKS ||
512 ((struct ebt_entries *)e)->policy != EBT_RETURN) {
513 BUGPRINT("bad policy\n");
517 if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
519 if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
520 BUGPRINT("counter_offset != totalcnt");
523 *n = ((struct ebt_entries *)e)->nentries;
527 /* a plain old entry, heh */
528 if (sizeof(struct ebt_entry) > e->watchers_offset ||
529 e->watchers_offset > e->target_offset ||
530 e->target_offset >= e->next_offset) {
531 BUGPRINT("entry offsets not in right order\n");
534 /* this is not checked anywhere else */
535 if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
536 BUGPRINT("target size too small\n");
546 struct ebt_chainstack cs;
548 unsigned int hookmask;
552 * we need these positions to check that the jumps to a different part of the
553 * entries is a jump to the beginning of a new chain.
556 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
557 unsigned int *n, struct ebt_cl_stack *udc)
561 /* we're only interested in chain starts */
564 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
565 if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
568 /* only care about udc */
569 if (i != NF_BR_NUMHOOKS)
572 udc[*n].cs.chaininfo = (struct ebt_entries *)e;
573 /* these initialisations are depended on later in check_chainloops() */
575 udc[*n].hookmask = 0;
582 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
584 struct xt_mtdtor_param par;
586 if (i && (*i)-- == 0)
590 par.match = m->u.match;
591 par.matchinfo = m->data;
592 par.family = NFPROTO_BRIDGE;
593 if (par.match->destroy != NULL)
594 par.match->destroy(&par);
595 module_put(par.match->me);
600 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
602 struct xt_tgdtor_param par;
604 if (i && (*i)-- == 0)
608 par.target = w->u.watcher;
609 par.targinfo = w->data;
610 par.family = NFPROTO_BRIDGE;
611 if (par.target->destroy != NULL)
612 par.target->destroy(&par);
613 module_put(par.target->me);
618 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
620 struct xt_tgdtor_param par;
621 struct ebt_entry_target *t;
626 if (cnt && (*cnt)-- == 0)
628 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
629 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
630 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
633 par.target = t->u.target;
634 par.targinfo = t->data;
635 par.family = NFPROTO_BRIDGE;
636 if (par.target->destroy != NULL)
637 par.target->destroy(&par);
638 module_put(par.target->me);
643 ebt_check_entry(struct ebt_entry *e, struct net *net,
644 const struct ebt_table_info *newinfo,
645 const char *name, unsigned int *cnt,
646 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
648 struct ebt_entry_target *t;
649 struct xt_target *target;
650 unsigned int i, j, hook = 0, hookmask = 0;
653 struct xt_mtchk_param mtpar;
654 struct xt_tgchk_param tgpar;
656 /* don't mess with the struct ebt_entries */
660 if (e->bitmask & ~EBT_F_MASK) {
661 BUGPRINT("Unknown flag for bitmask\n");
664 if (e->invflags & ~EBT_INV_MASK) {
665 BUGPRINT("Unknown flag for inv bitmask\n");
668 if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
669 BUGPRINT("NOPROTO & 802_3 not allowed\n");
672 /* what hook do we belong to? */
673 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
674 if (!newinfo->hook_entry[i])
676 if ((char *)newinfo->hook_entry[i] < (char *)e)
681 /* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
683 if (i < NF_BR_NUMHOOKS)
684 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
686 for (i = 0; i < udc_cnt; i++)
687 if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
690 hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
692 hookmask = cl_s[i - 1].hookmask;
696 mtpar.net = tgpar.net = net;
697 mtpar.table = tgpar.table = name;
698 mtpar.entryinfo = tgpar.entryinfo = e;
699 mtpar.hook_mask = tgpar.hook_mask = hookmask;
700 mtpar.family = tgpar.family = NFPROTO_BRIDGE;
701 ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
703 goto cleanup_matches;
705 ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
707 goto cleanup_watchers;
708 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
709 gap = e->next_offset - e->target_offset;
711 target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
712 if (IS_ERR(target)) {
713 ret = PTR_ERR(target);
714 goto cleanup_watchers;
717 t->u.target = target;
718 if (t->u.target == &ebt_standard_target) {
719 if (gap < sizeof(struct ebt_standard_target)) {
720 BUGPRINT("Standard target size too big\n");
722 goto cleanup_watchers;
724 if (((struct ebt_standard_target *)t)->verdict <
725 -NUM_STANDARD_TARGETS) {
726 BUGPRINT("Invalid standard target\n");
728 goto cleanup_watchers;
730 } else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
731 module_put(t->u.target->me);
733 goto cleanup_watchers;
736 tgpar.target = target;
737 tgpar.targinfo = t->data;
738 ret = xt_check_target(&tgpar, t->target_size,
739 e->ethproto, e->invflags & EBT_IPROTO);
741 module_put(target->me);
742 goto cleanup_watchers;
747 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
749 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
754 * checks for loops and sets the hook mask for udc
755 * the hook mask for udc tells us from which base chains the udc can be
756 * accessed. This mask is a parameter to the check() functions of the extensions
758 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
759 unsigned int udc_cnt, unsigned int hooknr, char *base)
761 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
762 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
763 const struct ebt_entry_target *t;
765 while (pos < nentries || chain_nr != -1) {
766 /* end of udc, go back one 'recursion' step */
767 if (pos == nentries) {
768 /* put back values of the time when this chain was called */
769 e = cl_s[chain_nr].cs.e;
770 if (cl_s[chain_nr].from != -1)
772 cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
774 nentries = chain->nentries;
775 pos = cl_s[chain_nr].cs.n;
776 /* make sure we won't see a loop that isn't one */
777 cl_s[chain_nr].cs.n = 0;
778 chain_nr = cl_s[chain_nr].from;
782 t = (struct ebt_entry_target *)
783 (((char *)e) + e->target_offset);
784 if (strcmp(t->u.name, EBT_STANDARD_TARGET))
786 if (e->target_offset + sizeof(struct ebt_standard_target) >
788 BUGPRINT("Standard target size too big\n");
791 verdict = ((struct ebt_standard_target *)t)->verdict;
792 if (verdict >= 0) { /* jump to another chain */
793 struct ebt_entries *hlp2 =
794 (struct ebt_entries *)(base + verdict);
795 for (i = 0; i < udc_cnt; i++)
796 if (hlp2 == cl_s[i].cs.chaininfo)
798 /* bad destination or loop */
800 BUGPRINT("bad destination\n");
807 if (cl_s[i].hookmask & (1 << hooknr))
809 /* this can't be 0, so the loop test is correct */
810 cl_s[i].cs.n = pos + 1;
812 cl_s[i].cs.e = ebt_next_entry(e);
813 e = (struct ebt_entry *)(hlp2->data);
814 nentries = hlp2->nentries;
815 cl_s[i].from = chain_nr;
817 /* this udc is accessible from the base chain for hooknr */
818 cl_s[i].hookmask |= (1 << hooknr);
822 e = ebt_next_entry(e);
828 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
829 static int translate_table(struct net *net, const char *name,
830 struct ebt_table_info *newinfo)
832 unsigned int i, j, k, udc_cnt;
834 struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
837 while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
839 if (i == NF_BR_NUMHOOKS) {
840 BUGPRINT("No valid hooks specified\n");
843 if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
844 BUGPRINT("Chains don't start at beginning\n");
847 /* make sure chains are ordered after each other in same order
848 as their corresponding hooks */
849 for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
850 if (!newinfo->hook_entry[j])
852 if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
853 BUGPRINT("Hook order must be followed\n");
859 /* do some early checkings and initialize some things */
860 i = 0; /* holds the expected nr. of entries for the chain */
861 j = 0; /* holds the up to now counted entries for the chain */
862 k = 0; /* holds the total nr. of entries, should equal
863 newinfo->nentries afterwards */
864 udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
865 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
866 ebt_check_entry_size_and_hooks, newinfo,
867 &i, &j, &k, &udc_cnt);
873 BUGPRINT("nentries does not equal the nr of entries in the "
877 if (k != newinfo->nentries) {
878 BUGPRINT("Total nentries is wrong\n");
882 /* get the location of the udc, put them in an array
883 while we're at it, allocate the chainstack */
885 /* this will get free'd in do_replace()/ebt_register_table()
886 if an error occurs */
887 newinfo->chainstack =
888 vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
889 if (!newinfo->chainstack)
891 for_each_possible_cpu(i) {
892 newinfo->chainstack[i] =
893 vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
894 if (!newinfo->chainstack[i]) {
896 vfree(newinfo->chainstack[--i]);
897 vfree(newinfo->chainstack);
898 newinfo->chainstack = NULL;
903 cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
906 i = 0; /* the i'th udc */
907 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
908 ebt_get_udc_positions, newinfo, &i, cl_s);
911 BUGPRINT("i != udc_cnt\n");
917 /* Check for loops */
918 for (i = 0; i < NF_BR_NUMHOOKS; i++)
919 if (newinfo->hook_entry[i])
920 if (check_chainloops(newinfo->hook_entry[i],
921 cl_s, udc_cnt, i, newinfo->entries)) {
926 /* we now know the following (along with E=mc²):
927 - the nr of entries in each chain is right
928 - the size of the allocated space is right
929 - all valid hooks have a corresponding chain
931 - wrong data can still be on the level of a single entry
932 - could be there are jumps to places that are not the
933 beginning of a chain. This can only occur in chains that
934 are not accessible from any base chains, so we don't care. */
936 /* used to know what we need to clean up if something goes wrong */
938 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
939 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
941 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
942 ebt_cleanup_entry, net, &i);
948 /* called under write_lock */
949 static void get_counters(const struct ebt_counter *oldcounters,
950 struct ebt_counter *counters, unsigned int nentries)
953 struct ebt_counter *counter_base;
955 /* counters of cpu 0 */
956 memcpy(counters, oldcounters,
957 sizeof(struct ebt_counter) * nentries);
959 /* add other counters to those of cpu 0 */
960 for_each_possible_cpu(cpu) {
963 counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
964 for (i = 0; i < nentries; i++) {
965 counters[i].pcnt += counter_base[i].pcnt;
966 counters[i].bcnt += counter_base[i].bcnt;
971 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
972 struct ebt_table_info *newinfo)
975 struct ebt_counter *counterstmp = NULL;
976 /* used to be able to unlock earlier */
977 struct ebt_table_info *table;
980 /* the user wants counters back
981 the check on the size is done later, when we have the lock */
982 if (repl->num_counters) {
983 unsigned long size = repl->num_counters * sizeof(*counterstmp);
984 counterstmp = vmalloc(size);
989 newinfo->chainstack = NULL;
990 ret = ebt_verify_pointers(repl, newinfo);
992 goto free_counterstmp;
994 ret = translate_table(net, repl->name, newinfo);
997 goto free_counterstmp;
999 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1005 /* the table doesn't like it */
1006 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1009 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1010 BUGPRINT("Wrong nr. of counters requested\n");
1015 /* we have the mutex lock, so no danger in reading this pointer */
1017 /* make sure the table can only be rmmod'ed if it contains no rules */
1018 if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1021 } else if (table->nentries && !newinfo->nentries)
1023 /* we need an atomic snapshot of the counters */
1024 write_lock_bh(&t->lock);
1025 if (repl->num_counters)
1026 get_counters(t->private->counters, counterstmp,
1027 t->private->nentries);
1029 t->private = newinfo;
1030 write_unlock_bh(&t->lock);
1031 mutex_unlock(&ebt_mutex);
1032 /* so, a user can change the chains while having messed up her counter
1033 allocation. Only reason why this is done is because this way the lock
1034 is held only once, while this doesn't bring the kernel into a
1036 if (repl->num_counters &&
1037 copy_to_user(repl->counters, counterstmp,
1038 repl->num_counters * sizeof(struct ebt_counter))) {
1044 /* decrease module count and free resources */
1045 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1046 ebt_cleanup_entry, net, NULL);
1048 vfree(table->entries);
1049 if (table->chainstack) {
1050 for_each_possible_cpu(i)
1051 vfree(table->chainstack[i]);
1052 vfree(table->chainstack);
1060 mutex_unlock(&ebt_mutex);
1062 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1063 ebt_cleanup_entry, net, NULL);
1066 /* can be initialized in translate_table() */
1067 if (newinfo->chainstack) {
1068 for_each_possible_cpu(i)
1069 vfree(newinfo->chainstack[i]);
1070 vfree(newinfo->chainstack);
1075 /* replace the table */
1076 static int do_replace(struct net *net, const void __user *user,
1079 int ret, countersize;
1080 struct ebt_table_info *newinfo;
1081 struct ebt_replace tmp;
1083 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1086 if (len != sizeof(tmp) + tmp.entries_size) {
1087 BUGPRINT("Wrong len argument\n");
1091 if (tmp.entries_size == 0) {
1092 BUGPRINT("Entries_size never zero\n");
1095 /* overflow check */
1096 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1097 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1099 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1102 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1103 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1108 memset(newinfo->counters, 0, countersize);
1110 newinfo->entries = vmalloc(tmp.entries_size);
1111 if (!newinfo->entries) {
1116 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1117 BUGPRINT("Couldn't copy entries from userspace\n");
1122 ret = do_replace_finish(net, &tmp, newinfo);
1126 vfree(newinfo->entries);
1133 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1135 struct ebt_table_info *newinfo;
1136 struct ebt_table *t, *table;
1137 struct ebt_replace_kernel *repl;
1138 int ret, i, countersize;
1141 if (input_table == NULL || (repl = input_table->table) == NULL ||
1142 repl->entries == 0 || repl->entries_size == 0 ||
1143 repl->counters != NULL || input_table->private != NULL) {
1144 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1145 return ERR_PTR(-EINVAL);
1148 /* Don't add one table to multiple lists. */
1149 table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1155 countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1156 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1161 p = vmalloc(repl->entries_size);
1165 memcpy(p, repl->entries, repl->entries_size);
1166 newinfo->entries = p;
1168 newinfo->entries_size = repl->entries_size;
1169 newinfo->nentries = repl->nentries;
1172 memset(newinfo->counters, 0, countersize);
1174 /* fill in newinfo and parse the entries */
1175 newinfo->chainstack = NULL;
1176 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1177 if ((repl->valid_hooks & (1 << i)) == 0)
1178 newinfo->hook_entry[i] = NULL;
1180 newinfo->hook_entry[i] = p +
1181 ((char *)repl->hook_entry[i] - repl->entries);
1183 ret = translate_table(net, repl->name, newinfo);
1185 BUGPRINT("Translate_table failed\n");
1186 goto free_chainstack;
1189 if (table->check && table->check(newinfo, table->valid_hooks)) {
1190 BUGPRINT("The table doesn't like its own initial data, lol\n");
1191 return ERR_PTR(-EINVAL);
1194 table->private = newinfo;
1195 rwlock_init(&table->lock);
1196 ret = mutex_lock_interruptible(&ebt_mutex);
1198 goto free_chainstack;
1200 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1201 if (strcmp(t->name, table->name) == 0) {
1203 BUGPRINT("Table name already exists\n");
1208 /* Hold a reference count if the chains aren't empty */
1209 if (newinfo->nentries && !try_module_get(table->me)) {
1213 list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1214 mutex_unlock(&ebt_mutex);
1217 mutex_unlock(&ebt_mutex);
1219 if (newinfo->chainstack) {
1220 for_each_possible_cpu(i)
1221 vfree(newinfo->chainstack[i]);
1222 vfree(newinfo->chainstack);
1224 vfree(newinfo->entries);
1230 return ERR_PTR(ret);
1233 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1238 BUGPRINT("Request to unregister NULL table!!!\n");
1241 mutex_lock(&ebt_mutex);
1242 list_del(&table->list);
1243 mutex_unlock(&ebt_mutex);
1244 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1245 ebt_cleanup_entry, net, NULL);
1246 if (table->private->nentries)
1247 module_put(table->me);
1248 vfree(table->private->entries);
1249 if (table->private->chainstack) {
1250 for_each_possible_cpu(i)
1251 vfree(table->private->chainstack[i]);
1252 vfree(table->private->chainstack);
1254 vfree(table->private);
1258 /* userspace just supplied us with counters */
1259 static int do_update_counters(struct net *net, const char *name,
1260 struct ebt_counter __user *counters,
1261 unsigned int num_counters,
1262 const void __user *user, unsigned int len)
1265 struct ebt_counter *tmp;
1266 struct ebt_table *t;
1268 if (num_counters == 0)
1271 tmp = vmalloc(num_counters * sizeof(*tmp));
1275 t = find_table_lock(net, name, &ret, &ebt_mutex);
1279 if (num_counters != t->private->nentries) {
1280 BUGPRINT("Wrong nr of counters\n");
1285 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1290 /* we want an atomic add of the counters */
1291 write_lock_bh(&t->lock);
1293 /* we add to the counters of the first cpu */
1294 for (i = 0; i < num_counters; i++) {
1295 t->private->counters[i].pcnt += tmp[i].pcnt;
1296 t->private->counters[i].bcnt += tmp[i].bcnt;
1299 write_unlock_bh(&t->lock);
1302 mutex_unlock(&ebt_mutex);
1308 static int update_counters(struct net *net, const void __user *user,
1311 struct ebt_replace hlp;
1313 if (copy_from_user(&hlp, user, sizeof(hlp)))
1316 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1319 return do_update_counters(net, hlp.name, hlp.counters,
1320 hlp.num_counters, user, len);
1323 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1324 const char *base, char __user *ubase)
1326 char __user *hlp = ubase + ((char *)m - base);
1327 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
1332 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1333 const char *base, char __user *ubase)
1335 char __user *hlp = ubase + ((char *)w - base);
1336 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
1342 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1346 const struct ebt_entry_target *t;
1348 if (e->bitmask == 0)
1351 hlp = ubase + (((char *)e + e->target_offset) - base);
1352 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1354 ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1357 ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1360 if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
1365 static int copy_counters_to_user(struct ebt_table *t,
1366 const struct ebt_counter *oldcounters,
1367 void __user *user, unsigned int num_counters,
1368 unsigned int nentries)
1370 struct ebt_counter *counterstmp;
1373 /* userspace might not need the counters */
1374 if (num_counters == 0)
1377 if (num_counters != nentries) {
1378 BUGPRINT("Num_counters wrong\n");
1382 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1386 write_lock_bh(&t->lock);
1387 get_counters(oldcounters, counterstmp, nentries);
1388 write_unlock_bh(&t->lock);
1390 if (copy_to_user(user, counterstmp,
1391 nentries * sizeof(struct ebt_counter)))
1397 /* called with ebt_mutex locked */
/* copy_everything_to_user - EBT_SO_GET_ENTRIES / EBT_SO_GET_INIT_ENTRIES.
 * Copies counters, the entries blob, and then fixes up the extension names
 * in the userspace image (kernel-side pointers are replaced by names via
 * ebt_make_names).  GET_ENTRIES reads the live table (t->private),
 * GET_INIT_ENTRIES reads the initial template (t->table).
 */
1398 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1399 const int *len, int cmd)
1401 struct ebt_replace tmp;
1402 const struct ebt_counter *oldcounters;
1403 unsigned int entries_size, nentries;
1407 if (cmd == EBT_SO_GET_ENTRIES) {
1408 entries_size = t->private->entries_size;
1409 nentries = t->private->nentries;
1410 entries = t->private->entries;
1411 oldcounters = t->private->counters;
/* else branch: initial (unmodified) table image */
1413 entries_size = t->table->entries_size;
1414 nentries = t->table->nentries;
1415 entries = t->table->entries;
1416 oldcounters = t->table->counters;
1419 if (copy_from_user(&tmp, user, sizeof(tmp)))
/* *len must be header + entries + (optional) counter array, exactly */
1422 if (*len != sizeof(struct ebt_replace) + entries_size +
1423 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1426 if (tmp.nentries != nentries) {
1427 BUGPRINT("Nentries wrong\n");
1431 if (tmp.entries_size != entries_size) {
1432 BUGPRINT("Wrong size\n");
1436 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1437 tmp.num_counters, nentries);
1441 if (copy_to_user(tmp.entries, entries, entries_size)) {
1442 BUGPRINT("Couldn't copy entries to userspace\n");
1445 /* set the match/watcher/target names right */
1446 return EBT_ENTRY_ITERATE(entries, entries_size,
1447 ebt_make_names, entries, tmp.entries);
/* do_ebt_set_ctl - native setsockopt dispatcher (requires CAP_NET_ADMIN).
 * EBT_SO_SET_ENTRIES replaces a table, EBT_SO_SET_COUNTERS updates counters.
 * NOTE(review): the switch default case and final return are outside this
 * view.
 */
1450 static int do_ebt_set_ctl(struct sock *sk,
1451 int cmd, void __user *user, unsigned int len)
1455 if (!capable(CAP_NET_ADMIN))
1459 case EBT_SO_SET_ENTRIES:
1460 ret = do_replace(sock_net(sk), user, len);
1462 case EBT_SO_SET_COUNTERS:
1463 ret = update_counters(sock_net(sk), user, len);
/* do_ebt_get_ctl - native getsockopt dispatcher (requires CAP_NET_ADMIN).
 * Looks the table up by name under ebt_mutex (find_table_lock) and answers
 * INFO/INIT_INFO with a filled-in ebt_replace header, or delegates
 * ENTRIES/INIT_ENTRIES to copy_everything_to_user().  All paths unlock
 * ebt_mutex before returning.
 */
1471 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1474 struct ebt_replace tmp;
1475 struct ebt_table *t;
1477 if (!capable(CAP_NET_ADMIN))
1480 if (copy_from_user(&tmp, user, sizeof(tmp)))
1483 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
1488 case EBT_SO_GET_INFO:
1489 case EBT_SO_GET_INIT_INFO:
1490 if (*len != sizeof(struct ebt_replace)){
1492 mutex_unlock(&ebt_mutex);
/* GET_INFO reports the live table, GET_INIT_INFO the pristine template */
1495 if (cmd == EBT_SO_GET_INFO) {
1496 tmp.nentries = t->private->nentries;
1497 tmp.entries_size = t->private->entries_size;
1498 tmp.valid_hooks = t->valid_hooks;
1500 tmp.nentries = t->table->nentries;
1501 tmp.entries_size = t->table->entries_size;
1502 tmp.valid_hooks = t->table->valid_hooks;
1504 mutex_unlock(&ebt_mutex);
1505 if (copy_to_user(user, &tmp, *len) != 0){
1506 BUGPRINT("c2u Didn't work\n");
1513 case EBT_SO_GET_ENTRIES:
1514 case EBT_SO_GET_INIT_ENTRIES:
1515 ret = copy_everything_to_user(t, user, len, cmd);
1516 mutex_unlock(&ebt_mutex);
1520 mutex_unlock(&ebt_mutex);
1527 #ifdef CONFIG_COMPAT
1528 /* 32 bit-userspace compatibility definitions. */
/* 32-bit layout of struct ebt_replace: identical up to valid_hooks, then
 * all pointers shrink to compat_uptr_t, so the hook_entry array and the
 * trailing pointers differ from the native struct.
 */
1529 struct compat_ebt_replace {
1530 char name[EBT_TABLE_MAXNAMELEN];
1531 compat_uint_t valid_hooks;
1532 compat_uint_t nentries;
1533 compat_uint_t entries_size;
1534 /* start of the chains */
1535 compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1536 /* nr of counters userspace expects back */
1537 compat_uint_t num_counters;
1538 /* where the kernel will put the old counters. */
1539 compat_uptr_t counters;
1540 compat_uptr_t entries;
1543 /* struct ebt_entry_match, _target and _watcher have same layout */
/* One 32-bit match/watcher/target header as it appears inside the entries
 * blob: a name (or a compat pointer in a union, per the original layout),
 * a size, and the extension's private data immediately after.
 */
1544 struct compat_ebt_entry_mwt {
1546 char name[EBT_FUNCTION_MAXNAMELEN];
1549 compat_uint_t match_size;
/* flexible payload: extension-private data follows the header */
1550 compat_uint_t data[0];
1553 /* account for possible padding between match_size and ->data */
/* Returns the (non-negative, compile-time constant) difference between the
 * 64-bit-aligned and compat-aligned header sizes; the BUILD_BUG_ON proves
 * the result can never be negative.
 */
1554 static int ebt_compat_entry_padsize(void)
1556 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1557 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1558 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1559 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
/* ebt_compat_match_offset - 64/32-bit size delta for one match's payload.
 * Normally delegates to xt_compat_match_offset(); matches registered with
 * .matchsize == -1 (ebt_among) carry a userspace-determined size, so the
 * delta is computed from the user-supplied length instead.
 */
1562 static int ebt_compat_match_offset(const struct xt_match *match,
1563 unsigned int userlen)
1566 * ebt_among needs special handling. The kernel .matchsize is
1567 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1568 * value is expected.
1569 * Example: userspace sends 4500, ebt_among.c wants 4504.
1571 if (unlikely(match->matchsize == -1))
1572 return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1573 return xt_compat_match_offset(match);
/* compat_match_to_user - emit one match in 32-bit format at *dstptr.
 * Writes name + shrunken size, then either the extension's own
 * compat_to_user converter or a plain copy of the payload.  On success
 * advances *dstptr and shrinks *size by the padding + offset delta.
 * NOTE(review): the -EFAULT returns and the *dstptr advance are outside
 * this view.
 */
1576 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1579 const struct xt_match *match = m->u.match;
1580 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1581 int off = ebt_compat_match_offset(match, m->match_size);
/* msize is what 32-bit userspace will see for this match's payload */
1582 compat_uint_t msize = m->match_size - off;
1584 BUG_ON(off >= m->match_size);
1586 if (copy_to_user(cm->u.name, match->name,
1587 strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1590 if (match->compat_to_user) {
1591 if (match->compat_to_user(cm->data, m->data))
1593 } else if (copy_to_user(cm->data, m->data, msize))
1596 *size -= ebt_compat_entry_padsize() + off;
/* compat_target_to_user - emit one target in 32-bit format at *dstptr.
 * Mirror of compat_match_to_user() for targets (and, via the wrapper
 * below, watchers): name + shrunken size, then converted or raw payload.
 */
1602 static int compat_target_to_user(struct ebt_entry_target *t,
1603 void __user **dstptr,
1606 const struct xt_target *target = t->u.target;
1607 struct compat_ebt_entry_mwt __user *cm = *dstptr;
1608 int off = xt_compat_target_offset(target);
1609 compat_uint_t tsize = t->target_size - off;
1611 BUG_ON(off >= t->target_size);
/* note: targets reuse the mwt header, hence &cm->match_size */
1613 if (copy_to_user(cm->u.name, target->name,
1614 strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1617 if (target->compat_to_user) {
1618 if (target->compat_to_user(cm->data, t->data))
1620 } else if (copy_to_user(cm->data, t->data, tsize))
1623 *size -= ebt_compat_entry_padsize() + off;
/* compat_watcher_to_user - watchers share the target layout, so this is a
 * thin cast-and-forward to compat_target_to_user().
 */
1629 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1630 void __user **dstptr,
1633 return compat_target_to_user((struct ebt_entry_target *)w,
/* compat_copy_entry_to_user - emit one ebt_entry (or chain header) in
 * 32-bit layout.  Chain headers (bitmask == 0) are copied verbatim; real
 * entries are copied, then their matches/watchers/target are converted,
 * and finally the three offsets stored in the userspace copy are patched
 * to account for the size shrinkage (origsize - *size tracks bytes saved
 * so far).
 */
1637 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1640 struct ebt_entry_target *t;
1641 struct ebt_entry __user *ce;
1642 u32 watchers_offset, target_offset, next_offset;
1643 compat_uint_t origsize;
1646 if (e->bitmask == 0) {
1647 if (*size < sizeof(struct ebt_entries))
1649 if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1652 *dstptr += sizeof(struct ebt_entries);
1653 *size -= sizeof(struct ebt_entries);
1657 if (*size < sizeof(*ce))
1660 ce = (struct ebt_entry __user *)*dstptr;
1661 if (copy_to_user(ce, e, sizeof(*ce)))
1665 *dstptr += sizeof(*ce);
1667 ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
/* each offset is shrunk by the bytes saved up to that point */
1670 watchers_offset = e->watchers_offset - (origsize - *size);
1672 ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1675 target_offset = e->target_offset - (origsize - *size);
1677 t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1679 ret = compat_target_to_user(t, dstptr, size);
1682 next_offset = e->next_offset - (origsize - *size);
1684 if (put_user(watchers_offset, &ce->watchers_offset) ||
1685 put_user(target_offset, &ce->target_offset) ||
1686 put_user(next_offset, &ce->next_offset))
1689 *size -= sizeof(*ce);
/* compat_calc_match - accumulate the 64→32-bit size delta of one match
 * (payload delta plus per-header padding) into *off.
 */
1693 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1695 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1696 *off += ebt_compat_entry_padsize();
/* compat_calc_watcher - accumulate the size delta of one watcher into *off;
 * watchers use the target layout, hence xt_compat_target_offset().
 */
1700 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1702 *off += xt_compat_target_offset(w->u.watcher);
1703 *off += ebt_compat_entry_padsize();
/* compat_calc_entry - per-entry pass that shrinks newinfo->entries_size by
 * this entry's 64→32-bit delta, records the (offset, delta) pair with
 * xt_compat_add_offset(), and pulls every hook-entry offset located after
 * this entry back by the same delta.
 */
1707 static int compat_calc_entry(const struct ebt_entry *e,
1708 const struct ebt_table_info *info,
1710 struct compat_ebt_replace *newinfo)
1712 const struct ebt_entry_target *t;
1713 unsigned int entry_offset;
1716 if (e->bitmask == 0)
1720 entry_offset = (void *)e - base;
1722 EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1723 EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1725 t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1727 off += xt_compat_target_offset(t->u.target);
1728 off += ebt_compat_entry_padsize();
1730 newinfo->entries_size -= off;
1732 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1736 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1737 const void *hookptr = info->hook_entry[i];
/* NOTE(review): '(base - hookptr)' cast to a pointer looks inverted
 * (hookptr points into the blob, so the difference is negative);
 * verify this comparison against the upstream file before changing.
 */
1738 if (info->hook_entry[i] &&
1739 (e < (struct ebt_entry *)(base - hookptr))) {
1740 newinfo->hook_entry[i] -= off;
1741 pr_debug("0x%08X -> 0x%08X\n",
1742 newinfo->hook_entry[i] + off,
1743 newinfo->hook_entry[i]);
/* compat_table_info - compute the 32-bit view of a table: start from the
 * native entries_size and let compat_calc_entry() subtract each entry's
 * delta while recording the per-entry offsets.
 */
1751 static int compat_table_info(const struct ebt_table_info *info,
1752 struct compat_ebt_replace *newinfo)
1754 unsigned int size = info->entries_size;
1755 const void *entries = info->entries;
1757 newinfo->entries_size = size;
1759 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
/* compat_copy_everything_to_user - 32-bit variant of
 * copy_everything_to_user(): validates the user header against the
 * compat-adjusted sizes (compat_table_info), copies counters, then
 * converts each entry on the fly via compat_copy_entry_to_user().
 */
1763 static int compat_copy_everything_to_user(struct ebt_table *t,
1764 void __user *user, int *len, int cmd)
1766 struct compat_ebt_replace repl, tmp;
1767 struct ebt_counter *oldcounters;
1768 struct ebt_table_info tinfo;
1772 memset(&tinfo, 0, sizeof(tinfo));
1774 if (cmd == EBT_SO_GET_ENTRIES) {
1775 tinfo.entries_size = t->private->entries_size;
1776 tinfo.nentries = t->private->nentries;
1777 tinfo.entries = t->private->entries;
1778 oldcounters = t->private->counters;
1780 tinfo.entries_size = t->table->entries_size;
1781 tinfo.nentries = t->table->nentries;
1782 tinfo.entries = t->table->entries;
1783 oldcounters = t->table->counters;
1786 if (copy_from_user(&tmp, user, sizeof(tmp)))
1789 if (tmp.nentries != tinfo.nentries ||
1790 (tmp.num_counters && tmp.num_counters != tinfo.nentries))
/* repl gets the compat-shrunk entries_size computed below */
1793 memcpy(&repl, &tmp, sizeof(repl));
1794 if (cmd == EBT_SO_GET_ENTRIES)
1795 ret = compat_table_info(t->private, &repl);
1797 ret = compat_table_info(&tinfo, &repl);
1801 if (*len != sizeof(tmp) + repl.entries_size +
1802 (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1803 pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1804 *len, tinfo.entries_size, repl.entries_size);
1808 /* userspace might not need the counters */
1809 ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1810 tmp.num_counters, tinfo.nentries);
1814 pos = compat_ptr(tmp.entries);
1815 return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1816 compat_copy_entry_to_user, &pos, &tmp.entries_size);
/* Two-pass 32→64-bit translation state: pass 1 runs with a NULL
 * buf_kern_start to measure the required 64-bit size; pass 2 writes into
 * the allocated kernel buffer.
 */
1819 struct ebt_entries_buf_state {
1820 char *buf_kern_start; /* kernel buffer to copy (translated) data to */
1821 u32 buf_kern_len; /* total size of kernel buffer */
1822 u32 buf_kern_offset; /* amount of data copied so far */
1823 u32 buf_user_offset; /* read position in userspace buffer */
/* ebt_buf_count - advance the kernel write offset by sz.
 * The '>= sz' test detects unsigned wrap-around of the accumulated offset
 * and turns it into -EINVAL rather than a short buffer later on.
 */
1826 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1828 state->buf_kern_offset += sz;
1829 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
/* ebt_buf_add - append sz bytes of user-derived data to the kernel buffer.
 * In the sizing pass (buf_kern_start == NULL) only the offsets advance;
 * in the copy pass the data is memcpy'd and must fit (BUG_ON).
 * Advances both the kernel and the user offset.
 */
1832 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1833 void *data, unsigned int sz)
1835 if (state->buf_kern_start == NULL)
1838 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1840 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1843 state->buf_user_offset += sz;
1844 return ebt_buf_count(state, sz);
/* ebt_buf_add_pad - append sz zero bytes of kernel-side alignment padding.
 * Unlike ebt_buf_add() this intentionally leaves buf_user_offset alone:
 * the padding has no counterpart in the 32-bit source blob.
 */
1847 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1849 char *b = state->buf_kern_start;
1851 BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1853 if (b != NULL && sz > 0)
1854 memset(b + state->buf_kern_offset, 0, sz);
1855 /* do not adjust ->buf_user_offset here, we added kernel-side padding */
1856 return ebt_buf_count(state, sz);
/* compat_mtw_from_user - translate one 32-bit match/watcher/target into its
 * 64-bit form at the current kernel write position.  Resolves the
 * extension by name (auto-loading the module if needed), converts or
 * copies its payload, records the size delta with xt_compat_add_offset(),
 * and zero-fills alignment padding after the payload.  Returns the 32-bit
 * byte count consumed (off + match_size) or a negative errno.
 * NOTE(review): several error-path lines (watcher targetsize check,
 * module_put on failure) are outside this view.
 */
1865 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1866 enum compat_mwt compat_mwt,
1867 struct ebt_entries_buf_state *state,
1868 const unsigned char *base)
1870 char name[EBT_FUNCTION_MAXNAMELEN];
1871 struct xt_match *match;
1872 struct xt_target *wt;
1874 int off, pad = 0, ret = 0;
1875 unsigned int size_kern, entry_offset, match_size = mwt->match_size;
1877 strlcpy(name, mwt->u.name, sizeof(name));
/* dst is NULL in the sizing pass; guarded writes below */
1879 if (state->buf_kern_start)
1880 dst = state->buf_kern_start + state->buf_kern_offset;
1882 entry_offset = (unsigned char *) mwt - base;
1883 switch (compat_mwt) {
1884 case EBT_COMPAT_MATCH:
1885 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
1886 name, 0), "ebt_%s", name);
1890 return PTR_ERR(match);
1892 off = ebt_compat_match_offset(match, match_size);
1894 if (match->compat_from_user)
1895 match->compat_from_user(dst, mwt->data);
1897 memcpy(dst, mwt->data, match_size);
/* matchsize == -1 (ebt_among): kernel size equals user-supplied size */
1900 size_kern = match->matchsize;
1901 if (unlikely(size_kern == -1))
1902 size_kern = match_size;
1903 module_put(match->me);
1905 case EBT_COMPAT_WATCHER: /* fallthrough */
1906 case EBT_COMPAT_TARGET:
1907 wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
1908 name, 0), "ebt_%s", name);
1913 off = xt_compat_target_offset(wt);
1916 if (wt->compat_from_user)
1917 wt->compat_from_user(dst, mwt->data);
1919 memcpy(dst, mwt->data, match_size);
1922 size_kern = wt->targetsize;
1928 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
1929 off + ebt_compat_entry_padsize());
1934 state->buf_kern_offset += match_size + off;
1935 state->buf_user_offset += match_size;
1936 pad = XT_ALIGN(size_kern) - size_kern;
1938 if (pad > 0 && dst) {
1939 BUG_ON(state->buf_kern_len <= pad);
1940 BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1941 memset(dst + size_kern, 0, pad);
1943 return off + match_size;
1947 * return size of all matches, watchers or target, including necessary
1948 * alignment and padding.
/* Walks a run of compat mwt headers of one kind (matches, watchers, or the
 * single target), copying each header + padding into the kernel buffer and
 * translating each payload via compat_mtw_from_user().  'growth'
 * accumulates how much larger the 64-bit form is; on the copy pass the
 * kernel-side header's match_size is patched to the translated size.
 * NOTE(review): the growth/return plumbing at the end is outside this view.
 */
1950 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1951 unsigned int size_left, enum compat_mwt type,
1952 struct ebt_entries_buf_state *state, const void *base)
1960 buf = (char *) match32;
1962 while (size_left >= sizeof(*match32)) {
1963 struct ebt_entry_match *match_kern;
1966 match_kern = (struct ebt_entry_match *) state->buf_kern_start;
/* on the copy pass, remember where this header lands so its size
 * can be fixed up after translation
 */
1969 tmp = state->buf_kern_start + state->buf_kern_offset;
1970 match_kern = (struct ebt_entry_match *) tmp;
1972 ret = ebt_buf_add(state, buf, sizeof(*match32));
1975 size_left -= sizeof(*match32);
1977 /* add padding before match->data (if any) */
1978 ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1982 if (match32->match_size > size_left)
1985 size_left -= match32->match_size;
1987 ret = compat_mtw_from_user(match32, type, state, base);
1991 BUG_ON(ret < match32->match_size);
1992 growth += ret - match32->match_size;
1993 growth += ebt_compat_entry_padsize();
1995 buf += sizeof(*match32);
1996 buf += match32->match_size;
1999 match_kern->match_size = ret;
/* exactly one target must consume the rest of its region */
2001 WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2002 match32 = (struct compat_ebt_entry_mwt *) buf;
/* Iterate the compat watcher headers of entry e, calling fn(watcher, args)
 * on each; the final offset must land exactly on target_offset or the
 * walk is considered malformed.  NOTE(review): the __ret initialisation,
 * early-break and result lines of this macro are outside this view.
 */
2008 #define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2012 struct compat_ebt_entry_mwt *__watcher; \
2014 for (__i = e->watchers_offset; \
2015 __i < (e)->target_offset; \
2016 __i += __watcher->watcher_size + \
2017 sizeof(struct compat_ebt_entry_mwt)) { \
2018 __watcher = (void *)(e) + __i; \
2019 __ret = fn(__watcher , ## args); \
2024 if (__i != (e)->target_offset) \
/* Iterate the compat match headers of entry e (they start right after the
 * ebt_entry header), calling fn(match, args) on each; the walk must end
 * exactly at watchers_offset.  Same visibility caveat as the watcher
 * iterator above.
 */
2030 #define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2034 struct compat_ebt_entry_mwt *__match; \
2036 for (__i = sizeof(struct ebt_entry); \
2037 __i < (e)->watchers_offset; \
2038 __i += __match->match_size + \
2039 sizeof(struct compat_ebt_entry_mwt)) { \
2040 __match = (void *)(e) + __i; \
2041 __ret = fn(__match , ## args); \
2046 if (__i != (e)->watchers_offset) \
2052 /* called for all ebt_entry structures. */
/* size_entry_mwt - translate (or size, on pass 1) one 32-bit ebt_entry.
 * Chain headers are copied verbatim; real entries are copied up to
 * watchers_offset, then each of the three regions (matches, watchers,
 * target) is translated by ebt_size_mwt() and the three offsets stored in
 * the kernel copy are patched to the grown 64-bit values.
 * NOTE(review): the tail of the function (total bookkeeping, return) is
 * outside this view.
 */
2053 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2054 unsigned int *total,
2055 struct ebt_entries_buf_state *state)
2057 unsigned int i, j, startoff, new_offset = 0;
2058 /* stores match/watchers/targets & offset of next struct ebt_entry: */
2059 unsigned int offsets[4];
2060 unsigned int *offsets_update = NULL;
2064 if (*total < sizeof(struct ebt_entries))
2067 if (!entry->bitmask) {
2068 *total -= sizeof(struct ebt_entries);
2069 return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2071 if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2074 startoff = state->buf_user_offset;
2075 /* pull in most part of ebt_entry, it does not need to be changed. */
2076 ret = ebt_buf_add(state, entry,
2077 offsetof(struct ebt_entry, watchers_offset));
2081 offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2082 memcpy(&offsets[1], &entry->watchers_offset,
2083 sizeof(offsets) - sizeof(offsets[0]));
/* copy pass: the three offsets we just buffered will be patched via
 * offsets_update once the grown sizes are known
 */
2085 if (state->buf_kern_start) {
2086 buf_start = state->buf_kern_start + state->buf_kern_offset;
2087 offsets_update = (unsigned int *) buf_start;
2089 ret = ebt_buf_add(state, &offsets[1],
2090 sizeof(offsets) - sizeof(offsets[0]));
2093 buf_start = (char *) entry;
2095 * 0: matches offset, always follows ebt_entry.
2096 * 1: watchers offset, from ebt_entry structure
2097 * 2: target offset, from ebt_entry structure
2098 * 3: next ebt_entry offset, from ebt_entry structure
2100 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2102 for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2103 struct compat_ebt_entry_mwt *match32;
2105 char *buf = buf_start;
2107 buf = buf_start + offsets[i];
2108 if (offsets[i] > offsets[j])
2111 match32 = (struct compat_ebt_entry_mwt *) buf;
2112 size = offsets[j] - offsets[i];
2113 ret = ebt_size_mwt(match32, size, i, state, base);
2117 if (offsets_update && new_offset) {
2118 pr_debug("change offset %d to %d\n",
2119 offsets_update[i], offsets[j] + new_offset);
2120 offsets_update[i] = offsets[j] + new_offset;
2124 startoff = state->buf_user_offset - startoff;
2126 BUG_ON(*total < startoff);
2132 * repl->entries_size is the size of the ebt_entry blob in userspace.
2133 * It might need more memory when copied to a 64 bit kernel in case
2134 * userspace is 32-bit. So, first task: find out how much memory is needed.
2136 * Called before validation is performed.
/* Drives size_entry_mwt() over the whole 32-bit blob; returns the number
 * of kernel-buffer bytes consumed (the required 64-bit size on pass 1).
 */
2138 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2139 struct ebt_entries_buf_state *state)
2141 unsigned int size_remaining = size_user;
2144 ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2145 &size_remaining, state);
/* the iteration must have consumed the blob exactly */
2149 WARN_ON(size_remaining);
2150 return state->buf_kern_offset;
/* compat_copy_ebt_replace_from_user - fetch a compat_ebt_replace header
 * and widen it into a native struct ebt_replace: the leading fixed-size
 * fields are bit-identical, the pointers are widened via compat_ptr().
 * Rejects zero-sized blobs, length mismatches, and counter counts whose
 * allocation size would overflow.
 */
2154 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2155 void __user *user, unsigned int len)
2157 struct compat_ebt_replace tmp;
2160 if (len < sizeof(tmp))
2163 if (copy_from_user(&tmp, user, sizeof(tmp)))
2166 if (len != sizeof(tmp) + tmp.entries_size)
2169 if (tmp.entries_size == 0)
/* overflow guards for the countersize / counter-array computations */
2172 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2173 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2175 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2178 memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry))
2180 /* starting with hook_entry, 32 vs. 64 bit structures are different */
2181 for (i = 0; i < NF_BR_NUMHOOKS; i++)
2182 repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2184 repl->num_counters = tmp.num_counters;
2185 repl->counters = compat_ptr(tmp.counters);
2186 repl->entries = compat_ptr(tmp.entries);
/* compat_do_replace - EBT_SO_SET_ENTRIES from 32-bit userspace.
 * Widens the header, copies the 32-bit blob in, then runs the translation
 * twice under xt_compat_lock: pass 1 (no kernel buffer) measures the
 * 64-bit size, pass 2 writes into a fresh vmalloc'd buffer.  Hook-entry
 * user pointers are then shifted by the per-offset jump deltas before
 * handing the widened replace to do_replace_finish().
 * NOTE(review): several error-path lines (vfree/goto labels, final
 * returns) are outside this view.
 */
2190 static int compat_do_replace(struct net *net, void __user *user,
2193 int ret, i, countersize, size64;
2194 struct ebt_table_info *newinfo;
2195 struct ebt_replace tmp;
2196 struct ebt_entries_buf_state state;
2199 ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2201 /* try real handler in case userland supplied needed padding */
2202 if (ret == -EINVAL && do_replace(net, user, len) == 0)
2207 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2208 newinfo = vmalloc(sizeof(*newinfo) + countersize);
2213 memset(newinfo->counters, 0, countersize);
2215 memset(&state, 0, sizeof(state));
/* first buffer holds the raw 32-bit blob straight from userspace */
2217 newinfo->entries = vmalloc(tmp.entries_size);
2218 if (!newinfo->entries) {
2223 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2228 entries_tmp = newinfo->entries;
2230 xt_compat_lock(NFPROTO_BRIDGE);
/* pass 1: size-only run, state.buf_kern_start is still NULL */
2232 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2236 pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2237 tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2238 xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2241 newinfo->entries = vmalloc(size64);
2242 if (!newinfo->entries) {
2248 memset(&state, 0, sizeof(state));
2249 state.buf_kern_start = newinfo->entries;
2250 state.buf_kern_len = size64;
/* pass 2: same data, now actually written into the 64-bit buffer */
2252 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2253 BUG_ON(ret < 0); /* parses same data again */
2256 tmp.entries_size = size64;
2258 for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2259 char __user *usrptr;
2260 if (tmp.hook_entry[i]) {
2262 usrptr = (char __user *) tmp.hook_entry[i];
2263 delta = usrptr - tmp.entries;
2264 usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2265 tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2269 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2270 xt_compat_unlock(NFPROTO_BRIDGE);
2272 ret = do_replace_finish(net, &tmp, newinfo);
2276 vfree(newinfo->entries);
2281 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2282 xt_compat_unlock(NFPROTO_BRIDGE);
/* compat_update_counters - EBT_SO_SET_COUNTERS from 32-bit userspace.
 * If the length matches the compat layout, hand off directly with the
 * counters pointer widened; otherwise fall back to the native handler in
 * case userspace already supplied 64-bit padding.
 */
2286 static int compat_update_counters(struct net *net, void __user *user,
2289 struct compat_ebt_replace hlp;
2291 if (copy_from_user(&hlp, user, sizeof(hlp)))
2294 /* try real handler in case userland supplied needed padding */
2295 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2296 return update_counters(net, user, len);
2298 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2299 hlp.num_counters, user, len);
/* compat_do_ebt_set_ctl - 32-bit setsockopt dispatcher; mirrors
 * do_ebt_set_ctl() with the compat_ handlers.  Requires CAP_NET_ADMIN.
 * NOTE(review): the switch default and final return are outside this view.
 */
2302 static int compat_do_ebt_set_ctl(struct sock *sk,
2303 int cmd, void __user *user, unsigned int len)
2307 if (!capable(CAP_NET_ADMIN))
2311 case EBT_SO_SET_ENTRIES:
2312 ret = compat_do_replace(sock_net(sk), user, len);
2314 case EBT_SO_SET_COUNTERS:
2315 ret = compat_update_counters(sock_net(sk), user, len);
/* compat_do_ebt_get_ctl - 32-bit getsockopt dispatcher.  INFO/INIT_INFO
 * with a native-sized *len fall through to the native handler; otherwise
 * the table is looked up under ebt_mutex, the compat-adjusted sizes are
 * computed under xt_compat_lock, and ENTRIES requests are answered either
 * natively (padding-aware userland) or via
 * compat_copy_everything_to_user().
 */
2323 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2324 void __user *user, int *len)
2327 struct compat_ebt_replace tmp;
2328 struct ebt_table *t;
2330 if (!capable(CAP_NET_ADMIN))
2333 /* try real handler in case userland supplied needed padding */
2334 if ((cmd == EBT_SO_GET_INFO ||
2335 cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2336 return do_ebt_get_ctl(sk, cmd, user, len);
2338 if (copy_from_user(&tmp, user, sizeof(tmp)))
2341 t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2345 xt_compat_lock(NFPROTO_BRIDGE);
2347 case EBT_SO_GET_INFO:
2348 tmp.nentries = t->private->nentries;
/* compat_table_info shrinks tmp.entries_size to the 32-bit value */
2349 ret = compat_table_info(t->private, &tmp);
2352 tmp.valid_hooks = t->valid_hooks;
2354 if (copy_to_user(user, &tmp, *len) != 0) {
2360 case EBT_SO_GET_INIT_INFO:
2361 tmp.nentries = t->table->nentries;
2362 tmp.entries_size = t->table->entries_size;
2363 tmp.valid_hooks = t->table->valid_hooks;
2365 if (copy_to_user(user, &tmp, *len) != 0) {
2371 case EBT_SO_GET_ENTRIES:
2372 case EBT_SO_GET_INIT_ENTRIES:
2374 * try real handler first in case of userland-side padding.
2375 * in case we are dealing with an 'ordinary' 32 bit binary
2376 * without 64bit compatibility padding, this will fail right
2377 * after copy_from_user when the *len argument is validated.
2379 * the compat_ variant needs to do one pass over the kernel
2380 * data set to adjust for size differences before it the check.
2382 if (copy_everything_to_user(t, user, len, cmd) == 0)
2385 ret = compat_copy_everything_to_user(t, user, len, cmd);
2391 xt_compat_flush_offsets(NFPROTO_BRIDGE);
2392 xt_compat_unlock(NFPROTO_BRIDGE);
2393 mutex_unlock(&ebt_mutex);
/* Netfilter sockopt registration: routes the EBT_SO_* get/set option
 * ranges to the native handlers, with compat entry points when
 * CONFIG_COMPAT is enabled.
 */
2398 static struct nf_sockopt_ops ebt_sockopts =
2401 .set_optmin = EBT_BASE_CTL,
2402 .set_optmax = EBT_SO_SET_MAX + 1,
2403 .set = do_ebt_set_ctl,
2404 #ifdef CONFIG_COMPAT
2405 .compat_set = compat_do_ebt_set_ctl,
2407 .get_optmin = EBT_BASE_CTL,
2408 .get_optmax = EBT_SO_GET_MAX + 1,
2409 .get = do_ebt_get_ctl,
2410 #ifdef CONFIG_COMPAT
2411 .compat_get = compat_do_ebt_get_ctl,
2413 .owner = THIS_MODULE,
/* Module init: register the standard target with x_tables, then the
 * sockopt handlers; unregisters the target again if the latter fails.
 */
2416 static int __init ebtables_init(void)
2420 ret = xt_register_target(&ebt_standard_target);
2423 ret = nf_register_sockopt(&ebt_sockopts);
2425 xt_unregister_target(&ebt_standard_target);
2429 printk(KERN_INFO "Ebtables v2.0 registered\n");
/* Module exit: tear down registrations in reverse order of init. */
2433 static void __exit ebtables_fini(void)
2435 nf_unregister_sockopt(&ebt_sockopts);
2436 xt_unregister_target(&ebt_standard_target);
2437 printk(KERN_INFO "Ebtables v2.0 unregistered\n");
/* Public API for the per-table modules (ebtable_filter, ebtable_nat, ...). */
2440 EXPORT_SYMBOL(ebt_register_table);
2441 EXPORT_SYMBOL(ebt_unregister_table);
2442 EXPORT_SYMBOL(ebt_do_table);
2443 module_init(ebtables_init);
2444 module_exit(ebtables_fini);
2445 MODULE_LICENSE("GPL");