2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) printk(format , ## args)
45 #define dprintf(format, args...)
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) printk(format , ## args)
51 #define duprintf(format, args...)
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __func__, __FILE__, __LINE__); \
62 #define IP_NF_ASSERT(x)
66 /* All the better to debug you with... */
/* Allocate and populate the initial (boot-time) replace blob for an
 * ip6 table, delegating to the generic xt_alloc_initial_table() helper
 * from xt_repldata.h. Exported for the per-table modules (filter,
 * mangle, ...). NOTE(review): braces elided in this extract. */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
78 We keep a set of rules for each CPU, so we can avoid write-locking
79 them in the softirq when updating the counters and therefore
80 only need to read-lock in the softirq; doing a write_lock_bh() in user
81 context stops packets coming through and allows user context to read
82 the counters or update the rules.
84 Hence the start of any table is given by get_table() below. */
86 /* Check for an extension */
/* Return non-zero when @nexthdr is one of the IPv6 extension-header
 * protocol numbers this code knows how to skip over. */
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
/* Match one packet against the ip6t_ip6 part of a rule: masked
 * source/destination addresses, input/output interface names and the
 * transport protocol found by walking the extension-header chain.
 * Each test can be inverted via ip6info->invflags (FWINV below).
 * On success *protoff/*fragoff are updated for later matches.
 * NOTE(review): several lines (locals, returns, braces) are elided in
 * this extract; comments describe only what is visible. */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
/* XOR the raw comparison with the rule's inversion flag. */
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
/* The dprintf lines below still reference IPv4-style fields
 * (ip->saddr, s_addr) — leftover from the ip_tables original;
 * part of a commented-out region (note the trailing "*("/")").
 */
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
/* Interface name comparison honours the per-byte mask. */
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
/* ipv6_find_hdr() walks the ext-header chain; -1 = find the
 * upper-layer protocol, fragment offset returned separately. */
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
/* Sanity-check the user-supplied ip6t_ip6 header: reject unknown
 * flag or inversion bits. */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
/* Fallback target of the built-in ERROR rule: log the message that
 * userspace stored in targinfo. Reaching it means a broken ruleset. */
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
203 printk("ip6_tables: error: `%s'\n",
204 (const char *)par->targinfo);
209 /* Performance critical - called for every packet */
/* Run one match extension against @skb; fills par->match/matchinfo
 * from the entry before dispatching. Non-zero return stops the
 * IP6T_MATCH_ITERATE loop (i.e. the rule does not match). */
211 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
212 struct xt_match_param *par)
214 par->match = m->u.kernel.match;
215 par->matchinfo = m->data;
217 /* Stop iteration if it doesn't match */
218 if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset from the start of the table blob into an
 * entry pointer. No bounds checking here; callers validate offsets. */
224 static inline struct ip6t_entry *
225 get_entry(const void *base, unsigned int offset)
227 return (struct ip6t_entry *)(base + offset);
230 /* All zeroes == unconditional rule. */
231 /* Mildly perf critical (only if packet tracing is on) */
/* A rule is "unconditional" iff its ip6t_ip6 part is all zeroes —
 * compared against a static zero-initialized template. */
232 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
234 static const struct ip6t_ip6 uncond;
236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper: ip6t_get_target() takes a non-const entry,
 * so cast the constness away for read-only callers. */
239 static inline const struct ip6t_entry_target *
240 ip6t_get_target_c(const struct ip6t_entry *e)
242 return ip6t_get_target((struct ip6t_entry *)e);
245 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
247 /* This cries for unification! */
/* String tables used only by packet tracing (TRACE target):
 * hook number -> chain name, and the rule/return/policy comments
 * emitted in the "TRACE:" log line. */
248 static const char *const hooknames[] = {
249 [NF_INET_PRE_ROUTING] = "PREROUTING",
250 [NF_INET_LOCAL_IN] = "INPUT",
251 [NF_INET_FORWARD] = "FORWARD",
252 [NF_INET_LOCAL_OUT] = "OUTPUT",
253 [NF_INET_POST_ROUTING] = "POSTROUTING",
256 enum nf_ip_trace_comments {
257 NF_IP6_TRACE_COMMENT_RULE,
258 NF_IP6_TRACE_COMMENT_RETURN,
259 NF_IP6_TRACE_COMMENT_POLICY,
262 static const char *const comments[] = {
263 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
264 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
265 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
/* Log settings for trace output; fields between .type and .logflags
 * are elided in this extract. */
268 static struct nf_loginfo trace_loginfo = {
269 .type = NF_LOG_TYPE_LOG,
273 .logflags = NF_LOG_MASK,
278 /* Mildly perf critical (only if packet tracing is on) */
/* Iterator callback for tracing: walk entries from the hook start up
 * to the matched entry @e, tracking the current user-chain name (set
 * by ERROR targets that head user chains) and the rule number within
 * it, and classifying the matched rule as rule/return/policy. */
280 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
281 const char *hookname, const char **chainname,
282 const char **comment, unsigned int *rulenum)
284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
287 /* Head of user chain: ERROR target with chainname */
288 *chainname = t->target.data;
293 if (s->target_offset == sizeof(struct ip6t_entry) &&
294 strcmp(t->target.u.kernel.target->name,
295 IP6T_STANDARD_TARGET) == 0 &&
297 unconditional(&s->ipv6)) {
298 /* Tail of chains: STANDARD target (return/policy) */
/* Built-in chain tail is "policy"; user-chain tail is "return". */
299 *comment = *chainname == hookname
300 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
301 : comments[NF_IP6_TRACE_COMMENT_RETURN];
/* Emit one "TRACE: table:chain:comment:rulenum" log line for a traced
 * packet, resolving chain name and rule number with the iterator
 * above. Runs on the per-CPU copy of the table. */
310 static void trace_packet(const struct sk_buff *skb,
312 const struct net_device *in,
313 const struct net_device *out,
314 const char *tablename,
315 const struct xt_table_info *private,
316 const struct ip6t_entry *e)
318 const void *table_base;
319 const struct ip6t_entry *root;
320 const char *hookname, *chainname, *comment;
321 unsigned int rulenum = 0;
323 table_base = private->entries[smp_processor_id()];
324 root = get_entry(table_base, private->hook_entry[hook]);
326 hookname = chainname = hooknames[hook];
327 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
329 IP6T_ENTRY_ITERATE(root,
330 private->size - private->hook_entry[hook],
331 get_chainname_rulenum,
332 e, hookname, &chainname, &comment, &rulenum);
334 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
335 "TRACE: %s:%s:%s:%u ",
336 tablename, chainname, comment, rulenum);
/* Advance to the entry that follows @entry in the table blob. */
340 static inline __pure struct ip6t_entry *
341 ip6t_next_entry(const struct ip6t_entry *entry)
343 return (void *)entry + entry->next_offset;
346 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
/* Main per-packet traversal: walk the table's rules starting at the
 * entry for @hook, running matches and targets until a verdict
 * (NF_ACCEPT/NF_DROP/...) is produced. Jumps push the fall-back
 * location into the NEXT entry's comefrom field; IP6T_RETURN pops it
 * (via back->comefrom). Operates on this CPU's private copy of the
 * table under the xt_info read lock.
 * NOTE(review): many statements (locals, goto labels, lock acquire,
 * final return) are elided in this extract. */
348 ip6t_do_table(struct sk_buff *skb,
350 const struct net_device *in,
351 const struct net_device *out,
352 struct xt_table *table)
/* Shorthand: comefrom field of the entry at table_base (debug use). */
354 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
356 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
357 bool hotdrop = false;
358 /* Initializing verdict to NF_DROP keeps gcc happy. */
359 unsigned int verdict = NF_DROP;
360 const char *indev, *outdev;
361 const void *table_base;
362 struct ip6t_entry *e, *back;
363 const struct xt_table_info *private;
364 struct xt_match_param mtpar;
365 struct xt_target_param tgpar;
368 indev = in ? in->name : nulldevname;
369 outdev = out ? out->name : nulldevname;
370 /* We handle fragments by dealing with the first fragment as
371 * if it was a normal packet. All other fragments are treated
372 * normally, except that they will NEVER match rules that ask
373 * things we don't know, ie. tcp syn flag or ports). If the
374 * rule is also a fragment-specific rule, non-fragments won't
376 mtpar.hotdrop = &hotdrop;
377 mtpar.in = tgpar.in = in;
378 mtpar.out = tgpar.out = out;
379 mtpar.family = tgpar.family = NFPROTO_IPV6;
380 mtpar.hooknum = tgpar.hooknum = hook;
382 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
385 private = table->private;
386 table_base = private->entries[smp_processor_id()];
388 e = get_entry(table_base, private->hook_entry[hook]);
390 /* For return from builtin chain */
391 back = get_entry(table_base, private->underflow[hook]);
394 const struct ip6t_entry_target *t;
/* Rule mismatch: fall through to the next entry. */
398 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
399 &mtpar.thoff, &mtpar.fragoff, &hotdrop) ||
400 IP6T_MATCH_ITERATE(e, do_match, skb, &mtpar) != 0) {
401 e = ip6t_next_entry(e);
/* Rule matched: account bytes (payload + fixed header) and packets. */
405 ADD_COUNTER(e->counters,
406 ntohs(ipv6_hdr(skb)->payload_len) +
407 sizeof(struct ipv6hdr), 1);
409 t = ip6t_get_target_c(e);
410 IP_NF_ASSERT(t->u.kernel.target);
412 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
413 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
414 /* The packet is traced: log it */
415 if (unlikely(skb->nf_trace))
416 trace_packet(skb, hook, in, out,
417 table->name, private, e);
419 /* Standard target? */
420 if (!t->u.kernel.target->target) {
423 v = ((struct ip6t_standard_target *)t)->verdict;
425 /* Pop from stack? */
426 if (v != IP6T_RETURN) {
/* Negative verdicts encode NF_* values as -(NF_x)-1. */
427 verdict = (unsigned)(-v) - 1;
431 back = get_entry(table_base, back->comefrom);
/* Jump (not a tail-call to the very next rule, not GOTO):
 * remember where to return in the next entry's comefrom. */
434 if (table_base + v != ip6t_next_entry(e) &&
435 !(e->ipv6.flags & IP6T_F_GOTO)) {
436 /* Save old back ptr in next entry */
437 struct ip6t_entry *next = ip6t_next_entry(e);
438 next->comefrom = (void *)back - table_base;
439 /* set back pointer to next entry */
443 e = get_entry(table_base, v);
447 /* Targets which reenter must return
449 tgpar.target = t->u.kernel.target;
450 tgpar.targinfo = t->data;
452 #ifdef CONFIG_NETFILTER_DEBUG
/* Poison comefrom so re-entry by the target can be detected. */
453 tb_comefrom = 0xeeeeeeec;
455 verdict = t->u.kernel.target->target(skb, &tgpar);
457 #ifdef CONFIG_NETFILTER_DEBUG
458 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
459 printk("Target %s reentered!\n",
460 t->u.kernel.target->name);
463 tb_comefrom = 0x57acc001;
465 if (verdict == IP6T_CONTINUE)
466 e = ip6t_next_entry(e);
472 #ifdef CONFIG_NETFILTER_DEBUG
473 tb_comefrom = NETFILTER_LINK_POISON;
475 xt_info_rdunlock_bh();
477 #ifdef DEBUG_ALLOW_ALL
488 /* Figures out from what hook each rule can be called: returns 0 if
489 there are loops. Puts hook bitmask in comefrom. */
/* Walk every rule reachable from each valid hook, recording in
 * e->comefrom the set of hooks that can reach it, and detect loops
 * (returns 0 on a loop). Recursion is avoided by temporarily abusing
 * e->counters.pcnt as a back-pointer stack (restored to 0 on the way
 * back out). NOTE(review): several lines elided in this extract. */
491 mark_source_chains(const struct xt_table_info *newinfo,
492 unsigned int valid_hooks, void *entry0)
496 /* No recursion; use packet counter to save back ptrs (reset
497 to 0 as we leave), and comefrom to save source hook bitmask */
498 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
499 unsigned int pos = newinfo->hook_entry[hook];
500 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
502 if (!(valid_hooks & (1 << hook)))
505 /* Set initial back pointer. */
506 e->counters.pcnt = pos;
509 const struct ip6t_standard_target *t
510 = (void *)ip6t_get_target_c(e);
511 int visited = e->comefrom & (1 << hook);
/* Bit NF_INET_NUMHOOKS marks "currently on the path" —
 * seeing it again means the ruleset loops. */
513 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
514 printk("iptables: loop hook %u pos %u %08X.\n",
515 hook, pos, e->comefrom);
518 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
520 /* Unconditional return/END. */
521 if ((e->target_offset == sizeof(struct ip6t_entry) &&
522 (strcmp(t->target.u.user.name,
523 IP6T_STANDARD_TARGET) == 0) &&
525 unconditional(&e->ipv6)) || visited) {
526 unsigned int oldpos, size;
528 if ((strcmp(t->target.u.user.name,
529 IP6T_STANDARD_TARGET) == 0) &&
530 t->verdict < -NF_MAX_VERDICT - 1) {
531 duprintf("mark_source_chains: bad "
532 "negative verdict (%i)\n",
537 /* Return: backtrack through the last
540 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
541 #ifdef DEBUG_IP_FIREWALL_USER
543 & (1 << NF_INET_NUMHOOKS)) {
544 duprintf("Back unset "
/* Pop the saved back-pointer and clear it. */
551 pos = e->counters.pcnt;
552 e->counters.pcnt = 0;
554 /* We're at the start. */
558 e = (struct ip6t_entry *)
560 } while (oldpos == pos + e->next_offset);
563 size = e->next_offset;
564 e = (struct ip6t_entry *)
565 (entry0 + pos + size);
566 e->counters.pcnt = pos;
569 int newpos = t->verdict;
571 if (strcmp(t->target.u.user.name,
572 IP6T_STANDARD_TARGET) == 0 &&
/* Absolute jump target must leave room for an entry. */
574 if (newpos > newinfo->size -
575 sizeof(struct ip6t_entry)) {
576 duprintf("mark_source_chains: "
577 "bad verdict (%i)\n",
581 /* This a jump; chase it. */
582 duprintf("Jump rule %u -> %u\n",
585 /* ... this is a fallthru */
586 newpos = pos + e->next_offset;
588 e = (struct ip6t_entry *)
590 e->counters.pcnt = pos;
595 duprintf("Finished chain %u\n", hook);
/* Destroy one match extension (invoke its destructor, drop the module
 * ref). With a non-NULL @i, stops after *i matches — used to unwind
 * exactly the matches that were successfully checked. */
601 cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i)
603 struct xt_mtdtor_param par;
605 if (i && (*i)-- == 0)
609 par.match = m->u.kernel.match;
610 par.matchinfo = m->data;
611 par.family = NFPROTO_IPV6;
612 if (par.match->destroy != NULL)
613 par.match->destroy(&par);
614 module_put(par.match->me);
/* Basic structural validation of one entry: sane ip6t_ip6 header and
 * target offset/size fitting inside the entry. */
619 check_entry(const struct ip6t_entry *e, const char *name)
621 const struct ip6t_entry_target *t;
623 if (!ip6_checkentry(&e->ipv6)) {
624 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
628 if (e->target_offset + sizeof(struct ip6t_entry_target) >
632 t = ip6t_get_target_c(e);
633 if (e->target_offset + t->u.target_size > e->next_offset)
/* Run the generic xt_check_match() validation for one already-found
 * match extension against the rule's protocol/invflags. */
639 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
642 const struct ip6t_ip6 *ipv6 = par->entryinfo;
645 par->match = m->u.kernel.match;
646 par->matchinfo = m->data;
648 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
649 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
651 duprintf("ip_tables: check failed for `%s'.\n",
/* Look up (auto-loading "ip6t_<name>" if needed) the match module
 * named in the entry, take a reference, then validate it via
 * check_match(); drops the module ref on validation failure. */
660 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
663 struct xt_match *match;
666 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
668 "ip6t_%s", m->u.user.name);
669 if (IS_ERR(match) || !match) {
670 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
671 return match ? PTR_ERR(match) : -ENOENT;
673 m->u.kernel.match = match;
675 ret = check_match(m, par, i);
681 module_put(m->u.kernel.match->me);
/* Validate the entry's (already resolved) target via the generic
 * xt_check_target(), passing hook mask and protocol context. */
685 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
687 struct ip6t_entry_target *t = ip6t_get_target(e);
688 struct xt_tgchk_param par = {
692 .target = t->u.kernel.target,
694 .hook_mask = e->comefrom,
695 .family = NFPROTO_IPV6,
699 t = ip6t_get_target(e);
700 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
701 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
703 duprintf("ip_tables: check failed for `%s'.\n",
704 t->u.kernel.target->name);
/* Full per-entry checkentry pass: structural check, find+check all
 * matches (j counts successes for unwinding), then find+check the
 * target. On any failure, releases the target ref (if taken) and
 * cleans up the j matches already checked. */
711 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
712 unsigned int size, unsigned int *i)
714 struct ip6t_entry_target *t;
715 struct xt_target *target;
718 struct xt_mtchk_param mtpar;
720 ret = check_entry(e, name);
727 mtpar.entryinfo = &e->ipv6;
728 mtpar.hook_mask = e->comefrom;
729 mtpar.family = NFPROTO_IPV6;
730 ret = IP6T_MATCH_ITERATE(e, find_check_match, &mtpar, &j);
732 goto cleanup_matches;
734 t = ip6t_get_target(e);
735 target = try_then_request_module(xt_find_target(AF_INET6,
738 "ip6t_%s", t->u.user.name);
739 if (IS_ERR(target) || !target) {
740 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
741 ret = target ? PTR_ERR(target) : -ENOENT;
742 goto cleanup_matches;
744 t->u.kernel.target = target;
746 ret = check_target(e, net, name);
753 module_put(t->u.kernel.target->me);
755 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
/* An underflow (base-chain policy) entry must be an unconditional
 * STANDARD target whose verdict decodes to NF_DROP or NF_ACCEPT. */
759 static bool check_underflow(const struct ip6t_entry *e)
761 const struct ip6t_entry_target *t;
762 unsigned int verdict;
764 if (!unconditional(&e->ipv6))
766 t = ip6t_get_target_c(e);
767 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
/* Decode the -(NF_x)-1 verdict encoding back to an NF_ value. */
769 verdict = ((struct ip6t_standard_target *)t)->verdict;
770 verdict = -verdict - 1;
771 return verdict == NF_DROP || verdict == NF_ACCEPT;
/* First-pass walk over the user blob: verify alignment/bounds of each
 * entry, record hook entry points and underflows where the offsets
 * match, validate underflow entries, and reset counters/comefrom. */
775 check_entry_size_and_hooks(struct ip6t_entry *e,
776 struct xt_table_info *newinfo,
777 const unsigned char *base,
778 const unsigned char *limit,
779 const unsigned int *hook_entries,
780 const unsigned int *underflows,
781 unsigned int valid_hooks,
786 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
787 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
788 duprintf("Bad offset %p\n", e);
/* Entry must at least hold the fixed header plus a target. */
793 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
794 duprintf("checking: element %p size %u\n",
799 /* Check hooks & underflows */
800 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
801 if (!(valid_hooks & (1 << h)))
803 if ((unsigned char *)e - base == hook_entries[h])
804 newinfo->hook_entry[h] = hook_entries[h];
805 if ((unsigned char *)e - base == underflows[h]) {
806 if (!check_underflow(e)) {
807 pr_err("Underflows must be unconditional and "
808 "use the STANDARD target with "
812 newinfo->underflow[h] = underflows[h];
816 /* Clear counters and comefrom */
817 e->counters = ((struct xt_counters) { 0, 0 });
/* Tear down one fully-checked entry: destroy all matches, then the
 * target (destructor + module ref drop). @i, when non-NULL, bounds
 * how many entries to clean — used for partial unwinding. */
825 cleanup_entry(struct ip6t_entry *e, struct net *net, unsigned int *i)
827 struct xt_tgdtor_param par;
828 struct ip6t_entry_target *t;
830 if (i && (*i)-- == 0)
833 /* Cleanup all matches */
834 IP6T_MATCH_ITERATE(e, cleanup_match, net, NULL);
835 t = ip6t_get_target(e);
838 par.target = t->u.kernel.target;
839 par.targinfo = t->data;
840 par.family = NFPROTO_IPV6;
841 if (par.target->destroy != NULL)
842 par.target->destroy(&par);
843 module_put(par.target->me);
847 /* Checks and translates the user-supplied table segment (held in
/* Validate and translate a user-supplied table blob into newinfo:
 * size/offset pass, hook/underflow assignment check, loop detection
 * (mark_source_chains), per-entry extension checking, and finally
 * replication of the blob to every other CPU's copy. On a failed
 * check pass, the entries already checked are cleaned up. */
850 translate_table(struct net *net,
852 unsigned int valid_hooks,
853 struct xt_table_info *newinfo,
857 const unsigned int *hook_entries,
858 const unsigned int *underflows)
863 newinfo->size = size;
864 newinfo->number = number;
866 /* Init all hooks to impossible value. */
867 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
868 newinfo->hook_entry[i] = 0xFFFFFFFF;
869 newinfo->underflow[i] = 0xFFFFFFFF;
872 duprintf("translate_table: size %u\n", newinfo->size);
874 /* Walk through entries, checking offsets. */
875 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
876 check_entry_size_and_hooks,
880 hook_entries, underflows, valid_hooks, &i);
885 duprintf("translate_table: %u not %u entries\n",
890 /* Check hooks all assigned */
891 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
892 /* Only hooks which are valid */
893 if (!(valid_hooks & (1 << i)))
895 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
896 duprintf("Invalid hook entry %u %u\n",
900 if (newinfo->underflow[i] == 0xFFFFFFFF) {
901 duprintf("Invalid underflow %u %u\n",
907 if (!mark_source_chains(newinfo, valid_hooks, entry0))
910 /* Finally, each sanity check must pass */
912 ret = IP6T_ENTRY_ITERATE(entry0, newinfo->size,
913 find_check_entry, net, name, size, &i);
/* Failure: unwind the i entries that passed find_check_entry. */
916 IP6T_ENTRY_ITERATE(entry0, newinfo->size,
917 cleanup_entry, net, &i);
921 /* And one copy for every other CPU */
922 for_each_possible_cpu(i) {
923 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
924 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* Iterator callbacks for get_counters(): accumulate (add) or seed
 * (set) the entry's byte/packet counters into total[*i]. */
932 add_entry_to_counter(const struct ip6t_entry *e,
933 struct xt_counters total[],
936 ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
943 set_entry_to_counter(const struct ip6t_entry *e,
944 struct ip6t_counters total[],
947 SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
/* Snapshot the per-CPU rule counters into @counters: seed from the
 * current CPU's copy (SET), then add every other CPU's copy under its
 * per-CPU write lock. */
954 get_counters(const struct xt_table_info *t,
955 struct xt_counters counters[])
961 /* Instead of clearing (by a previous call to memset())
962 * the counters and using adds, we set the counters
963 * with data used by 'current' CPU
965 * Bottom half has to be disabled to prevent deadlock
966 * if new softirq were to run and call ipt_do_table
969 curcpu = smp_processor_id();
972 IP6T_ENTRY_ITERATE(t->entries[curcpu],
974 set_entry_to_counter,
978 for_each_possible_cpu(cpu) {
983 IP6T_ENTRY_ITERATE(t->entries[cpu],
985 add_entry_to_counter,
988 xt_info_wrunlock(cpu);
/* Allocate a counters array sized for the table and fill it with an
 * atomic snapshot via get_counters(). Returns ERR_PTR(-ENOMEM) on
 * allocation failure; caller frees the returned vmalloc'd buffer. */
993 static struct xt_counters *alloc_counters(const struct xt_table *table)
995 unsigned int countersize;
996 struct xt_counters *counters;
997 const struct xt_table_info *private = table->private;
999 /* We need atomic snapshot of counters: rest doesn't change
1000 (other than comefrom, which userspace doesn't care
1002 countersize = sizeof(struct xt_counters) * private->number;
1003 counters = vmalloc_node(countersize, numa_node_id());
1005 if (counters == NULL)
1006 return ERR_PTR(-ENOMEM);
1008 get_counters(private, counters);
/* Copy the whole ruleset to userspace: raw blob first, then patch in
 * the snapshotted counters and replace kernel match/target pointers
 * with their user-visible names at the right offsets. */
1014 copy_entries_to_user(unsigned int total_size,
1015 const struct xt_table *table,
1016 void __user *userptr)
1018 unsigned int off, num;
1019 const struct ip6t_entry *e;
1020 struct xt_counters *counters;
1021 const struct xt_table_info *private = table->private;
1023 const void *loc_cpu_entry;
1025 counters = alloc_counters(table);
1026 if (IS_ERR(counters))
1027 return PTR_ERR(counters);
1029 /* choose the copy that is on our node/cpu, ...
1030 * This choice is lazy (because current thread is
1031 * allowed to migrate to another cpu)
1033 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1034 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1039 /* FIXME: use iterator macros --RR */
1040 /* ... then go back and fix counters and names */
1041 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1043 const struct ip6t_entry_match *m;
1044 const struct ip6t_entry_target *t;
1046 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1047 if (copy_to_user(userptr + off
1048 + offsetof(struct ip6t_entry, counters),
1050 sizeof(counters[num])) != 0) {
/* Walk the matches between the fixed header and the target,
 * rewriting each match name. */
1055 for (i = sizeof(struct ip6t_entry);
1056 i < e->target_offset;
1057 i += m->u.match_size) {
1060 if (copy_to_user(userptr + off + i
1061 + offsetof(struct ip6t_entry_match,
1063 m->u.kernel.match->name,
1064 strlen(m->u.kernel.match->name)+1)
1071 t = ip6t_get_target_c(e);
1072 if (copy_to_user(userptr + off + e->target_offset
1073 + offsetof(struct ip6t_entry_target,
1075 t->u.kernel.target->name,
1076 strlen(t->u.kernel.target->name)+1) != 0) {
1087 #ifdef CONFIG_COMPAT
/* 32-bit compat: translate a standard-target verdict between compat
 * and native layouts, adjusting jump offsets by the accumulated
 * per-entry size delta (xt_compat_calc_jump). */
1088 static void compat_standard_from_user(void *dst, const void *src)
1090 int v = *(compat_int_t *)src;
1093 v += xt_compat_calc_jump(AF_INET6, v);
1094 memcpy(dst, &v, sizeof(v));
1097 static int compat_standard_to_user(void __user *dst, const void *src)
1099 compat_int_t cv = *(int *)src;
1102 cv -= xt_compat_calc_jump(AF_INET6, cv);
1103 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
/* Accumulate the compat size delta contributed by one match. */
1107 compat_calc_match(const struct ip6t_entry_match *m, int *size)
1109 *size += xt_compat_match_offset(m->u.kernel.match);
/* Compute how much smaller one entry is in the 32-bit compat layout
 * (fixed header delta + each match + target), register that offset
 * with the xt compat layer, and shrink newinfo's size and any
 * hook/underflow offsets that lie past this entry. */
1113 static int compat_calc_entry(const struct ip6t_entry *e,
1114 const struct xt_table_info *info,
1115 const void *base, struct xt_table_info *newinfo)
1117 const struct ip6t_entry_target *t;
1118 unsigned int entry_offset;
1121 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1122 entry_offset = (void *)e - base;
1123 IP6T_MATCH_ITERATE(e, compat_calc_match, &off);
1124 t = ip6t_get_target_c(e);
1125 off += xt_compat_target_offset(t->u.kernel.target);
1126 newinfo->size -= off;
1127 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1131 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1132 if (info->hook_entry[i] &&
1133 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1134 newinfo->hook_entry[i] -= off;
1135 if (info->underflow[i] &&
1136 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1137 newinfo->underflow[i] -= off;
/* Produce a compat-sized view of @info in @newinfo by running
 * compat_calc_entry over every entry of this CPU's copy. */
1142 static int compat_table_info(const struct xt_table_info *info,
1143 struct xt_table_info *newinfo)
1145 void *loc_cpu_entry;
1147 if (!newinfo || !info)
1150 /* we dont care about newinfo->entries[] */
1151 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1152 newinfo->initial_entries = 0;
1153 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1154 return IP6T_ENTRY_ITERATE(loc_cpu_entry, info->size,
1155 compat_calc_entry, info, loc_cpu_entry,
/* IP6T_SO_GET_INFO handler: look up (auto-loading if needed) the
 * named table and copy its hook entries, underflows, entry count and
 * size to userspace; in @compat mode the sizes come from
 * compat_table_info(). */
1160 static int get_info(struct net *net, void __user *user,
1161 const int *len, int compat)
1163 char name[IP6T_TABLE_MAXNAMELEN];
1167 if (*len != sizeof(struct ip6t_getinfo)) {
1168 duprintf("length %u != %zu\n", *len,
1169 sizeof(struct ip6t_getinfo));
1173 if (copy_from_user(name, user, sizeof(name)) != 0)
/* Force NUL termination of the user-supplied table name. */
1176 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1177 #ifdef CONFIG_COMPAT
1179 xt_compat_lock(AF_INET6);
1181 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1182 "ip6table_%s", name);
1183 if (t && !IS_ERR(t)) {
1184 struct ip6t_getinfo info;
1185 const struct xt_table_info *private = t->private;
1186 #ifdef CONFIG_COMPAT
1187 struct xt_table_info tmp;
1190 ret = compat_table_info(private, &tmp);
1191 xt_compat_flush_offsets(AF_INET6);
1195 info.valid_hooks = t->valid_hooks;
1196 memcpy(info.hook_entry, private->hook_entry,
1197 sizeof(info.hook_entry));
1198 memcpy(info.underflow, private->underflow,
1199 sizeof(info.underflow));
1200 info.num_entries = private->number;
1201 info.size = private->size;
1202 strcpy(info.name, name);
1204 if (copy_to_user(user, &info, *len) != 0)
1212 ret = t ? PTR_ERR(t) : -ENOENT;
1213 #ifdef CONFIG_COMPAT
1215 xt_compat_unlock(AF_INET6);
/* IP6T_SO_GET_ENTRIES handler: validate the requested length against
 * the table's actual size, then dump the entries via
 * copy_entries_to_user(). */
1221 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1225 struct ip6t_get_entries get;
1228 if (*len < sizeof(get)) {
1229 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1232 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1234 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1235 duprintf("get_entries: %u != %zu\n",
1236 *len, sizeof(get) + get.size);
1240 t = xt_find_table_lock(net, AF_INET6, get.name);
1241 if (t && !IS_ERR(t)) {
1242 struct xt_table_info *private = t->private;
1243 duprintf("t->private->number = %u\n", private->number);
1244 if (get.size == private->size)
1245 ret = copy_entries_to_user(private->size,
1246 t, uptr->entrytable);
1248 duprintf("get_entries: I've got %u not %u!\n",
1249 private->size, get.size);
1255 ret = t ? PTR_ERR(t) : -ENOENT;
/* Common tail of table replacement (native and compat paths): find
 * the table, swap in @newinfo via xt_replace_table(), adjust the
 * table module's refcount by the rule-count delta, snapshot the old
 * counters for userspace, and tear down the old ruleset. */
1261 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1262 struct xt_table_info *newinfo, unsigned int num_counters,
1263 void __user *counters_ptr)
1267 struct xt_table_info *oldinfo;
1268 struct xt_counters *counters;
1269 const void *loc_cpu_old_entry;
1272 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1279 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1280 "ip6table_%s", name);
1281 if (!t || IS_ERR(t)) {
1282 ret = t ? PTR_ERR(t) : -ENOENT;
1283 goto free_newinfo_counters_untrans;
1287 if (valid_hooks != t->valid_hooks) {
1288 duprintf("Valid hook crap: %08X vs %08X\n",
1289 valid_hooks, t->valid_hooks);
1294 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1298 /* Update module usage count based on number of rules */
1299 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1300 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1301 if ((oldinfo->number > oldinfo->initial_entries) ||
1302 (newinfo->number <= oldinfo->initial_entries))
1304 if ((oldinfo->number > oldinfo->initial_entries) &&
1305 (newinfo->number <= oldinfo->initial_entries))
1308 /* Get the old counters, and synchronize with replace */
1309 get_counters(oldinfo, counters);
1311 /* Decrease module usage counts and free resource */
1312 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1313 IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
1315 xt_free_table_info(oldinfo);
1316 if (copy_to_user(counters_ptr, counters,
1317 sizeof(struct xt_counters) * num_counters) != 0)
1326 free_newinfo_counters_untrans:
1333 do_replace(struct net *net, const void __user *user, unsigned int len)
1336 struct ip6t_replace tmp;
1337 struct xt_table_info *newinfo;
1338 void *loc_cpu_entry;
1340 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1343 /* overflow check */
1344 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1347 newinfo = xt_alloc_table_info(tmp.size);
1351 /* choose the copy that is on our node/cpu */
1352 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1353 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1359 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1360 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1361 tmp.hook_entry, tmp.underflow);
1365 duprintf("ip_tables: Translated table\n");
1367 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1368 tmp.num_counters, tmp.counters);
1370 goto free_newinfo_untrans;
1373 free_newinfo_untrans:
1374 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1376 xt_free_table_info(newinfo);
1380 /* We're lazy, and add to the first CPU; overflow works its fey magic
1381 * and everything is OK. */
/* Iterator callback for do_add_counters(): add the userspace-supplied
 * byte/packet deltas addme[*i] into this entry's counters. */
1383 add_counter_to_entry(struct ip6t_entry *e,
1384 const struct xt_counters addme[],
1387 ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
/* IP6T_SO_SET_ADD_COUNTERS handler: read an xt_counters_info header
 * (native or compat layout), validate the count against the table,
 * then add the deltas into this CPU's copy under its write lock. */
1394 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1397 unsigned int i, curcpu;
1398 struct xt_counters_info tmp;
1399 struct xt_counters *paddc;
1400 unsigned int num_counters;
1405 const struct xt_table_info *private;
1407 const void *loc_cpu_entry;
1408 #ifdef CONFIG_COMPAT
1409 struct compat_xt_counters_info compat_tmp;
/* Header size differs between compat and native callers. */
1413 size = sizeof(struct compat_xt_counters_info);
1418 size = sizeof(struct xt_counters_info);
1421 if (copy_from_user(ptmp, user, size) != 0)
1424 #ifdef CONFIG_COMPAT
1426 num_counters = compat_tmp.num_counters;
1427 name = compat_tmp.name;
1431 num_counters = tmp.num_counters;
1435 if (len != size + num_counters * sizeof(struct xt_counters))
1438 paddc = vmalloc_node(len - size, numa_node_id());
1442 if (copy_from_user(paddc, user + size, len - size) != 0) {
1447 t = xt_find_table_lock(net, AF_INET6, name);
1448 if (!t || IS_ERR(t)) {
1449 ret = t ? PTR_ERR(t) : -ENOENT;
1455 private = t->private;
1456 if (private->number != num_counters) {
1458 goto unlock_up_free;
1462 /* Choose the copy that is on our node */
1463 curcpu = smp_processor_id();
1464 xt_info_wrlock(curcpu);
1465 loc_cpu_entry = private->entries[curcpu];
1466 IP6T_ENTRY_ITERATE(loc_cpu_entry,
1468 add_counter_to_entry,
1471 xt_info_wrunlock(curcpu);
1483 #ifdef CONFIG_COMPAT
/* 32-bit userspace layout of struct ip6t_replace: pointers shrink to
 * compat_uptr_t; trailing entries use the compat entry layout. */
1484 struct compat_ip6t_replace {
1485 char name[IP6T_TABLE_MAXNAMELEN];
1489 u32 hook_entry[NF_INET_NUMHOOKS];
1490 u32 underflow[NF_INET_NUMHOOKS];
1492 compat_uptr_t counters; /* struct ip6t_counters * */
1493 struct compat_ip6t_entry entries[0];
/* Serialize one kernel ip6t_entry into compat (32-bit) layout directly
 * into the userspace buffer at *dstptr, appending matches and target in
 * their compat forms, then patching the offsets to the shrunken sizes.
 * Advances *dstptr and shrinks *size by the native/compat delta. */
1497 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1498 unsigned int *size, struct xt_counters *counters,
1501 struct ip6t_entry_target *t;
1502 struct compat_ip6t_entry __user *ce;
1503 u_int16_t target_offset, next_offset;
1504 compat_uint_t origsize;
/* Copy the fixed header first; counters are overwritten with the live
 * per-entry counters gathered by alloc_counters(). */
1509 ce = (struct compat_ip6t_entry __user *)*dstptr;
1510 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)))
1513 if (copy_to_user(&ce->counters, &counters[*i], sizeof(counters[*i])))
1516 *dstptr += sizeof(struct compat_ip6t_entry);
1517 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Each match/target conversion further adjusts *dstptr and *size. */
1519 ret = IP6T_MATCH_ITERATE(e, xt_compat_match_to_user, dstptr, size);
1520 target_offset = e->target_offset - (origsize - *size);
1523 t = ip6t_get_target(e);
1524 ret = xt_compat_target_to_user(t, dstptr, size);
/* (origsize - *size) is the total shrinkage so far; rebase offsets. */
1528 next_offset = e->next_offset - (origsize - *size);
1529 if (put_user(target_offset, &ce->target_offset))
1531 if (put_user(next_offset, &ce->next_offset))
/* Resolve one match by name/revision (auto-loading "ip6t_<name>" if
 * needed), bind it to m, and grow *size by the native-vs-compat size
 * difference so the expanded table can be sized. */
1541 compat_find_calc_match(struct ip6t_entry_match *m,
1543 const struct ip6t_ip6 *ipv6,
1544 unsigned int hookmask,
1545 int *size, unsigned int *i)
1547 struct xt_match *match;
1549 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1550 m->u.user.revision),
1551 "ip6t_%s", m->u.user.name);
1552 if (IS_ERR(match) || !match) {
1553 duprintf("compat_check_calc_match: `%s' not found\n",
1555 return match ? PTR_ERR(match) : -ENOENT;
/* Success: record the kernel match and account for its layout delta. */
1557 m->u.kernel.match = match;
1558 *size += xt_compat_match_offset(match);
/* Drop the module reference taken by compat_find_calc_match.
 * With a non-NULL countdown i, stop after the first *i matches have
 * been released (used to unwind a partially-initialized entry);
 * i == NULL releases every match. */
1565 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1567 if (i && (*i)-- == 0)
1570 module_put(m->u.kernel.match->me);
/* Undo check_compat_entry_size_and_hooks for one compat entry: release
 * all its matches and the target's module ref. Countdown i limits the
 * unwind to the first *i entries (NULL i == all entries). */
1575 compat_release_entry(struct compat_ip6t_entry *e, unsigned int *i)
1577 struct ip6t_entry_target *t;
1579 if (i && (*i)-- == 0)
1582 /* Cleanup all matches */
1583 COMPAT_IP6T_MATCH_ITERATE(e, compat_release_match, NULL);
1584 t = compat_ip6t_get_target(e);
1585 module_put(t->u.kernel.target->me);
/* First-pass walk over a 32-bit ruleset image: validate one compat
 * entry's alignment/size, resolve its matches and target (taking module
 * refs), and record the native-vs-compat size delta for this entry via
 * xt_compat_add_offset(). Also latches hook entry/underflow positions
 * into newinfo. Runs under xt_compat_lock(AF_INET6).
 * NOTE(review): numbered listing is sampled; intermediate lines absent. */
1590 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1591 struct xt_table_info *newinfo,
1593 const unsigned char *base,
1594 const unsigned char *limit,
1595 const unsigned int *hook_entries,
1596 const unsigned int *underflows,
1600 struct ip6t_entry_target *t;
1601 struct xt_target *target;
1602 unsigned int entry_offset;
1606 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and ones whose header would run past the
 * blob. NOTE(review): ">= limit" also rejects an entry ending exactly
 * at limit; later kernels reworked these bounds checks -- verify
 * next_offset is bounds-checked against limit in the elided lines. */
1607 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1608 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1609 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* An entry must at least hold its header plus a target header. */
1613 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1614 sizeof(struct compat_xt_entry_target)) {
1615 duprintf("checking: element %p size %u\n",
1620 /* For purposes of check_entry casting the compat entry is fine */
1621 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much this entry grows when expanded to native
 * layout: header delta + each match's delta + target's delta. */
1625 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1626 entry_offset = (void *)e - (void *)base;
1628 ret = COMPAT_IP6T_MATCH_ITERATE(e, compat_find_calc_match, name,
1629 &e->ipv6, e->comefrom, &off, &j);
1631 goto release_matches;
1633 t = compat_ip6t_get_target(e);
1634 target = try_then_request_module(xt_find_target(AF_INET6,
1636 t->u.user.revision),
1637 "ip6t_%s", t->u.user.name);
1638 if (IS_ERR(target) || !target) {
1639 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1641 ret = target ? PTR_ERR(target) : -ENOENT;
1642 goto release_matches;
1644 t->u.kernel.target = target;
1646 off += xt_compat_target_offset(target);
/* Remember this entry's cumulative offset shift for pass two. */
1648 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1652 /* Check hooks & underflows */
1653 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1654 if ((unsigned char *)e - base == hook_entries[h])
1655 newinfo->hook_entry[h] = hook_entries[h];
1656 if ((unsigned char *)e - base == underflows[h])
1657 newinfo->underflow[h] = underflows[h];
1660 /* Clear counters and comefrom */
1661 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop target ref, then release matches resolved so far.
 * NOTE(review): matches are released via the native IP6T_MATCH_ITERATE
 * on a compat entry -- confirm the compat variant isn't required here. */
1668 module_put(t->u.kernel.target->me);
1670 IP6T_MATCH_ITERATE(e, compat_release_match, &j);
/* Second-pass expansion: copy one compat entry into native layout at
 * *dstptr, growing matches/target to kernel sizes and rebasing the
 * entry's offsets and newinfo's hook/underflow positions by the amount
 * the ruleset has grown so far. */
1675 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1676 unsigned int *size, const char *name,
1677 struct xt_table_info *newinfo, unsigned char *base)
1679 struct ip6t_entry_target *t;
1680 struct xt_target *target;
1681 struct ip6t_entry *de;
1682 unsigned int origsize;
/* Copy the (native-sized) header, then the counters separately since
 * their position differs between the two layouts. */
1687 de = (struct ip6t_entry *)*dstptr;
1688 memcpy(de, e, sizeof(struct ip6t_entry));
1689 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1691 *dstptr += sizeof(struct ip6t_entry);
1692 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1694 ret = COMPAT_IP6T_MATCH_ITERATE(e, xt_compat_match_from_user,
/* (origsize - *size) is negative growth here; offsets move forward. */
1698 de->target_offset = e->target_offset - (origsize - *size);
1699 t = compat_ip6t_get_target(e);
1700 target = t->u.kernel.target;
1701 xt_compat_target_from_user(t, dstptr, size);
1703 de->next_offset = e->next_offset - (origsize - *size);
/* Any hook/underflow that points past this entry shifts by the growth. */
1704 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1705 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1706 newinfo->hook_entry[h] -= origsize - *size;
1707 if ((unsigned char *)de - base < newinfo->underflow[h])
1708 newinfo->underflow[h] -= origsize - *size;
/* Third pass, on the already-expanded native entry: run each match's
 * checkentry hook (via check_match) and then the target's, unwinding
 * the first j matches on failure. */
1713 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1714 const char *name, unsigned int *i)
1718 struct xt_mtchk_param mtpar;
1723 mtpar.entryinfo = &e->ipv6;
1724 mtpar.hook_mask = e->comefrom;
1725 mtpar.family = NFPROTO_IPV6;
1726 ret = IP6T_MATCH_ITERATE(e, check_match, &mtpar, &j);
1728 goto cleanup_matches;
1730 ret = check_target(e, net, name);
1732 goto cleanup_matches;
/* Unwind: only the j matches that passed check_match are cleaned. */
1738 IP6T_MATCH_ITERATE(e, cleanup_match, net, &j);
/* Translate a 32-bit ruleset image (entry0, total_size) into a native
 * xt_table_info. Pass 1 validates every compat entry and computes the
 * expanded size under xt_compat_lock; pass 2 copies entries into a
 * freshly allocated table; then chains are marked and each entry is
 * re-checked. On success *pinfo/*pentry0 are replaced with the new
 * table. NOTE(review): listing is sampled; error-path labels between
 * the numbered lines are not visible here. */
1743 translate_compat_table(struct net *net,
1745 unsigned int valid_hooks,
1746 struct xt_table_info **pinfo,
1748 unsigned int total_size,
1749 unsigned int number,
1750 unsigned int *hook_entries,
1751 unsigned int *underflows)
1754 struct xt_table_info *newinfo, *info;
1755 void *pos, *entry0, *entry1;
1762 info->number = number;
1764 /* Init all hooks to impossible value. */
1765 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1766 info->hook_entry[i] = 0xFFFFFFFF;
1767 info->underflow[i] = 0xFFFFFFFF;
1770 duprintf("translate_compat_table: size %u\n", info->size);
/* Pass 1: per-entry validation + size accounting (needs the compat
 * offset table, hence the AF_INET6 compat lock). */
1772 xt_compat_lock(AF_INET6);
1773 /* Walk through entries, checking offsets. */
1774 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1775 check_compat_entry_size_and_hooks,
1776 info, &size, entry0,
1777 entry0 + total_size,
1778 hook_entries, underflows, &j, name);
1784 duprintf("translate_compat_table: %u not %u entries\n",
1789 /* Check hooks all assigned */
1790 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1791 /* Only hooks which are valid */
1792 if (!(valid_hooks & (1 << i)))
1794 if (info->hook_entry[i] == 0xFFFFFFFF) {
1795 duprintf("Invalid hook entry %u %u\n",
1796 i, hook_entries[i]);
1799 if (info->underflow[i] == 0xFFFFFFFF) {
1800 duprintf("Invalid underflow %u %u\n",
/* Allocate the native-sized table and carry over hook bookkeeping. */
1807 newinfo = xt_alloc_table_info(size);
1811 newinfo->number = number;
1812 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1813 newinfo->hook_entry[i] = info->hook_entry[i];
1814 newinfo->underflow[i] = info->underflow[i];
1816 entry1 = newinfo->entries[raw_smp_processor_id()];
/* Pass 2: expand each compat entry into entry1. */
1819 ret = COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size,
1820 compat_copy_entry_from_user,
1821 &pos, &size, name, newinfo, entry1);
1822 xt_compat_flush_offsets(AF_INET6);
1823 xt_compat_unlock(AF_INET6);
1828 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1832 ret = IP6T_ENTRY_ITERATE(entry1, newinfo->size, compat_check_entry,
/* Mixed-state unwind: entries after i hold compat refs, entries before
 * i are fully checked native entries -- release each appropriately. */
1836 COMPAT_IP6T_ENTRY_ITERATE_CONTINUE(entry0, newinfo->size, i,
1837 compat_release_entry, &j);
1838 IP6T_ENTRY_ITERATE(entry1, newinfo->size, cleanup_entry, net, &i);
1839 xt_free_table_info(newinfo);
1843 /* And one copy for every other CPU */
1844 for_each_possible_cpu(i)
1845 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1846 memcpy(newinfo->entries[i], entry1, newinfo->size);
/* Success: the old (compat-sized) info is no longer needed. */
1850 xt_free_table_info(info);
/* Error paths below: free the new table, release compat refs, and
 * drop the compat offset table/lock. */
1854 xt_free_table_info(newinfo);
1856 COMPAT_IP6T_ENTRY_ITERATE(entry0, total_size, compat_release_entry, &j);
1859 xt_compat_flush_offsets(AF_INET6);
1860 xt_compat_unlock(AF_INET6);
/* Compat path of IP6T_SO_SET_REPLACE: read the 32-bit replace header
 * and ruleset blob, translate it to native layout, then hand off to the
 * common __do_replace(). */
1865 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1868 struct compat_ip6t_replace tmp;
1869 struct xt_table_info *newinfo;
1870 void *loc_cpu_entry;
1872 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1875 /* overflow check */
/* Guard the per-CPU size multiply and the counters-length multiply. */
1876 if (tmp.size >= INT_MAX / num_possible_cpus())
1878 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1881 newinfo = xt_alloc_table_info(tmp.size);
1885 /* choose the copy that is on our node/cpu */
1886 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1887 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1893 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1894 &newinfo, &loc_cpu_entry, tmp.size,
1895 tmp.num_entries, tmp.hook_entry,
1900 duprintf("compat_do_replace: Translated table\n");
/* tmp.counters is a compat_uptr_t; widen it before use. */
1902 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1903 tmp.num_counters, compat_ptr(tmp.counters));
1905 goto free_newinfo_untrans;
/* Failure after translation: entries hold module refs -- clean them. */
1908 free_newinfo_untrans:
1909 IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, net, NULL);
1911 xt_free_table_info(newinfo);
/* Compat setsockopt dispatcher; CAP_NET_ADMIN is required for all ops.
 * REPLACE goes through the compat translator, ADD_COUNTERS shares the
 * native handler with compat=1. */
1916 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1921 if (!capable(CAP_NET_ADMIN))
1925 case IP6T_SO_SET_REPLACE:
1926 ret = compat_do_replace(sock_net(sk), user, len);
1929 case IP6T_SO_SET_ADD_COUNTERS:
1930 ret = do_add_counters(sock_net(sk), user, len, 1);
1934 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit userland image of struct ip6t_get_entries: table name,
 * (elided) size field, then a flexible array of compat entries. */
1941 struct compat_ip6t_get_entries {
1942 char name[IP6T_TABLE_MAXNAMELEN];
1944 struct compat_ip6t_entry entrytable[0];
/* Snapshot counters for the table and stream every entry to userspace
 * in compat layout via compat_copy_entry_to_user. */
1948 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1949 void __user *userptr)
1951 struct xt_counters *counters;
1952 const struct xt_table_info *private = table->private;
1956 const void *loc_cpu_entry;
/* alloc_counters() sums the per-CPU counters into one vmalloc'd array. */
1959 counters = alloc_counters(table);
1960 if (IS_ERR(counters))
1961 return PTR_ERR(counters);
1963 /* choose the copy that is on our node/cpu, ...
1964 * This choice is lazy (because current thread is
1965 * allowed to migrate to another cpu)
1967 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1970 ret = IP6T_ENTRY_ITERATE(loc_cpu_entry, total_size,
1971 compat_copy_entry_to_user,
1972 &pos, &size, counters, &i);
/* Compat path of IP6T_SO_GET_ENTRIES: validate the request length
 * against the table's compat-translated size, then dump the entries. */
1979 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1983 struct compat_ip6t_get_entries get;
1986 if (*len < sizeof(get)) {
1987 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1991 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* Caller must size the buffer for header + get.size bytes of entries. */
1994 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1995 duprintf("compat_get_entries: %u != %zu\n",
1996 *len, sizeof(get) + get.size);
/* compat lock protects the offset table built by compat_table_info(). */
2000 xt_compat_lock(AF_INET6);
2001 t = xt_find_table_lock(net, AF_INET6, get.name);
2002 if (t && !IS_ERR(t)) {
2003 const struct xt_table_info *private = t->private;
2004 struct xt_table_info info;
2005 duprintf("t->private->number = %u\n", private->number);
2006 ret = compat_table_info(private, &info);
/* Only dump if the caller's idea of the compat size still matches. */
2007 if (!ret && get.size == info.size) {
2008 ret = compat_copy_entries_to_user(private->size,
2009 t, uptr->entrytable);
2011 duprintf("compat_get_entries: I've got %u not %u!\n",
2012 private->size, get.size);
2015 xt_compat_flush_offsets(AF_INET6);
2019 ret = t ? PTR_ERR(t) : -ENOENT;
2021 xt_compat_unlock(AF_INET6);
/* Forward declaration: the compat getter falls through to the native
 * handler for commands with identical layouts. */
2025 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt dispatcher: INFO and ENTRIES need layout
 * translation; everything else is layout-compatible and delegated. */
2028 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2032 if (!capable(CAP_NET_ADMIN))
2036 case IP6T_SO_GET_INFO:
2037 ret = get_info(sock_net(sk), user, len, 1);
2039 case IP6T_SO_GET_ENTRIES:
2040 ret = compat_get_entries(sock_net(sk), user, len);
2043 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt dispatcher (CAP_NET_ADMIN required): full table
 * replace or counter addition, with compat=0. */
2050 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2054 if (!capable(CAP_NET_ADMIN))
2058 case IP6T_SO_SET_REPLACE:
2059 ret = do_replace(sock_net(sk), user, len);
2062 case IP6T_SO_SET_ADD_COUNTERS:
2063 ret = do_add_counters(sock_net(sk), user, len, 0);
2067 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt dispatcher: table info, entry dump, and match/
 * target revision queries (CAP_NET_ADMIN required). */
2075 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2079 if (!capable(CAP_NET_ADMIN))
2083 case IP6T_SO_GET_INFO:
2084 ret = get_info(sock_net(sk), user, len, 0);
2087 case IP6T_SO_GET_ENTRIES:
2088 ret = get_entries(sock_net(sk), user, len);
2091 case IP6T_SO_GET_REVISION_MATCH:
2092 case IP6T_SO_GET_REVISION_TARGET: {
2093 struct ip6t_get_revision rev;
2096 if (*len != sizeof(rev)) {
2100 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
/* Same struct serves both queries; only the lookup table differs. */
2105 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Auto-load "ip6t_<name>" if the extension isn't registered yet. */
2110 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2113 "ip6t_%s", rev.name);
2118 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/* Register a new ip6tables table in namespace net: allocate per-CPU
 * rule storage, translate the initial ruleset repl, and hand the table
 * to x_tables. Returns the live xt_table or ERR_PTR on failure. */
2125 struct xt_table *ip6t_register_table(struct net *net,
2126 const struct xt_table *table,
2127 const struct ip6t_replace *repl)
2130 struct xt_table_info *newinfo;
/* Dummy "previous" info so xt_register_table has something to swap out. */
2131 struct xt_table_info bootstrap
2132 = { 0, 0, 0, { 0 }, { 0 }, { } };
2133 void *loc_cpu_entry;
2134 struct xt_table *new_table;
2136 newinfo = xt_alloc_table_info(repl->size);
2142 /* choose the copy on our node/cpu, but dont care about preemption */
2143 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2144 memcpy(loc_cpu_entry, repl->entries, repl->size);
/* translate_table validates the ruleset and replicates it per-CPU. */
2146 ret = translate_table(net, table->name, table->valid_hooks,
2147 newinfo, loc_cpu_entry, repl->size,
2154 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2155 if (IS_ERR(new_table)) {
2156 ret = PTR_ERR(new_table);
2162 xt_free_table_info(newinfo);
2164 return ERR_PTR(ret);
/* Tear down a table: unregister from x_tables, run cleanup_entry on
 * every rule (dropping match/target module refs), release the table
 * module's self-reference taken for non-initial entries, and free the
 * per-CPU rule storage. */
2167 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2169 struct xt_table_info *private;
2170 void *loc_cpu_entry;
/* table may be freed by xt_unregister_table; stash the owner first. */
2171 struct module *table_owner = table->me;
2173 private = xt_unregister_table(table);
2175 /* Decrease module usage counts and free resources */
2176 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2177 IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, net, NULL);
2178 if (private->number > private->initial_entries)
2179 module_put(table_owner);
2180 xt_free_table_info(private);
2183 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* The (elided) tail XORs with an invert flag so the caller can negate
 * the range test -- confirm against full source. */
2185 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2186 u_int8_t type, u_int8_t code,
2189 return (type == test_type && code >= min_code && code <= max_code)
/* Match callback for the built-in "icmp6" match: compare the packet's
 * ICMPv6 type/code against the rule's range, honoring the invert flag.
 * Truncated ICMPv6 headers force a hotdrop. */
2194 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2196 const struct icmp6hdr *ic;
2197 struct icmp6hdr _icmph;
2198 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2200 /* Must not be a fragment. */
2201 if (par->fragoff != 0)
/* Linearize just the ICMPv6 header into _icmph if it's paged. */
2204 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2206 /* We've been asked to examine this packet, and we
2207 * can't. Hence, no choice but to drop.
2209 duprintf("Dropping evil ICMP tinygram.\n");
2210 *par->hotdrop = true;
2214 return icmp6_type_code_match(icmpinfo->type,
2217 ic->icmp6_type, ic->icmp6_code,
2218 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2221 /* Called when user tries to insert an entry of this type. */
/* Only IP6T_ICMP_INV is a legal invert flag for this match. */
2222 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2224 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2226 /* Must specify no unknown invflags */
2227 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2230 /* The built-in targets: standard (NULL) and error. */
/* Standard target: its targetsize is just the verdict int; compat
 * hooks translate the 32/64-bit verdict representation. */
2231 static struct xt_target ip6t_standard_target __read_mostly = {
2232 .name = IP6T_STANDARD_TARGET,
2233 .targetsize = sizeof(int),
2234 .family = NFPROTO_IPV6,
2235 #ifdef CONFIG_COMPAT
2236 .compatsize = sizeof(compat_int_t),
2237 .compat_from_user = compat_standard_from_user,
2238 .compat_to_user = compat_standard_to_user,
/* Error target: placed at chain ends by userspace; ip6t_error logs and
 * drops if it is ever actually hit. */
2242 static struct xt_target ip6t_error_target __read_mostly = {
2243 .name = IP6T_ERROR_TARGET,
2244 .target = ip6t_error,
2245 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2246 .family = NFPROTO_IPV6,
/* sockopt registration: wires the IP6T_SO_* get/set ranges to the
 * dispatchers above, with compat variants when CONFIG_COMPAT. */
2249 static struct nf_sockopt_ops ip6t_sockopts = {
2251 .set_optmin = IP6T_BASE_CTL,
2252 .set_optmax = IP6T_SO_SET_MAX+1,
2253 .set = do_ip6t_set_ctl,
2254 #ifdef CONFIG_COMPAT
2255 .compat_set = compat_do_ip6t_set_ctl,
2257 .get_optmin = IP6T_BASE_CTL,
2258 .get_optmax = IP6T_SO_GET_MAX+1,
2259 .get = do_ip6t_get_ctl,
2260 #ifdef CONFIG_COMPAT
2261 .compat_get = compat_do_ip6t_get_ctl,
2263 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration; .proto restricts it to ICMPv6
 * packets so icmp6_match never sees other transports. */
2266 static struct xt_match icmp6_matchstruct __read_mostly = {
2268 .match = icmp6_match,
2269 .matchsize = sizeof(struct ip6t_icmp),
2270 .checkentry = icmp6_checkentry,
2271 .proto = IPPROTO_ICMPV6,
2272 .family = NFPROTO_IPV6,
/* Per-netns init: set up the IPv6 x_tables state (proc entries etc.). */
2275 static int __net_init ip6_tables_net_init(struct net *net)
2277 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown: mirror of ip6_tables_net_init. */
2280 static void __net_exit ip6_tables_net_exit(struct net *net)
2282 xt_proto_fini(net, NFPROTO_IPV6);
/* Hook the per-netns init/exit pair into the pernet subsystem. */
2285 static struct pernet_operations ip6_tables_net_ops = {
2286 .init = ip6_tables_net_init,
2287 .exit = ip6_tables_net_exit,
/* Module init: pernet subsystem, built-in targets/match, then the
 * sockopt interface; each failure unwinds the steps before it. */
2290 static int __init ip6_tables_init(void)
2294 ret = register_pernet_subsys(&ip6_tables_net_ops);
2298 /* Noone else will be downing sem now, so we won't sleep */
2299 ret = xt_register_target(&ip6t_standard_target);
2302 ret = xt_register_target(&ip6t_error_target);
2305 ret = xt_register_match(&icmp6_matchstruct);
2309 /* Register setsockopt */
2310 ret = nf_register_sockopt(&ip6t_sockopts);
2314 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error labels (in reverse registration order) follow. */
2318 xt_unregister_match(&icmp6_matchstruct);
2320 xt_unregister_target(&ip6t_error_target);
2322 xt_unregister_target(&ip6t_standard_target);
2324 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: undo ip6_tables_init in exact reverse order. */
2329 static void __exit ip6_tables_fini(void)
2331 nf_unregister_sockopt(&ip6t_sockopts);
2333 xt_unregister_match(&icmp6_matchstruct);
2334 xt_unregister_target(&ip6t_error_target);
2335 xt_unregister_target(&ip6t_standard_target);
2337 unregister_pernet_subsys(&ip6_tables_net_ops);
/*
2341 * find the offset to specified header or the protocol number of last header
2342 * if target < 0. "last header" is transport protocol header, ESP, or
2345 * If target header is found, its offset is set in *offset and return protocol
2346 * number. Otherwise, return -1.
2348 * If the first fragment doesn't contain the final protocol header or
2349 * NEXTHDR_NONE it is considered invalid.
2351 * Note that non-1st fragment is special case that "the protocol number
2352 * of last header" is "next header" field in Fragment header. In this case,
2353 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
 */
2357 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2358 int target, unsigned short *fragoff)
/* Start scanning just past the fixed IPv6 header. */
2360 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2361 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2362 unsigned int len = skb->len - start;
/* Walk the extension-header chain until the target header is reached. */
2367 while (nexthdr != target) {
2368 struct ipv6_opt_hdr _hdr, *hp;
2369 unsigned int hdrlen;
/* Non-extension header or NEXTHDR_NONE ends the chain. */
2371 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2377 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2380 if (nexthdr == NEXTHDR_FRAGMENT) {
2381 unsigned short _frag_off;
/* Pull just the 16-bit frag_off field out of the fragment header. */
2383 fp = skb_header_pointer(skb,
2384 start+offsetof(struct frag_hdr,
/* Mask off the reserved/M bits; nonzero means non-first fragment. */
2391 _frag_off = ntohs(*fp) & ~0x7;
2394 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2395 hp->nexthdr == NEXTHDR_NONE)) {
2397 *fragoff = _frag_off;
/* AH length is counted in 32-bit words; other ext hdrs in 64-bit. */
2403 } else if (nexthdr == NEXTHDR_AUTH)
2404 hdrlen = (hp->hdrlen + 2) << 2;
2406 hdrlen = ipv6_optlen(hp);
2408 nexthdr = hp->nexthdr;
/* Public entry points consumed by the per-table modules
 * (ip6table_filter, ip6table_mangle, ...) and other IPv6 netfilter code. */
2417 EXPORT_SYMBOL(ip6t_register_table);
2418 EXPORT_SYMBOL(ip6t_unregister_table);
2419 EXPORT_SYMBOL(ip6t_do_table);
2420 EXPORT_SYMBOL(ip6t_ext_hdr);
2421 EXPORT_SYMBOL(ipv6_find_hdr);
2423 module_init(ip6_tables_init);
2424 module_exit(ip6_tables_fini);