2 * Packet matching code.
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
/* Compile-time debug helpers.  Each expands to a printk() only when the
 * corresponding DEBUG_* / CONFIG_* switch is defined, and to nothing
 * otherwise, so production builds pay no runtime cost.
 */
#ifdef DEBUG_IP_FIREWALL
#define dprintf(format, args...) printk(format , ## args)
#else
#define dprintf(format, args...)
#endif

#ifdef DEBUG_IP_FIREWALL_USER
#define duprintf(format, args...) printk(format , ## args)
#else
#define duprintf(format, args...)
#endif

/* IP_NF_ASSERT(x): log (but do not halt) when an internal invariant is
 * violated.  Compiled out entirely unless CONFIG_NETFILTER_DEBUG is set.
 */
#ifdef CONFIG_NETFILTER_DEBUG
#define IP_NF_ASSERT(x)						\
do {								\
	if (!(x))						\
		printk("IP_NF_ASSERT: %s:%s:%u\n",		\
		       __func__, __FILE__, __LINE__);		\
} while(0)
#else
#define IP_NF_ASSERT(x)
#endif
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
78 We keep a set of rules for each CPU, so we can avoid write-locking
79 them in the softirq when updating the counters and therefore
80 only need to read-lock in the softirq; doing a write_lock_bh() in user
81 context stops packets coming through and allows user context to read
82 the counters or update the rules.
84 Hence the start of any table is given by get_table() below. */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
/* NOTE(review): this extraction is missing source lines — the embedded
 * original line numbering jumps (102->105, 154->160, ...), so the function
 * signature tail, braces, several returns and parts of commented-out debug
 * code are not visible here.  Verify against the upstream
 * net/ipv6/netfilter/ip6_tables.c before treating this as compilable.
 *
 * ip6_packet_match(): per-packet core matcher.  Compares the rule's
 * masked source/destination addresses, in/out interface names and the
 * desired protocol header against the skb, honouring the IP6T_INV_*
 * inversion flags via the FWINV() helper.  On success it also reports
 * the transport-header offset (*protoff) and fragment offset (*fragoff).
 */
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
203 printk("ip6_tables: error: `%s'\n",
204 (const char *)par->targinfo);
209 /* Performance critical - called for every packet */
211 do_match(const struct ip6t_entry_match *m, const struct sk_buff *skb,
212 struct xt_match_param *par)
214 par->match = m->u.kernel.match;
215 par->matchinfo = m->data;
217 /* Stop iteration if it doesn't match */
218 if (!m->u.kernel.match->match(skb, par))
/* Translate a byte offset from the table blob base into an entry pointer. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
230 /* All zeroes == unconditional rule. */
231 /* Mildly perf critical (only if packet tracing is on) */
232 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
234 static const struct ip6t_ip6 uncond;
236 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-qualified convenience wrapper around ip6t_get_target(). */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
245 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
246 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
247 /* This cries for unification! */
248 static const char *const hooknames[] = {
249 [NF_INET_PRE_ROUTING] = "PREROUTING",
250 [NF_INET_LOCAL_IN] = "INPUT",
251 [NF_INET_FORWARD] = "FORWARD",
252 [NF_INET_LOCAL_OUT] = "OUTPUT",
253 [NF_INET_POST_ROUTING] = "POSTROUTING",
256 enum nf_ip_trace_comments {
257 NF_IP6_TRACE_COMMENT_RULE,
258 NF_IP6_TRACE_COMMENT_RETURN,
259 NF_IP6_TRACE_COMMENT_POLICY,
262 static const char *const comments[] = {
263 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
264 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
265 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
268 static struct nf_loginfo trace_loginfo = {
269 .type = NF_LOG_TYPE_LOG,
273 .logflags = NF_LOG_MASK,
278 /* Mildly perf critical (only if packet tracing is on) */
280 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
281 const char *hookname, const char **chainname,
282 const char **comment, unsigned int *rulenum)
284 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
286 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
287 /* Head of user chain: ERROR target with chainname */
288 *chainname = t->target.data;
293 if (s->target_offset == sizeof(struct ip6t_entry) &&
294 strcmp(t->target.u.kernel.target->name,
295 IP6T_STANDARD_TARGET) == 0 &&
297 unconditional(&s->ipv6)) {
298 /* Tail of chains: STANDARD target (return/policy) */
299 *comment = *chainname == hookname
300 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
301 : comments[NF_IP6_TRACE_COMMENT_RETURN];
310 static void trace_packet(const struct sk_buff *skb,
312 const struct net_device *in,
313 const struct net_device *out,
314 const char *tablename,
315 const struct xt_table_info *private,
316 const struct ip6t_entry *e)
318 const void *table_base;
319 const struct ip6t_entry *root;
320 const char *hookname, *chainname, *comment;
321 const struct ip6t_entry *iter;
322 unsigned int rulenum = 0;
324 table_base = private->entries[smp_processor_id()];
325 root = get_entry(table_base, private->hook_entry[hook]);
327 hookname = chainname = hooknames[hook];
328 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
330 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
331 if (get_chainname_rulenum(iter, e, hookname,
332 &chainname, &comment, &rulenum) != 0)
335 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
336 "TRACE: %s:%s:%s:%u ",
337 tablename, chainname, comment, rulenum);
341 static inline __pure struct ip6t_entry *
342 ip6t_next_entry(const struct ip6t_entry *entry)
344 return (void *)entry + entry->next_offset;
/* NOTE(review): this extraction is missing many source lines (embedded
 * numbering jumps: 353->355, 444->445->449, 472->478, ...) — the main
 * rule-walk loop head, several braces, the rdlock acquisition and the
 * final return are not visible.  Verify against the upstream
 * net/ipv6/netfilter/ip6_tables.c before treating as compilable.
 *
 * ip6t_do_table(): the per-packet table traversal engine.  Walks rules
 * starting at the hook's entry point, calls matches and targets, follows
 * jumps/gotos using a back-pointer chain stored in entry->comefrom, and
 * produces the final netfilter verdict (NF_ACCEPT/NF_DROP/...).
 */
347 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
349 ip6t_do_table(struct sk_buff *skb,
351 const struct net_device *in,
352 const struct net_device *out,
353 struct xt_table *table)
355 #define tb_comefrom ((struct ip6t_entry *)table_base)->comefrom
357 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
358 bool hotdrop = false;
359 /* Initializing verdict to NF_DROP keeps gcc happy. */
360 unsigned int verdict = NF_DROP;
361 const char *indev, *outdev;
362 const void *table_base;
363 struct ip6t_entry *e, *back;
364 const struct xt_table_info *private;
365 struct xt_match_param mtpar;
366 struct xt_target_param tgpar;
369 indev = in ? in->name : nulldevname;
370 outdev = out ? out->name : nulldevname;
371 /* We handle fragments by dealing with the first fragment as
372 * if it was a normal packet. All other fragments are treated
373 * normally, except that they will NEVER match rules that ask
374 * things we don't know, ie. tcp syn flag or ports). If the
375 * rule is also a fragment-specific rule, non-fragments won't
377 mtpar.hotdrop = &hotdrop;
378 mtpar.in = tgpar.in = in;
379 mtpar.out = tgpar.out = out;
380 mtpar.family = tgpar.family = NFPROTO_IPV6;
381 mtpar.hooknum = tgpar.hooknum = hook;
383 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
386 private = table->private;
387 table_base = private->entries[smp_processor_id()];
389 e = get_entry(table_base, private->hook_entry[hook]);
391 /* For return from builtin chain */
392 back = get_entry(table_base, private->underflow[hook]);
395 const struct ip6t_entry_target *t;
396 const struct xt_entry_match *ematch;
400 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
401 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
403 e = ip6t_next_entry(e);
407 xt_ematch_foreach(ematch, e)
408 if (do_match(ematch, skb, &mtpar) != 0)
411 ADD_COUNTER(e->counters,
412 ntohs(ipv6_hdr(skb)->payload_len) +
413 sizeof(struct ipv6hdr), 1);
415 t = ip6t_get_target_c(e);
416 IP_NF_ASSERT(t->u.kernel.target);
418 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
419 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
420 /* The packet is traced: log it */
421 if (unlikely(skb->nf_trace))
422 trace_packet(skb, hook, in, out,
423 table->name, private, e);
425 /* Standard target? */
426 if (!t->u.kernel.target->target) {
429 v = ((struct ip6t_standard_target *)t)->verdict;
431 /* Pop from stack? */
432 if (v != IP6T_RETURN) {
433 verdict = (unsigned)(-v) - 1;
437 back = get_entry(table_base, back->comefrom);
440 if (table_base + v != ip6t_next_entry(e) &&
441 !(e->ipv6.flags & IP6T_F_GOTO)) {
442 /* Save old back ptr in next entry */
443 struct ip6t_entry *next = ip6t_next_entry(e);
444 next->comefrom = (void *)back - table_base;
445 /* set back pointer to next entry */
449 e = get_entry(table_base, v);
453 /* Targets which reenter must return
455 tgpar.target = t->u.kernel.target;
456 tgpar.targinfo = t->data;
458 #ifdef CONFIG_NETFILTER_DEBUG
459 tb_comefrom = 0xeeeeeeec;
461 verdict = t->u.kernel.target->target(skb, &tgpar);
463 #ifdef CONFIG_NETFILTER_DEBUG
464 if (tb_comefrom != 0xeeeeeeec && verdict == IP6T_CONTINUE) {
465 printk("Target %s reentered!\n",
466 t->u.kernel.target->name);
469 tb_comefrom = 0x57acc001;
471 if (verdict == IP6T_CONTINUE)
472 e = ip6t_next_entry(e);
478 #ifdef CONFIG_NETFILTER_DEBUG
479 tb_comefrom = NETFILTER_LINK_POISON;
481 xt_info_rdunlock_bh();
483 #ifdef DEBUG_ALLOW_ALL
/* NOTE(review): extraction dropped lines throughout this function
 * (numbering jumps: 506->508, 560->564, 584->587, ...) — the goto
 * labels, the do/while loop heads and the final return are not visible.
 * Verify against upstream before treating as compilable.
 *
 * mark_source_chains(): graph walk over the translated ruleset that
 * records, in each entry's comefrom field, the bitmask of hooks that can
 * reach it, and detects rule loops (returns 0 on a loop).  Reuses
 * counters.pcnt as a temporary back-pointer stack during the walk.
 */
494 /* Figures out from what hook each rule can be called: returns 0 if
495 there are loops. Puts hook bitmask in comefrom. */
497 mark_source_chains(const struct xt_table_info *newinfo,
498 unsigned int valid_hooks, void *entry0)
502 /* No recursion; use packet counter to save back ptrs (reset
503 to 0 as we leave), and comefrom to save source hook bitmask */
504 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
505 unsigned int pos = newinfo->hook_entry[hook];
506 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
508 if (!(valid_hooks & (1 << hook)))
511 /* Set initial back pointer. */
512 e->counters.pcnt = pos;
515 const struct ip6t_standard_target *t
516 = (void *)ip6t_get_target_c(e);
517 int visited = e->comefrom & (1 << hook);
519 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
520 printk("iptables: loop hook %u pos %u %08X.\n",
521 hook, pos, e->comefrom);
524 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
526 /* Unconditional return/END. */
527 if ((e->target_offset == sizeof(struct ip6t_entry) &&
528 (strcmp(t->target.u.user.name,
529 IP6T_STANDARD_TARGET) == 0) &&
531 unconditional(&e->ipv6)) || visited) {
532 unsigned int oldpos, size;
534 if ((strcmp(t->target.u.user.name,
535 IP6T_STANDARD_TARGET) == 0) &&
536 t->verdict < -NF_MAX_VERDICT - 1) {
537 duprintf("mark_source_chains: bad "
538 "negative verdict (%i)\n",
543 /* Return: backtrack through the last
546 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
547 #ifdef DEBUG_IP_FIREWALL_USER
549 & (1 << NF_INET_NUMHOOKS)) {
550 duprintf("Back unset "
557 pos = e->counters.pcnt;
558 e->counters.pcnt = 0;
560 /* We're at the start. */
564 e = (struct ip6t_entry *)
566 } while (oldpos == pos + e->next_offset);
569 size = e->next_offset;
570 e = (struct ip6t_entry *)
571 (entry0 + pos + size);
572 e->counters.pcnt = pos;
575 int newpos = t->verdict;
577 if (strcmp(t->target.u.user.name,
578 IP6T_STANDARD_TARGET) == 0 &&
580 if (newpos > newinfo->size -
581 sizeof(struct ip6t_entry)) {
582 duprintf("mark_source_chains: "
583 "bad verdict (%i)\n",
587 /* This a jump; chase it. */
588 duprintf("Jump rule %u -> %u\n",
591 /* ... this is a fallthru */
592 newpos = pos + e->next_offset;
594 e = (struct ip6t_entry *)
596 e->counters.pcnt = pos;
601 duprintf("Finished chain %u\n", hook);
607 cleanup_match(struct ip6t_entry_match *m, struct net *net, unsigned int *i)
609 struct xt_mtdtor_param par;
611 if (i && (*i)-- == 0)
615 par.match = m->u.kernel.match;
616 par.matchinfo = m->data;
617 par.family = NFPROTO_IPV6;
618 if (par.match->destroy != NULL)
619 par.match->destroy(&par);
620 module_put(par.match->me);
625 check_entry(const struct ip6t_entry *e, const char *name)
627 const struct ip6t_entry_target *t;
629 if (!ip6_checkentry(&e->ipv6)) {
630 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
634 if (e->target_offset + sizeof(struct ip6t_entry_target) >
638 t = ip6t_get_target_c(e);
639 if (e->target_offset + t->u.target_size > e->next_offset)
/* NOTE(review): extraction dropped lines here (numbering jumps 648->651,
 * 655->657) — the local declarations, the error-path return and the
 * success return are not visible.  Verify against upstream.
 *
 * check_match(): run xt_check_match() on an already-resolved match
 * extension, passing the rule's protocol and IP6T_INV_PROTO inversion
 * so the extension can validate its own usage context.
 */
645 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
648 const struct ip6t_ip6 *ipv6 = par->entryinfo;
651 par->match = m->u.kernel.match;
652 par->matchinfo = m->data;
654 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
655 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
657 duprintf("ip_tables: check failed for `%s'.\n",
666 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par,
669 struct xt_match *match;
672 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
674 "ip6t_%s", m->u.user.name);
675 if (IS_ERR(match) || !match) {
676 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
677 return match ? PTR_ERR(match) : -ENOENT;
679 m->u.kernel.match = match;
681 ret = check_match(m, par, i);
687 module_put(m->u.kernel.match->me);
691 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
693 struct ip6t_entry_target *t = ip6t_get_target(e);
694 struct xt_tgchk_param par = {
698 .target = t->u.kernel.target,
700 .hook_mask = e->comefrom,
701 .family = NFPROTO_IPV6,
705 t = ip6t_get_target(e);
706 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
707 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
709 duprintf("ip_tables: check failed for `%s'.\n",
710 t->u.kernel.target->name);
/* NOTE(review): extraction dropped lines here (numbering jumps 721->724,
 * 727->734, 738->743, ...) — local declarations, the early-return on
 * check_entry() failure, and the error-unwind labels are not visible.
 * Verify against upstream.
 *
 * find_check_entry(): full per-entry validation — structural check,
 * resolve+check every match extension (counting successes in j for
 * unwind), resolve+check the target; on any failure release everything
 * acquired so far via cleanup_matches / module_put.
 */
717 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
720 struct ip6t_entry_target *t;
721 struct xt_target *target;
724 struct xt_mtchk_param mtpar;
725 struct xt_entry_match *ematch;
727 ret = check_entry(e, name);
734 mtpar.entryinfo = &e->ipv6;
735 mtpar.hook_mask = e->comefrom;
736 mtpar.family = NFPROTO_IPV6;
737 xt_ematch_foreach(ematch, e) {
738 ret = find_check_match(ematch, &mtpar, &j);
743 goto cleanup_matches;
745 t = ip6t_get_target(e);
746 target = try_then_request_module(xt_find_target(AF_INET6,
749 "ip6t_%s", t->u.user.name);
750 if (IS_ERR(target) || !target) {
751 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
752 ret = target ? PTR_ERR(target) : -ENOENT;
753 goto cleanup_matches;
755 t->u.kernel.target = target;
757 ret = check_target(e, net, name);
762 module_put(t->u.kernel.target->me);
764 xt_ematch_foreach(ematch, e)
765 if (cleanup_match(ematch, net, &j) != 0)
770 static bool check_underflow(const struct ip6t_entry *e)
772 const struct ip6t_entry_target *t;
773 unsigned int verdict;
775 if (!unconditional(&e->ipv6))
777 t = ip6t_get_target_c(e);
778 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
780 verdict = ((struct ip6t_standard_target *)t)->verdict;
781 verdict = -verdict - 1;
782 return verdict == NF_DROP || verdict == NF_ACCEPT;
/* NOTE(review): extraction dropped lines here (numbering jumps 798->803,
 * 818->822, ...) — return statements and some error strings are not
 * visible.  Verify against upstream.
 *
 * check_entry_size_and_hooks(): per-entry pass of translate_table —
 * validates alignment and bounds of the entry within the blob, records
 * hook entry points and underflows that coincide with this entry's
 * offset (rejecting non-policy underflows), and zeroes counters/comefrom.
 */
786 check_entry_size_and_hooks(struct ip6t_entry *e,
787 struct xt_table_info *newinfo,
788 const unsigned char *base,
789 const unsigned char *limit,
790 const unsigned int *hook_entries,
791 const unsigned int *underflows,
792 unsigned int valid_hooks)
796 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
797 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
798 duprintf("Bad offset %p\n", e);
803 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
804 duprintf("checking: element %p size %u\n",
809 /* Check hooks & underflows */
810 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
811 if (!(valid_hooks & (1 << h)))
813 if ((unsigned char *)e - base == hook_entries[h])
814 newinfo->hook_entry[h] = hook_entries[h];
815 if ((unsigned char *)e - base == underflows[h]) {
816 if (!check_underflow(e)) {
817 pr_err("Underflows must be unconditional and "
818 "use the STANDARD target with "
822 newinfo->underflow[h] = underflows[h];
826 /* Clear counters and comefrom */
827 e->counters = ((struct xt_counters) { 0, 0 });
832 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
834 struct xt_tgdtor_param par;
835 struct ip6t_entry_target *t;
836 struct xt_entry_match *ematch;
838 /* Cleanup all matches */
839 xt_ematch_foreach(ematch, e)
840 if (cleanup_match(ematch, net, NULL) != 0)
842 t = ip6t_get_target(e);
845 par.target = t->u.kernel.target;
846 par.targinfo = t->data;
847 par.family = NFPROTO_IPV6;
848 if (par.target->destroy != NULL)
849 par.target->destroy(&par);
850 module_put(par.target->me);
/* NOTE(review): extraction dropped lines here (numbering jumps 859->863,
 * 884->891, 919->926, ...) — the parameter list is incomplete and error
 * paths / returns are missing.  Verify against upstream.
 *
 * translate_table(): master validation pipeline for a user-supplied
 * ruleset — per-entry size/hook checks, hook-assignment verification,
 * loop detection (mark_source_chains), per-entry extension checking
 * (find_check_entry, with unwind via cleanup_entry on failure), then
 * replication of the validated blob to every possible CPU's copy.
 */
853 /* Checks and translates the user-supplied table segment (held in
856 translate_table(struct net *net,
858 unsigned int valid_hooks,
859 struct xt_table_info *newinfo,
863 const unsigned int *hook_entries,
864 const unsigned int *underflows)
866 struct ip6t_entry *iter;
870 newinfo->size = size;
871 newinfo->number = number;
873 /* Init all hooks to impossible value. */
874 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
875 newinfo->hook_entry[i] = 0xFFFFFFFF;
876 newinfo->underflow[i] = 0xFFFFFFFF;
879 duprintf("translate_table: size %u\n", newinfo->size);
881 /* Walk through entries, checking offsets. */
882 xt_entry_foreach(iter, entry0, newinfo->size) {
883 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
884 entry0 + size, hook_entries, underflows, valid_hooks);
891 duprintf("translate_table: %u not %u entries\n",
896 /* Check hooks all assigned */
897 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
898 /* Only hooks which are valid */
899 if (!(valid_hooks & (1 << i)))
901 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
902 duprintf("Invalid hook entry %u %u\n",
906 if (newinfo->underflow[i] == 0xFFFFFFFF) {
907 duprintf("Invalid underflow %u %u\n",
913 if (!mark_source_chains(newinfo, valid_hooks, entry0))
916 /* Finally, each sanity check must pass */
918 xt_entry_foreach(iter, entry0, newinfo->size) {
919 ret = find_check_entry(iter, net, name, size);
926 xt_entry_foreach(iter, entry0, newinfo->size) {
929 cleanup_entry(iter, net);
934 /* And one copy for every other CPU */
935 for_each_possible_cpu(i) {
936 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
937 memcpy(newinfo->entries[i], entry0, newinfo->size);
/* NOTE(review): extraction dropped lines here (numbering jumps 947->952,
 * 965->969, ...) — local declarations, the seqlock/wrlock calls around
 * the current-CPU pass and loop bodies are not fully visible.  Verify
 * against upstream.
 *
 * get_counters(): snapshot the per-CPU rule counters into a flat
 * array — SET on the current CPU's copy first, then ADD every other
 * CPU's copy under xt_info_wrlock, with bottom halves disabled to avoid
 * deadlocking against the packet path.
 */
944 get_counters(const struct xt_table_info *t,
945 struct xt_counters counters[])
947 struct ip6t_entry *iter;
952 /* Instead of clearing (by a previous call to memset())
953 * the counters and using adds, we set the counters
954 * with data used by 'current' CPU
956 * Bottom half has to be disabled to prevent deadlock
957 * if new softirq were to run and call ipt_do_table
960 curcpu = smp_processor_id();
963 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
964 SET_COUNTER(counters[i], iter->counters.bcnt,
965 iter->counters.pcnt);
969 for_each_possible_cpu(cpu) {
974 xt_entry_foreach(iter, t->entries[cpu], t->size) {
975 ADD_COUNTER(counters[i], iter->counters.bcnt,
976 iter->counters.pcnt);
979 xt_info_wrunlock(cpu);
984 static struct xt_counters *alloc_counters(const struct xt_table *table)
986 unsigned int countersize;
987 struct xt_counters *counters;
988 const struct xt_table_info *private = table->private;
990 /* We need atomic snapshot of counters: rest doesn't change
991 (other than comefrom, which userspace doesn't care
993 countersize = sizeof(struct xt_counters) * private->number;
994 counters = vmalloc_node(countersize, numa_node_id());
996 if (counters == NULL)
997 return ERR_PTR(-ENOMEM);
999 get_counters(private, counters);
/* NOTE(review): extraction dropped lines here (numbering jumps
 * 1041->1046, 1055->1062, 1067->1078) — loop bodies, error paths and
 * the free/return tail are not visible.  Verify against upstream.
 *
 * copy_entries_to_user(): export the ruleset blob to userspace — raw
 * copy of this CPU's table copy, then patch in the snapshot counters and
 * rewrite each match/target name field from the kernel-resolved names.
 */
1005 copy_entries_to_user(unsigned int total_size,
1006 const struct xt_table *table,
1007 void __user *userptr)
1009 unsigned int off, num;
1010 const struct ip6t_entry *e;
1011 struct xt_counters *counters;
1012 const struct xt_table_info *private = table->private;
1014 const void *loc_cpu_entry;
1016 counters = alloc_counters(table);
1017 if (IS_ERR(counters))
1018 return PTR_ERR(counters);
1020 /* choose the copy that is on our node/cpu, ...
1021 * This choice is lazy (because current thread is
1022 * allowed to migrate to another cpu)
1024 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1025 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
1030 /* FIXME: use iterator macros --RR */
1031 /* ... then go back and fix counters and names */
1032 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
1034 const struct ip6t_entry_match *m;
1035 const struct ip6t_entry_target *t;
1037 e = (struct ip6t_entry *)(loc_cpu_entry + off);
1038 if (copy_to_user(userptr + off
1039 + offsetof(struct ip6t_entry, counters),
1041 sizeof(counters[num])) != 0) {
1046 for (i = sizeof(struct ip6t_entry);
1047 i < e->target_offset;
1048 i += m->u.match_size) {
1051 if (copy_to_user(userptr + off + i
1052 + offsetof(struct ip6t_entry_match,
1054 m->u.kernel.match->name,
1055 strlen(m->u.kernel.match->name)+1)
1062 t = ip6t_get_target_c(e);
1063 if (copy_to_user(userptr + off + e->target_offset
1064 + offsetof(struct ip6t_entry_target,
1066 t->u.kernel.target->name,
1067 strlen(t->u.kernel.target->name)+1) != 0) {
1078 #ifdef CONFIG_COMPAT
1079 static void compat_standard_from_user(void *dst, const void *src)
1081 int v = *(compat_int_t *)src;
1084 v += xt_compat_calc_jump(AF_INET6, v);
1085 memcpy(dst, &v, sizeof(v));
1088 static int compat_standard_to_user(void __user *dst, const void *src)
1090 compat_int_t cv = *(int *)src;
1093 cv -= xt_compat_calc_jump(AF_INET6, cv);
1094 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1098 compat_calc_match(const struct ip6t_entry_match *m, int *size)
1100 *size += xt_compat_match_offset(m->u.kernel.match);
/* NOTE(review): extraction dropped lines here (numbering jumps
 * 1110->1113, 1121->1125, ...) — local declarations, the early return
 * on xt_compat_add_offset() failure, and the final return are not
 * visible.  Verify against upstream.
 *
 * compat_calc_entry(): compute how much smaller one native entry is in
 * the compat (32-bit) layout, shrink newinfo->size accordingly, record
 * the per-entry offset delta, and shift any hook entry / underflow
 * offsets that lie beyond this entry.
 */
1104 static int compat_calc_entry(const struct ip6t_entry *e,
1105 const struct xt_table_info *info,
1106 const void *base, struct xt_table_info *newinfo)
1108 const struct xt_entry_match *ematch;
1109 const struct ip6t_entry_target *t;
1110 unsigned int entry_offset;
1113 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1114 entry_offset = (void *)e - base;
1115 xt_ematch_foreach(ematch, e)
1116 if (compat_calc_match(ematch, &off) != 0)
1118 t = ip6t_get_target_c(e);
1119 off += xt_compat_target_offset(t->u.kernel.target);
1120 newinfo->size -= off;
1121 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1125 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1126 if (info->hook_entry[i] &&
1127 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1128 newinfo->hook_entry[i] -= off;
1129 if (info->underflow[i] &&
1130 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1131 newinfo->underflow[i] -= off;
/* NOTE(review): extraction dropped lines here (numbering jumps
 * 1140->1143, 1151->1159) — the error return and final return are not
 * visible.  Verify against upstream.
 *
 * compat_table_info(): build a compat-sized xt_table_info describing
 * @info by running compat_calc_entry() over every entry of the current
 * CPU's rule copy.
 */
1136 static int compat_table_info(const struct xt_table_info *info,
1137 struct xt_table_info *newinfo)
1139 struct ip6t_entry *iter;
1140 void *loc_cpu_entry;
1143 if (!newinfo || !info)
1146 /* we dont care about newinfo->entries[] */
1147 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1148 newinfo->initial_entries = 0;
1149 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1150 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1151 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
/* NOTE(review): extraction dropped lines here (numbering jumps
 * 1168->1172, 1203->1211, ...) — error returns, the module_put/unlock
 * tail and the final return are not visible.  Verify against upstream.
 *
 * get_info(): IP6T_SO_GET_INFO handler — look up the named table
 * (auto-loading ip6table_<name> if needed) and copy its hook entry
 * points, underflows, entry count and size to userspace; under
 * CONFIG_COMPAT the sizes are recomputed for the 32-bit layout first.
 */
1159 static int get_info(struct net *net, void __user *user,
1160 const int *len, int compat)
1162 char name[IP6T_TABLE_MAXNAMELEN];
1166 if (*len != sizeof(struct ip6t_getinfo)) {
1167 duprintf("length %u != %zu\n", *len,
1168 sizeof(struct ip6t_getinfo));
1172 if (copy_from_user(name, user, sizeof(name)) != 0)
1175 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1176 #ifdef CONFIG_COMPAT
1178 xt_compat_lock(AF_INET6);
1180 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1181 "ip6table_%s", name);
1182 if (t && !IS_ERR(t)) {
1183 struct ip6t_getinfo info;
1184 const struct xt_table_info *private = t->private;
1185 #ifdef CONFIG_COMPAT
1186 struct xt_table_info tmp;
1189 ret = compat_table_info(private, &tmp);
1190 xt_compat_flush_offsets(AF_INET6);
1194 info.valid_hooks = t->valid_hooks;
1195 memcpy(info.hook_entry, private->hook_entry,
1196 sizeof(info.hook_entry));
1197 memcpy(info.underflow, private->underflow,
1198 sizeof(info.underflow));
1199 info.num_entries = private->number;
1200 info.size = private->size;
1201 strcpy(info.name, name);
1203 if (copy_to_user(user, &info, *len) != 0)
1211 ret = t ? PTR_ERR(t) : -ENOENT;
1212 #ifdef CONFIG_COMPAT
1214 xt_compat_unlock(AF_INET6);
/* NOTE(review): extraction dropped lines here (numbering jumps
 * 1228->1231, 1235->1239, 1248->1254) — error returns, the
 * module_put/unlock tail and the final return are not visible.  Verify
 * against upstream.
 *
 * get_entries(): IP6T_SO_GET_ENTRIES handler — validate the requested
 * size against the live table and dump the ruleset to userspace via
 * copy_entries_to_user().
 */
1220 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1224 struct ip6t_get_entries get;
1227 if (*len < sizeof(get)) {
1228 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1231 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1233 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1234 duprintf("get_entries: %u != %zu\n",
1235 *len, sizeof(get) + get.size);
1239 t = xt_find_table_lock(net, AF_INET6, get.name);
1240 if (t && !IS_ERR(t)) {
1241 struct xt_table_info *private = t->private;
1242 duprintf("t->private->number = %u\n", private->number);
1243 if (get.size == private->size)
1244 ret = copy_entries_to_user(private->size,
1245 t, uptr->entrytable);
1247 duprintf("get_entries: I've got %u not %u!\n",
1248 private->size, get.size);
1254 ret = t ? PTR_ERR(t) : -ENOENT;
/* NOTE(review): extraction dropped lines here (numbering jumps
 * 1272->1279, 1294->1298, 1318->1327) — the counter-allocation failure
 * path, the xt_replace_table error branch and the free/unlock tail are
 * not visible.  Verify against upstream.
 *
 * __do_replace(): common core of the SET_REPLACE path — swap the
 * validated new ruleset into the named table via xt_replace_table(),
 * adjust module refcounts based on rule-count transitions, snapshot the
 * old counters for userspace, and tear down the old ruleset.
 */
1260 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1261 struct xt_table_info *newinfo, unsigned int num_counters,
1262 void __user *counters_ptr)
1266 struct xt_table_info *oldinfo;
1267 struct xt_counters *counters;
1268 const void *loc_cpu_old_entry;
1269 struct ip6t_entry *iter;
1272 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1279 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1280 "ip6table_%s", name);
1281 if (!t || IS_ERR(t)) {
1282 ret = t ? PTR_ERR(t) : -ENOENT;
1283 goto free_newinfo_counters_untrans;
1287 if (valid_hooks != t->valid_hooks) {
1288 duprintf("Valid hook crap: %08X vs %08X\n",
1289 valid_hooks, t->valid_hooks);
1294 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1298 /* Update module usage count based on number of rules */
1299 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1300 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1301 if ((oldinfo->number > oldinfo->initial_entries) ||
1302 (newinfo->number <= oldinfo->initial_entries))
1304 if ((oldinfo->number > oldinfo->initial_entries) &&
1305 (newinfo->number <= oldinfo->initial_entries))
1308 /* Get the old counters, and synchronize with replace */
1309 get_counters(oldinfo, counters);
1311 /* Decrease module usage counts and free resource */
1312 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1313 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1314 cleanup_entry(iter, net);
1316 xt_free_table_info(oldinfo);
1317 if (copy_to_user(counters_ptr, counters,
1318 sizeof(struct xt_counters) * num_counters) != 0)
1327 free_newinfo_counters_untrans:
/* NOTE(review): extraction dropped lines here (numbering jumps
 * 1342->1345, 1355->1361, 1372->1375) — early error returns and the
 * free_newinfo label/return tail are not visible.  Verify against
 * upstream.
 *
 * do_replace(): native SET_REPLACE entry point — copy the header and
 * rule blob from userspace (with an overflow check on num_counters),
 * validate via translate_table(), then install via __do_replace(),
 * unwinding the new table on any failure.
 */
1334 do_replace(struct net *net, const void __user *user, unsigned int len)
1337 struct ip6t_replace tmp;
1338 struct xt_table_info *newinfo;
1339 void *loc_cpu_entry;
1340 struct ip6t_entry *iter;
1342 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1345 /* overflow check */
1346 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1349 newinfo = xt_alloc_table_info(tmp.size);
1353 /* choose the copy that is on our node/cpu */
1354 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1355 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1361 ret = translate_table(net, tmp.name, tmp.valid_hooks,
1362 newinfo, loc_cpu_entry, tmp.size, tmp.num_entries,
1363 tmp.hook_entry, tmp.underflow)
1367 duprintf("ip_tables: Translated table\n");
1369 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1370 tmp.num_counters, tmp.counters);
1372 goto free_newinfo_untrans;
1375 free_newinfo_untrans:
1376 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1377 cleanup_entry(iter, net);
1379 xt_free_table_info(newinfo);
/* NOTE(review): extraction dropped lines here (numbering jumps
 * 1390->1395, 1400->1404, 1412->1415, 1461->1473) — the compat branch
 * glue, several error returns and the free/unlock tail are not visible.
 * Verify against upstream.
 *
 * do_add_counters(): SET_ADD_COUNTERS handler — read a counter array
 * from userspace (handling the 32-bit compat header layout), find the
 * named table, and add each userspace counter pair onto the matching
 * rule's counters under xt_info_wrlock on the current CPU.
 */
1384 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1387 unsigned int i, curcpu;
1388 struct xt_counters_info tmp;
1389 struct xt_counters *paddc;
1390 unsigned int num_counters;
1395 const struct xt_table_info *private;
1397 const void *loc_cpu_entry;
1398 struct ip6t_entry *iter;
1399 #ifdef CONFIG_COMPAT
1400 struct compat_xt_counters_info compat_tmp;
1404 size = sizeof(struct compat_xt_counters_info);
1409 size = sizeof(struct xt_counters_info);
1412 if (copy_from_user(ptmp, user, size) != 0)
1415 #ifdef CONFIG_COMPAT
1417 num_counters = compat_tmp.num_counters;
1418 name = compat_tmp.name;
1422 num_counters = tmp.num_counters;
1426 if (len != size + num_counters * sizeof(struct xt_counters))
1429 paddc = vmalloc_node(len - size, numa_node_id());
1433 if (copy_from_user(paddc, user + size, len - size) != 0) {
1438 t = xt_find_table_lock(net, AF_INET6, name);
1439 if (!t || IS_ERR(t)) {
1440 ret = t ? PTR_ERR(t) : -ENOENT;
1446 private = t->private;
1447 if (private->number != num_counters) {
1449 goto unlock_up_free;
1453 /* Choose the copy that is on our node */
1454 curcpu = smp_processor_id();
1455 xt_info_wrlock(curcpu);
1456 loc_cpu_entry = private->entries[curcpu];
1457 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1458 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1461 xt_info_wrunlock(curcpu);
/* NOTE(review): extraction dropped lines inside this struct (numbering
 * jumps 1475->1479, 1480->1482) — the valid_hooks/num_entries/size and
 * num_counters fields are not visible.  Verify against upstream.
 *
 * 32-bit (compat) layout of struct ip6t_replace, used when a 32-bit
 * userland talks to a 64-bit kernel: pointers become compat_uptr_t and
 * trailing entries use the compat entry layout.
 */
1473 #ifdef CONFIG_COMPAT
1474 struct compat_ip6t_replace {
1475 char name[IP6T_TABLE_MAXNAMELEN];
1479 u32 hook_entry[NF_INET_NUMHOOKS];
1480 u32 underflow[NF_INET_NUMHOOKS];
1482 compat_uptr_t counters; /* struct ip6t_counters * */
1483 struct compat_ip6t_entry entries[0];
/*
 * Copy one kernel ip6t_entry out to userspace in compat layout:
 * shrink matches and target, substitute counters[i] for the kernel
 * counter field, and rewrite the entry-local offsets.  *dstptr and
 * *size are advanced in place.
 */
1487 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1488 unsigned int *size, struct xt_counters *counters,
1491 struct ip6t_entry_target *t;
1492 struct compat_ip6t_entry __user *ce;
1493 u_int16_t target_offset, next_offset;
1494 compat_uint_t origsize;
1495 const struct xt_entry_match *ematch;
1499 ce = (struct compat_ip6t_entry __user *)*dstptr;
1500 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1501 copy_to_user(&ce->counters, &counters[i],
1502 sizeof(counters[i])) != 0)
1505 *dstptr += sizeof(struct compat_ip6t_entry);
1506 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Each match may shrink in compat form; xt_compat_* adjusts *size. */
1508 xt_ematch_foreach(ematch, e) {
1509 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* origsize - *size is the number of bytes saved so far; the entry's
 * internal offsets shrink by the same amount. */
1513 target_offset = e->target_offset - (origsize - *size);
1516 t = ip6t_get_target(e);
1517 ret = xt_compat_target_to_user(t, dstptr, size);
1520 next_offset = e->next_offset - (origsize - *size);
1521 if (put_user(target_offset, &ce->target_offset) != 0 ||
1522 put_user(next_offset, &ce->next_offset) != 0)
/*
 * Look up (and pin, via module refcount) the match named in a compat
 * entry, and add its native-vs-compat size difference to *size.
 */
1528 compat_find_calc_match(struct ip6t_entry_match *m,
1530 const struct ip6t_ip6 *ipv6,
1531 unsigned int hookmask,
1532 int *size, unsigned int *i)
1534 struct xt_match *match;
/* Autoload "ip6t_<name>" if the match is not already registered. */
1536 match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
1537 m->u.user.revision),
1538 "ip6t_%s", m->u.user.name);
1539 if (IS_ERR(match) || !match) {
1540 duprintf("compat_check_calc_match: `%s' not found\n",
1542 return match ? PTR_ERR(match) : -ENOENT;
1544 m->u.kernel.match = match;
1545 *size += xt_compat_match_offset(match);
/* Drop the module reference taken by compat_find_calc_match().
 * With a non-NULL counter, only the first *i matches are released
 * (partial unwind after a mid-entry failure). */
1552 compat_release_match(struct ip6t_entry_match *m, unsigned int *i)
1554 if (i && (*i)-- == 0)
1557 module_put(m->u.kernel.match->me);
/* Release every match and target module reference held by one
 * compat entry (full unwind; NULL counter releases all matches). */
1561 static void compat_release_entry(struct compat_ip6t_entry *e)
1563 struct ip6t_entry_target *t;
1564 struct xt_entry_match *ematch;
1566 /* Cleanup all matches */
1567 xt_ematch_foreach(ematch, e)
1568 if (compat_release_match(ematch, NULL) != 0)
1570 t = compat_ip6t_get_target(e);
1571 module_put(t->u.kernel.target->me);
/*
 * First translation pass for one compat entry: validate alignment and
 * size, pin its match/target modules, record the native<->compat size
 * delta for this entry's offset, and note any hook entry/underflow
 * positions that land exactly on it.
 */
1575 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1576 struct xt_table_info *newinfo,
1578 const unsigned char *base,
1579 const unsigned char *limit,
1580 const unsigned int *hook_entries,
1581 const unsigned int *underflows,
1584 struct xt_entry_match *ematch;
1585 struct ip6t_entry_target *t;
1586 struct xt_target *target;
1587 unsigned int entry_offset;
1591 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Entry must be aligned and leave room before the end of the blob. */
1592 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1593 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1594 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least span the entry plus a minimal target. */
1598 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1599 sizeof(struct compat_xt_entry_target)) {
1600 duprintf("checking: element %p size %u\n",
1605 /* For purposes of check_entry casting the compat entry is fine */
1606 ret = check_entry((struct ip6t_entry *)e, name);
/* 'off' accumulates how much larger the native entry will become. */
1610 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1611 entry_offset = (void *)e - (void *)base;
1613 xt_ematch_foreach(ematch, e) {
1614 ret = compat_find_calc_match(ematch, name,
1615 &e->ipv6, e->comefrom, &off, &j);
1620 goto release_matches;
1622 t = compat_ip6t_get_target(e);
/* Autoload and pin the target module, like the matches above. */
1623 target = try_then_request_module(xt_find_target(AF_INET6,
1625 t->u.user.revision),
1626 "ip6t_%s", t->u.user.name);
1627 if (IS_ERR(target) || !target) {
1628 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1630 ret = target ? PTR_ERR(target) : -ENOENT;
1631 goto release_matches;
1633 t->u.kernel.target = target;
1635 off += xt_compat_target_offset(target);
/* Remember the size delta so pass two can translate offsets. */
1637 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1641 /* Check hooks & underflows */
1642 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1643 if ((unsigned char *)e - base == hook_entries[h])
1644 newinfo->hook_entry[h] = hook_entries[h];
1645 if ((unsigned char *)e - base == underflows[h])
1646 newinfo->underflow[h] = underflows[h];
1649 /* Clear counters and comefrom */
1650 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwind: drop the target ref, then the first j match refs. */
1655 module_put(t->u.kernel.target->me);
1657 xt_ematch_foreach(ematch, e)
1658 if (compat_release_match(ematch, &j) != 0)
/*
 * Second translation pass: expand one compat entry into native layout
 * at *dstptr and rebase the hook entry/underflow offsets recorded in
 * newinfo by the bytes the ruleset has grown so far.
 */
1664 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1665 unsigned int *size, const char *name,
1666 struct xt_table_info *newinfo, unsigned char *base)
1668 struct ip6t_entry_target *t;
1669 struct xt_target *target;
1670 struct ip6t_entry *de;
1671 unsigned int origsize;
1673 struct xt_entry_match *ematch;
1677 de = (struct ip6t_entry *)*dstptr;
1678 memcpy(de, e, sizeof(struct ip6t_entry));
1679 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1681 *dstptr += sizeof(struct ip6t_entry);
/* Native entries are larger; *size grows by the struct delta. */
1682 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1684 xt_ematch_foreach(ematch, e) {
1685 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* Adjust entry-local offsets by the size change accumulated so far. */
1691 de->target_offset = e->target_offset - (origsize - *size);
1692 t = compat_ip6t_get_target(e);
/* Target module was already pinned by the size/hooks pass. */
1693 target = t->u.kernel.target;
1694 xt_compat_target_from_user(t, dstptr, size);
1696 de->next_offset = e->next_offset - (origsize - *size);
/* Any hook/underflow located after this entry shifts by the same delta. */
1697 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1698 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1699 newinfo->hook_entry[h] -= origsize - *size;
1700 if ((unsigned char *)de - base < newinfo->underflow[h])
1701 newinfo->underflow[h] -= origsize - *size;
/*
 * Run the real ->checkentry validation for one translated entry:
 * every match first, then the target; unwind the first j matches
 * if anything fails.
 */
1706 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1711 struct xt_mtchk_param mtpar;
1712 struct xt_entry_match *ematch;
/* Shared check parameters for all matches of this entry. */
1717 mtpar.entryinfo = &e->ipv6;
1718 mtpar.hook_mask = e->comefrom;
1719 mtpar.family = NFPROTO_IPV6;
1720 xt_ematch_foreach(ematch, e) {
1721 ret = check_match(ematch, &mtpar, &j);
1726 goto cleanup_matches;
1728 ret = check_target(e, net, name);
1730 goto cleanup_matches;
/* Error unwind: destroy only the matches already checked. */
1734 xt_ematch_foreach(ematch, e)
1735 if (cleanup_match(ematch, net, &j) != 0)
/*
 * Translate a whole compat (32-bit) ruleset into native layout.
 *
 * Two passes under the AF_INET6 compat lock: first validate each entry
 * and record per-entry size deltas, then expand every entry into a
 * freshly allocated xt_table_info.  On success *pinfo/*pentry0 are
 * swapped to the native table and the intermediate info is freed.
 */
1741 translate_compat_table(struct net *net,
1743 unsigned int valid_hooks,
1744 struct xt_table_info **pinfo,
1746 unsigned int total_size,
1747 unsigned int number,
1748 unsigned int *hook_entries,
1749 unsigned int *underflows)
1752 struct xt_table_info *newinfo, *info;
1753 void *pos, *entry0, *entry1;
1754 struct compat_ip6t_entry *iter0;
1755 struct ip6t_entry *iter1;
1762 info->number = number;
1764 /* Init all hooks to impossible value. */
1765 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1766 info->hook_entry[i] = 0xFFFFFFFF;
1767 info->underflow[i] = 0xFFFFFFFF;
1770 duprintf("translate_compat_table: size %u\n", info->size);
/* The compat offset table is global per family; hold the lock for
 * both passes. */
1772 xt_compat_lock(AF_INET6);
1773 /* Walk through entries, checking offsets. */
1774 xt_entry_foreach(iter0, entry0, total_size) {
1775 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1776 entry0, entry0 + total_size, hook_entries, underflows,
1785 duprintf("translate_compat_table: %u not %u entries\n",
1790 /* Check hooks all assigned */
1791 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1792 /* Only hooks which are valid */
1793 if (!(valid_hooks & (1 << i)))
1795 if (info->hook_entry[i] == 0xFFFFFFFF) {
1796 duprintf("Invalid hook entry %u %u\n",
1797 i, hook_entries[i]);
1800 if (info->underflow[i] == 0xFFFFFFFF) {
1801 duprintf("Invalid underflow %u %u\n",
/* Pass two: build the native-size table and expand every entry. */
1808 newinfo = xt_alloc_table_info(size);
1812 newinfo->number = number;
1813 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1814 newinfo->hook_entry[i] = info->hook_entry[i];
1815 newinfo->underflow[i] = info->underflow[i];
1817 entry1 = newinfo->entries[raw_smp_processor_id()];
1820 xt_entry_foreach(iter0, entry0, total_size) {
1821 ret = compat_copy_entry_from_user(iter0, &pos,
1822 &size, name, newinfo, entry1);
/* Offset table is only needed during translation; release it. */
1826 xt_compat_flush_offsets(AF_INET6);
1827 xt_compat_unlock(AF_INET6);
1832 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1836 xt_entry_foreach(iter1, entry1, newinfo->size) {
1837 ret = compat_check_entry(iter1, net, name);
1844 * The first i matches need cleanup_entry (calls ->destroy)
1845 * because they had called ->check already. The other j-i
1846 * entries need only release.
1850 xt_entry_foreach(iter0, entry0, newinfo->size) {
1855 compat_release_entry(iter0);
1857 xt_entry_foreach(iter1, entry1, newinfo->size) {
1860 cleanup_entry(iter1, net);
1862 xt_free_table_info(newinfo);
1866 /* And one copy for every other CPU */
1867 for_each_possible_cpu(i)
1868 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1869 memcpy(newinfo->entries[i], entry1, newinfo->size);
1873 xt_free_table_info(info);
/* Error paths: free the partial translation and drop pinned modules. */
1877 xt_free_table_info(newinfo);
1879 xt_entry_foreach(iter0, entry0, total_size) {
1882 compat_release_entry(iter0);
1886 xt_compat_flush_offsets(AF_INET6);
1887 xt_compat_unlock(AF_INET6);
/*
 * IP6T_SO_SET_REPLACE from a 32-bit process: copy in the compat blob,
 * translate it to native layout, then swap it into the live table.
 */
1892 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1895 struct compat_ip6t_replace tmp;
1896 struct xt_table_info *newinfo;
1897 void *loc_cpu_entry;
1898 struct ip6t_entry *iter;
1900 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1903 /* overflow check */
1904 if (tmp.size >= INT_MAX / num_possible_cpus())
1906 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1909 newinfo = xt_alloc_table_info(tmp.size);
1913 /* choose the copy that is on our node/cpu */
1914 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1915 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1921 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1922 &newinfo, &loc_cpu_entry, tmp.size,
1923 tmp.num_entries, tmp.hook_entry,
1928 duprintf("compat_do_replace: Translated table\n");
1930 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1931 tmp.num_counters, compat_ptr(tmp.counters));
1933 goto free_newinfo_untrans;
/* Undo a successful translation: destroy each entry, free the info. */
1936 free_newinfo_untrans:
1937 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1938 cleanup_entry(iter, net);
1940 xt_free_table_info(newinfo);
/* Compat setsockopt entry point: REPLACE and ADD_COUNTERS only.
 * All set operations require CAP_NET_ADMIN. */
1945 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1950 if (!capable(CAP_NET_ADMIN))
1954 case IP6T_SO_SET_REPLACE:
1955 ret = compat_do_replace(sock_net(sk), user, len);
1958 case IP6T_SO_SET_ADD_COUNTERS:
/* compat=1: the counters header arrives in 32-bit layout. */
1959 ret = do_add_counters(sock_net(sk), user, len, 1);
1963 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* 32-bit layout of struct ip6t_get_entries (IP6T_SO_GET_ENTRIES). */
1970 struct compat_ip6t_get_entries {
1971 char name[IP6T_TABLE_MAXNAMELEN];
1973 struct compat_ip6t_entry entrytable[0];
/*
 * Dump a table's ruleset to a compat userspace buffer, folding in a
 * freshly aggregated counter snapshot (one slot per rule).
 */
1977 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1978 void __user *userptr)
1980 struct xt_counters *counters;
1981 const struct xt_table_info *private = table->private;
1985 const void *loc_cpu_entry;
1987 struct ip6t_entry *iter;
1989 counters = alloc_counters(table);
1990 if (IS_ERR(counters))
1991 return PTR_ERR(counters);
1993 /* choose the copy that is on our node/cpu, ...
1994 * This choice is lazy (because current thread is
1995 * allowed to migrate to another cpu)
1997 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2000 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
2001 ret = compat_copy_entry_to_user(iter, &pos,
2002 &size, counters, i++);
/*
 * IP6T_SO_GET_ENTRIES (compat): validate the requested size against
 * the table's compat-translated size, then copy the ruleset out.
 */
2012 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
2016 struct compat_ip6t_get_entries get;
2019 if (*len < sizeof(get)) {
2020 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
2024 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
2027 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
2028 duprintf("compat_get_entries: %u != %zu\n",
2029 *len, sizeof(get) + get.size);
/* The compat lock guards the shared offset table used to convert. */
2033 xt_compat_lock(AF_INET6);
2034 t = xt_find_table_lock(net, AF_INET6, get.name);
2035 if (t && !IS_ERR(t)) {
2036 const struct xt_table_info *private = t->private;
2037 struct xt_table_info info;
2038 duprintf("t->private->number = %u\n", private->number);
/* Compute what the table looks like in compat layout first. */
2039 ret = compat_table_info(private, &info);
2040 if (!ret && get.size == info.size) {
2041 ret = compat_copy_entries_to_user(private->size,
2042 t, uptr->entrytable);
2044 duprintf("compat_get_entries: I've got %u not %u!\n",
2045 private->size, get.size);
2048 xt_compat_flush_offsets(AF_INET6);
2052 ret = t ? PTR_ERR(t) : -ENOENT;
2054 xt_compat_unlock(AF_INET6);
2058 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/* Compat getsockopt: INFO/ENTRIES handled here; everything else falls
 * through to the native handler. */
2061 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2065 if (!capable(CAP_NET_ADMIN))
2069 case IP6T_SO_GET_INFO:
2070 ret = get_info(sock_net(sk), user, len, 1);
2072 case IP6T_SO_GET_ENTRIES:
2073 ret = compat_get_entries(sock_net(sk), user, len);
2076 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/* Native setsockopt entry point; CAP_NET_ADMIN required for all cmds. */
2083 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2087 if (!capable(CAP_NET_ADMIN))
2091 case IP6T_SO_SET_REPLACE:
2092 ret = do_replace(sock_net(sk), user, len);
2095 case IP6T_SO_SET_ADD_COUNTERS:
/* compat=0: the counters header arrives in native layout. */
2096 ret = do_add_counters(sock_net(sk), user, len, 0);
2100 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/* Native getsockopt: table info, rule dump, and match/target revision
 * queries. */
2108 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2112 if (!capable(CAP_NET_ADMIN))
2116 case IP6T_SO_GET_INFO:
2117 ret = get_info(sock_net(sk), user, len, 0);
2120 case IP6T_SO_GET_ENTRIES:
2121 ret = get_entries(sock_net(sk), user, len);
2124 case IP6T_SO_GET_REVISION_MATCH:
2125 case IP6T_SO_GET_REVISION_TARGET: {
2126 struct ip6t_get_revision rev;
2129 if (*len != sizeof(rev)) {
2133 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2138 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Autoload the extension module before reporting its revision. */
2143 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2146 "ip6t_%s", rev.name);
2151 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Register an ip6_tables table for one network namespace: build the
 * per-cpu rule blob from 'repl' and hand it to the x_tables core.
 * Returns the live xt_table or an ERR_PTR on failure.
 */
2158 struct xt_table *ip6t_register_table(struct net *net,
2159 const struct xt_table *table,
2160 const struct ip6t_replace *repl)
2163 struct xt_table_info *newinfo;
/* Placeholder info that xt_register_table swaps for newinfo. */
2164 struct xt_table_info bootstrap
2165 = { 0, 0, 0, { 0 }, { 0 }, { } };
2166 void *loc_cpu_entry;
2167 struct xt_table *new_table;
2169 newinfo = xt_alloc_table_info(repl->size);
2175 /* choose the copy on our node/cpu, but dont care about preemption */
2176 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2177 memcpy(loc_cpu_entry, repl->entries, repl->size);
2179 ret = translate_table(net, table->name, table->valid_hooks,
2180 newinfo, loc_cpu_entry, repl->size,
2187 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2188 if (IS_ERR(new_table)) {
2189 ret = PTR_ERR(new_table);
/* Error path: the unregistered info must be freed here. */
2195 xt_free_table_info(newinfo);
2197 return ERR_PTR(ret);
/* Tear down a table: destroy every rule, drop the owner-module ref
 * held for user-added (non-initial) rules, and free the table info. */
2200 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2202 struct xt_table_info *private;
2203 void *loc_cpu_entry;
2204 struct module *table_owner = table->me;
2205 struct ip6t_entry *iter;
2207 private = xt_unregister_table(table);
2209 /* Decrease module usage counts and free resources */
2210 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2211 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2212 cleanup_entry(iter, net);
2213 if (private->number > private->initial_entries)
2214 module_put(table_owner);
2215 xt_free_table_info(private);
2218 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
2220 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2221 u_int8_t type, u_int8_t code,
/* True when type matches and code lies within [min_code, max_code];
 * NOTE(review): the result is presumably XORed with the invert flag —
 * confirm against the remaining expression. */
2224 return (type == test_type && code >= min_code && code <= max_code)
/* Packet-path callback for the built-in "icmp6" match. */
2229 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2231 const struct icmp6hdr *ic;
2232 struct icmp6hdr _icmph;
2233 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2235 /* Must not be a fragment. */
2236 if (par->fragoff != 0)
/* Pull the ICMPv6 header (copied to the stack if non-linear). */
2239 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2241 /* We've been asked to examine this packet, and we
2242 * can't. Hence, no choice but to drop.
2244 duprintf("Dropping evil ICMP tinygram.\n");
2245 *par->hotdrop = true;
2249 return icmp6_type_code_match(icmpinfo->type,
2252 ic->icmp6_type, ic->icmp6_code,
2253 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2256 /* Called when user tries to insert an entry of this type. */
2257 static bool icmp6_checkentry(const struct xt_mtchk_param *par)
2259 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2261 /* Must specify no unknown invflags */
/* Only IP6T_ICMP_INV is a recognized invert flag for this match. */
2262 return !(icmpinfo->invflags & ~IP6T_ICMP_INV);
2265 /* The built-in targets: standard (NULL) and error. */
2266 static struct xt_target ip6t_standard_target __read_mostly = {
2267 .name = IP6T_STANDARD_TARGET,
2268 .targetsize = sizeof(int),
2269 .family = NFPROTO_IPV6,
2270 #ifdef CONFIG_COMPAT
/* Standard verdicts are plain ints; compat only needs size conversion. */
2271 .compatsize = sizeof(compat_int_t),
2272 .compat_from_user = compat_standard_from_user,
2273 .compat_to_user = compat_standard_to_user,
/* Target of the ERROR rule that terminates each built-in chain. */
2277 static struct xt_target ip6t_error_target __read_mostly = {
2278 .name = IP6T_ERROR_TARGET,
2279 .target = ip6t_error,
2280 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2281 .family = NFPROTO_IPV6,
/* get/setsockopt registration, including the 32-bit compat hooks. */
2284 static struct nf_sockopt_ops ip6t_sockopts = {
2286 .set_optmin = IP6T_BASE_CTL,
2287 .set_optmax = IP6T_SO_SET_MAX+1,
2288 .set = do_ip6t_set_ctl,
2289 #ifdef CONFIG_COMPAT
2290 .compat_set = compat_do_ip6t_set_ctl,
2292 .get_optmin = IP6T_BASE_CTL,
2293 .get_optmax = IP6T_SO_GET_MAX+1,
2294 .get = do_ip6t_get_ctl,
2295 #ifdef CONFIG_COMPAT
2296 .compat_get = compat_do_ip6t_get_ctl,
2298 .owner = THIS_MODULE,
/* Built-in "icmp6" match, registered unconditionally at module init. */
2301 static struct xt_match icmp6_matchstruct __read_mostly = {
2303 .match = icmp6_match,
2304 .matchsize = sizeof(struct ip6t_icmp),
2305 .checkentry = icmp6_checkentry,
2306 .proto = IPPROTO_ICMPV6,
2307 .family = NFPROTO_IPV6,
/* Per-namespace setup: create the IPv6 x_tables proc entries/state. */
2310 static int __net_init ip6_tables_net_init(struct net *net)
2312 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-namespace teardown, mirror of ip6_tables_net_init(). */
2315 static void __net_exit ip6_tables_net_exit(struct net *net)
2317 xt_proto_fini(net, NFPROTO_IPV6);
/* Hooks run for every network namespace created/destroyed. */
2320 static struct pernet_operations ip6_tables_net_ops = {
2321 .init = ip6_tables_net_init,
2322 .exit = ip6_tables_net_exit,
/* Module init: per-net state, built-in targets and match, then the
 * sockopt interface.  Each failure unwinds what was registered before. */
2325 static int __init ip6_tables_init(void)
2329 ret = register_pernet_subsys(&ip6_tables_net_ops);
2333 /* Noone else will be downing sem now, so we won't sleep */
2334 ret = xt_register_target(&ip6t_standard_target);
2337 ret = xt_register_target(&ip6t_error_target);
2340 ret = xt_register_match(&icmp6_matchstruct);
2344 /* Register setsockopt */
2345 ret = nf_register_sockopt(&ip6t_sockopts);
2349 printk(KERN_INFO "ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
/* Error labels: unwind in reverse order of registration. */
2353 xt_unregister_match(&icmp6_matchstruct);
2355 xt_unregister_target(&ip6t_error_target);
2357 xt_unregister_target(&ip6t_standard_target);
2359 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: exact reverse of ip6_tables_init(). */
2364 static void __exit ip6_tables_fini(void)
2366 nf_unregister_sockopt(&ip6t_sockopts);
2368 xt_unregister_match(&icmp6_matchstruct);
2369 xt_unregister_target(&ip6t_error_target);
2370 xt_unregister_target(&ip6t_standard_target);
2372 unregister_pernet_subsys(&ip6_tables_net_ops);
2376 * find the offset to a specified header, or the protocol number of the last
2377 * header if target < 0. "last header" is the transport protocol header, ESP, or
2380 * If the target header is found, its offset is set in *offset and the protocol
2381 * number is returned. Otherwise, -1 is returned.
2383 * If the first fragment doesn't contain the final protocol header or
2384 * NEXTHDR_NONE it is considered invalid.
2386 * Note that a non-1st fragment is a special case in which "the protocol number
2387 * of the last header" is the "next header" field of the Fragment header. In this
2388 * case *offset is meaningless, and the fragment offset is stored in *fragoff if fragoff
2392 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2393 int target, unsigned short *fragoff)
/* Walk the extension-header chain starting just past the fixed header. */
2395 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2396 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2397 unsigned int len = skb->len - start;
2402 while (nexthdr != target) {
2403 struct ipv6_opt_hdr _hdr, *hp;
2404 unsigned int hdrlen;
/* Hit a non-extension (transport) header or NEXTHDR_NONE. */
2406 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2412 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2415 if (nexthdr == NEXTHDR_FRAGMENT) {
2416 unsigned short _frag_off;
2418 fp = skb_header_pointer(skb,
2419 start+offsetof(struct frag_hdr,
/* Mask the flag bits; the remainder is the 8-byte-unit offset. */
2426 _frag_off = ntohs(*fp) & ~0x7;
2429 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2430 hp->nexthdr == NEXTHDR_NONE)) {
2432 *fragoff = _frag_off;
/* AH length field counts 32-bit words (excluding the first two). */
2438 } else if (nexthdr == NEXTHDR_AUTH)
2439 hdrlen = (hp->hdrlen + 2) << 2;
2441 hdrlen = ipv6_optlen(hp);
2443 nexthdr = hp->nexthdr;
/* Entry points shared with the per-table modules (ip6table_filter etc.)
 * and other IPv6 netfilter users. */
2452 EXPORT_SYMBOL(ip6t_register_table);
2453 EXPORT_SYMBOL(ip6t_unregister_table);
2454 EXPORT_SYMBOL(ip6t_do_table);
2455 EXPORT_SYMBOL(ip6t_ext_hdr);
2456 EXPORT_SYMBOL(ipv6_find_hdr);
2458 module_init(ip6_tables_init);
2459 module_exit(ip6_tables_fini);