/*
 * Packet matching code.
 *
 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/compat.h>
#include <asm/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>

#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
45 #define dprintf(format, args...)
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
51 #define duprintf(format, args...)
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x) \
58 printk("IP_NF_ASSERT: %s:%s:%u\n", \
59 __func__, __FILE__, __LINE__); \
62 #define IP_NF_ASSERT(x)
66 /* All the better to debug you with... */
71 void *ip6t_alloc_initial_table(const struct xt_table *info)
73 return xt_alloc_initial_table(ip6t, IP6T);
75 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
/*
   We keep a set of rules for each CPU, so we can avoid write-locking
   them in the softirq when updating the counters and therefore
   only need to read-lock in the softirq; doing a write_lock_bh() in user
   context stops packets coming through and allows user context to read
   the counters or update the rules.

   Hence the start of any table is given by get_table() below.  */
86 /* Check for an extension */
88 ip6t_ext_hdr(u8 nexthdr)
90 return ( (nexthdr == IPPROTO_HOPOPTS) ||
91 (nexthdr == IPPROTO_ROUTING) ||
92 (nexthdr == IPPROTO_FRAGMENT) ||
93 (nexthdr == IPPROTO_ESP) ||
94 (nexthdr == IPPROTO_AH) ||
95 (nexthdr == IPPROTO_NONE) ||
96 (nexthdr == IPPROTO_DSTOPTS) );
99 /* Returns whether matches rule or not. */
100 /* Performance critical - called for every packet */
102 ip6_packet_match(const struct sk_buff *skb,
105 const struct ip6t_ip6 *ip6info,
106 unsigned int *protoff,
107 int *fragoff, bool *hotdrop)
110 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
112 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
114 if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
115 &ip6info->src), IP6T_INV_SRCIP) ||
116 FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
117 &ip6info->dst), IP6T_INV_DSTIP)) {
118 dprintf("Source or dest mismatch.\n");
120 dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
121 ipinfo->smsk.s_addr, ipinfo->src.s_addr,
122 ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
123 dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
124 ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
125 ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
129 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
131 if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
132 dprintf("VIA in mismatch (%s vs %s).%s\n",
133 indev, ip6info->iniface,
134 ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
138 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
140 if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
141 dprintf("VIA out mismatch (%s vs %s).%s\n",
142 outdev, ip6info->outiface,
143 ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
147 /* ... might want to do something with class and flowlabel here ... */
149 /* look for the desired protocol header */
150 if((ip6info->flags & IP6T_F_PROTO)) {
152 unsigned short _frag_off;
154 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
160 *fragoff = _frag_off;
162 dprintf("Packet protocol %hi ?= %s%hi.\n",
164 ip6info->invflags & IP6T_INV_PROTO ? "!":"",
167 if (ip6info->proto == protohdr) {
168 if(ip6info->invflags & IP6T_INV_PROTO) {
174 /* We need match for the '-p all', too! */
175 if ((ip6info->proto != 0) &&
176 !(ip6info->invflags & IP6T_INV_PROTO))
182 /* should be ip6 safe */
184 ip6_checkentry(const struct ip6t_ip6 *ipv6)
186 if (ipv6->flags & ~IP6T_F_MASK) {
187 duprintf("Unknown flag bits set: %08X\n",
188 ipv6->flags & ~IP6T_F_MASK);
191 if (ipv6->invflags & ~IP6T_INV_MASK) {
192 duprintf("Unknown invflag bits set: %08X\n",
193 ipv6->invflags & ~IP6T_INV_MASK);
200 ip6t_error(struct sk_buff *skb, const struct xt_target_param *par)
203 pr_info("error: `%s'\n", (const char *)par->targinfo);
/* Return the entry at byte @offset from the start of the table blob. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
214 /* All zeroes == unconditional rule. */
215 /* Mildly perf critical (only if packet tracing is on) */
216 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
218 static const struct ip6t_ip6 uncond;
220 return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
/* const-correct wrapper around ip6t_get_target(). */
static inline const struct ip6t_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
229 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
230 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
231 /* This cries for unification! */
232 static const char *const hooknames[] = {
233 [NF_INET_PRE_ROUTING] = "PREROUTING",
234 [NF_INET_LOCAL_IN] = "INPUT",
235 [NF_INET_FORWARD] = "FORWARD",
236 [NF_INET_LOCAL_OUT] = "OUTPUT",
237 [NF_INET_POST_ROUTING] = "POSTROUTING",
240 enum nf_ip_trace_comments {
241 NF_IP6_TRACE_COMMENT_RULE,
242 NF_IP6_TRACE_COMMENT_RETURN,
243 NF_IP6_TRACE_COMMENT_POLICY,
246 static const char *const comments[] = {
247 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
248 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
249 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
252 static struct nf_loginfo trace_loginfo = {
253 .type = NF_LOG_TYPE_LOG,
257 .logflags = NF_LOG_MASK,
262 /* Mildly perf critical (only if packet tracing is on) */
264 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
265 const char *hookname, const char **chainname,
266 const char **comment, unsigned int *rulenum)
268 const struct ip6t_standard_target *t = (void *)ip6t_get_target_c(s);
270 if (strcmp(t->target.u.kernel.target->name, IP6T_ERROR_TARGET) == 0) {
271 /* Head of user chain: ERROR target with chainname */
272 *chainname = t->target.data;
277 if (s->target_offset == sizeof(struct ip6t_entry) &&
278 strcmp(t->target.u.kernel.target->name,
279 IP6T_STANDARD_TARGET) == 0 &&
281 unconditional(&s->ipv6)) {
282 /* Tail of chains: STANDARD target (return/policy) */
283 *comment = *chainname == hookname
284 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
285 : comments[NF_IP6_TRACE_COMMENT_RETURN];
294 static void trace_packet(const struct sk_buff *skb,
296 const struct net_device *in,
297 const struct net_device *out,
298 const char *tablename,
299 const struct xt_table_info *private,
300 const struct ip6t_entry *e)
302 const void *table_base;
303 const struct ip6t_entry *root;
304 const char *hookname, *chainname, *comment;
305 const struct ip6t_entry *iter;
306 unsigned int rulenum = 0;
308 table_base = private->entries[smp_processor_id()];
309 root = get_entry(table_base, private->hook_entry[hook]);
311 hookname = chainname = hooknames[hook];
312 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
314 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
315 if (get_chainname_rulenum(iter, e, hookname,
316 &chainname, &comment, &rulenum) != 0)
319 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
320 "TRACE: %s:%s:%s:%u ",
321 tablename, chainname, comment, rulenum);
325 static inline __pure struct ip6t_entry *
326 ip6t_next_entry(const struct ip6t_entry *entry)
328 return (void *)entry + entry->next_offset;
331 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
333 ip6t_do_table(struct sk_buff *skb,
335 const struct net_device *in,
336 const struct net_device *out,
337 struct xt_table *table)
339 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
340 bool hotdrop = false;
341 /* Initializing verdict to NF_DROP keeps gcc happy. */
342 unsigned int verdict = NF_DROP;
343 const char *indev, *outdev;
344 const void *table_base;
345 struct ip6t_entry *e, **jumpstack;
346 unsigned int *stackptr, origptr, cpu;
347 const struct xt_table_info *private;
348 struct xt_match_param mtpar;
349 struct xt_target_param tgpar;
352 indev = in ? in->name : nulldevname;
353 outdev = out ? out->name : nulldevname;
354 /* We handle fragments by dealing with the first fragment as
355 * if it was a normal packet. All other fragments are treated
356 * normally, except that they will NEVER match rules that ask
357 * things we don't know, ie. tcp syn flag or ports). If the
358 * rule is also a fragment-specific rule, non-fragments won't
360 mtpar.hotdrop = &hotdrop;
361 mtpar.in = tgpar.in = in;
362 mtpar.out = tgpar.out = out;
363 mtpar.family = tgpar.family = NFPROTO_IPV6;
364 mtpar.hooknum = tgpar.hooknum = hook;
366 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
369 private = table->private;
370 cpu = smp_processor_id();
371 table_base = private->entries[cpu];
372 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
373 stackptr = &private->stackptr[cpu];
376 e = get_entry(table_base, private->hook_entry[hook]);
379 const struct ip6t_entry_target *t;
380 const struct xt_entry_match *ematch;
383 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
384 &mtpar.thoff, &mtpar.fragoff, &hotdrop)) {
386 e = ip6t_next_entry(e);
390 xt_ematch_foreach(ematch, e) {
391 mtpar.match = ematch->u.kernel.match;
392 mtpar.matchinfo = ematch->data;
393 if (!mtpar.match->match(skb, &mtpar))
397 ADD_COUNTER(e->counters,
398 ntohs(ipv6_hdr(skb)->payload_len) +
399 sizeof(struct ipv6hdr), 1);
401 t = ip6t_get_target_c(e);
402 IP_NF_ASSERT(t->u.kernel.target);
404 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
405 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
406 /* The packet is traced: log it */
407 if (unlikely(skb->nf_trace))
408 trace_packet(skb, hook, in, out,
409 table->name, private, e);
411 /* Standard target? */
412 if (!t->u.kernel.target->target) {
415 v = ((struct ip6t_standard_target *)t)->verdict;
417 /* Pop from stack? */
418 if (v != IP6T_RETURN) {
419 verdict = (unsigned)(-v) - 1;
423 e = get_entry(table_base,
424 private->underflow[hook]);
426 e = ip6t_next_entry(jumpstack[--*stackptr]);
429 if (table_base + v != ip6t_next_entry(e) &&
430 !(e->ipv6.flags & IP6T_F_GOTO)) {
431 if (*stackptr >= private->stacksize) {
435 jumpstack[(*stackptr)++] = e;
438 e = get_entry(table_base, v);
442 tgpar.target = t->u.kernel.target;
443 tgpar.targinfo = t->data;
445 verdict = t->u.kernel.target->target(skb, &tgpar);
446 if (verdict == IP6T_CONTINUE)
447 e = ip6t_next_entry(e);
453 xt_info_rdunlock_bh();
456 #ifdef DEBUG_ALLOW_ALL
465 /* Figures out from what hook each rule can be called: returns 0 if
466 there are loops. Puts hook bitmask in comefrom. */
468 mark_source_chains(const struct xt_table_info *newinfo,
469 unsigned int valid_hooks, void *entry0)
473 /* No recursion; use packet counter to save back ptrs (reset
474 to 0 as we leave), and comefrom to save source hook bitmask */
475 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
476 unsigned int pos = newinfo->hook_entry[hook];
477 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
479 if (!(valid_hooks & (1 << hook)))
482 /* Set initial back pointer. */
483 e->counters.pcnt = pos;
486 const struct ip6t_standard_target *t
487 = (void *)ip6t_get_target_c(e);
488 int visited = e->comefrom & (1 << hook);
490 if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
491 printk("iptables: loop hook %u pos %u %08X.\n",
492 hook, pos, e->comefrom);
495 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
497 /* Unconditional return/END. */
498 if ((e->target_offset == sizeof(struct ip6t_entry) &&
499 (strcmp(t->target.u.user.name,
500 IP6T_STANDARD_TARGET) == 0) &&
502 unconditional(&e->ipv6)) || visited) {
503 unsigned int oldpos, size;
505 if ((strcmp(t->target.u.user.name,
506 IP6T_STANDARD_TARGET) == 0) &&
507 t->verdict < -NF_MAX_VERDICT - 1) {
508 duprintf("mark_source_chains: bad "
509 "negative verdict (%i)\n",
514 /* Return: backtrack through the last
517 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
518 #ifdef DEBUG_IP_FIREWALL_USER
520 & (1 << NF_INET_NUMHOOKS)) {
521 duprintf("Back unset "
528 pos = e->counters.pcnt;
529 e->counters.pcnt = 0;
531 /* We're at the start. */
535 e = (struct ip6t_entry *)
537 } while (oldpos == pos + e->next_offset);
540 size = e->next_offset;
541 e = (struct ip6t_entry *)
542 (entry0 + pos + size);
543 e->counters.pcnt = pos;
546 int newpos = t->verdict;
548 if (strcmp(t->target.u.user.name,
549 IP6T_STANDARD_TARGET) == 0 &&
551 if (newpos > newinfo->size -
552 sizeof(struct ip6t_entry)) {
553 duprintf("mark_source_chains: "
554 "bad verdict (%i)\n",
558 /* This a jump; chase it. */
559 duprintf("Jump rule %u -> %u\n",
562 /* ... this is a fallthru */
563 newpos = pos + e->next_offset;
565 e = (struct ip6t_entry *)
567 e->counters.pcnt = pos;
572 duprintf("Finished chain %u\n", hook);
577 static void cleanup_match(struct ip6t_entry_match *m, struct net *net)
579 struct xt_mtdtor_param par;
582 par.match = m->u.kernel.match;
583 par.matchinfo = m->data;
584 par.family = NFPROTO_IPV6;
585 if (par.match->destroy != NULL)
586 par.match->destroy(&par);
587 module_put(par.match->me);
591 check_entry(const struct ip6t_entry *e, const char *name)
593 const struct ip6t_entry_target *t;
595 if (!ip6_checkentry(&e->ipv6)) {
596 duprintf("ip_tables: ip check failed %p %s.\n", e, name);
600 if (e->target_offset + sizeof(struct ip6t_entry_target) >
604 t = ip6t_get_target_c(e);
605 if (e->target_offset + t->u.target_size > e->next_offset)
611 static int check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
613 const struct ip6t_ip6 *ipv6 = par->entryinfo;
616 par->match = m->u.kernel.match;
617 par->matchinfo = m->data;
619 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
620 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
622 duprintf("ip_tables: check failed for `%s'.\n",
630 find_check_match(struct ip6t_entry_match *m, struct xt_mtchk_param *par)
632 struct xt_match *match;
635 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
638 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
639 return PTR_ERR(match);
641 m->u.kernel.match = match;
643 ret = check_match(m, par);
649 module_put(m->u.kernel.match->me);
653 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
655 struct ip6t_entry_target *t = ip6t_get_target(e);
656 struct xt_tgchk_param par = {
660 .target = t->u.kernel.target,
662 .hook_mask = e->comefrom,
663 .family = NFPROTO_IPV6,
667 t = ip6t_get_target(e);
668 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
669 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
671 duprintf("ip_tables: check failed for `%s'.\n",
672 t->u.kernel.target->name);
679 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
682 struct ip6t_entry_target *t;
683 struct xt_target *target;
686 struct xt_mtchk_param mtpar;
687 struct xt_entry_match *ematch;
689 ret = check_entry(e, name);
696 mtpar.entryinfo = &e->ipv6;
697 mtpar.hook_mask = e->comefrom;
698 mtpar.family = NFPROTO_IPV6;
699 xt_ematch_foreach(ematch, e) {
700 ret = find_check_match(ematch, &mtpar);
702 goto cleanup_matches;
706 t = ip6t_get_target(e);
707 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
709 if (IS_ERR(target)) {
710 duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
711 ret = PTR_ERR(target);
712 goto cleanup_matches;
714 t->u.kernel.target = target;
716 ret = check_target(e, net, name);
721 module_put(t->u.kernel.target->me);
723 xt_ematch_foreach(ematch, e) {
726 cleanup_match(ematch, net);
731 static bool check_underflow(const struct ip6t_entry *e)
733 const struct ip6t_entry_target *t;
734 unsigned int verdict;
736 if (!unconditional(&e->ipv6))
738 t = ip6t_get_target_c(e);
739 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
741 verdict = ((struct ip6t_standard_target *)t)->verdict;
742 verdict = -verdict - 1;
743 return verdict == NF_DROP || verdict == NF_ACCEPT;
747 check_entry_size_and_hooks(struct ip6t_entry *e,
748 struct xt_table_info *newinfo,
749 const unsigned char *base,
750 const unsigned char *limit,
751 const unsigned int *hook_entries,
752 const unsigned int *underflows,
753 unsigned int valid_hooks)
757 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
758 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
759 duprintf("Bad offset %p\n", e);
764 < sizeof(struct ip6t_entry) + sizeof(struct ip6t_entry_target)) {
765 duprintf("checking: element %p size %u\n",
770 /* Check hooks & underflows */
771 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
772 if (!(valid_hooks & (1 << h)))
774 if ((unsigned char *)e - base == hook_entries[h])
775 newinfo->hook_entry[h] = hook_entries[h];
776 if ((unsigned char *)e - base == underflows[h]) {
777 if (!check_underflow(e)) {
778 pr_err("Underflows must be unconditional and "
779 "use the STANDARD target with "
783 newinfo->underflow[h] = underflows[h];
787 /* Clear counters and comefrom */
788 e->counters = ((struct xt_counters) { 0, 0 });
793 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
795 struct xt_tgdtor_param par;
796 struct ip6t_entry_target *t;
797 struct xt_entry_match *ematch;
799 /* Cleanup all matches */
800 xt_ematch_foreach(ematch, e)
801 cleanup_match(ematch, net);
802 t = ip6t_get_target(e);
805 par.target = t->u.kernel.target;
806 par.targinfo = t->data;
807 par.family = NFPROTO_IPV6;
808 if (par.target->destroy != NULL)
809 par.target->destroy(&par);
810 module_put(par.target->me);
813 /* Checks and translates the user-supplied table segment (held in
816 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
817 const struct ip6t_replace *repl)
819 struct ip6t_entry *iter;
823 newinfo->size = repl->size;
824 newinfo->number = repl->num_entries;
826 /* Init all hooks to impossible value. */
827 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
828 newinfo->hook_entry[i] = 0xFFFFFFFF;
829 newinfo->underflow[i] = 0xFFFFFFFF;
832 duprintf("translate_table: size %u\n", newinfo->size);
834 /* Walk through entries, checking offsets. */
835 xt_entry_foreach(iter, entry0, newinfo->size) {
836 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
844 if (strcmp(ip6t_get_target(iter)->u.user.name,
845 XT_ERROR_TARGET) == 0)
846 ++newinfo->stacksize;
849 if (i != repl->num_entries) {
850 duprintf("translate_table: %u not %u entries\n",
851 i, repl->num_entries);
855 /* Check hooks all assigned */
856 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
857 /* Only hooks which are valid */
858 if (!(repl->valid_hooks & (1 << i)))
860 if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
861 duprintf("Invalid hook entry %u %u\n",
862 i, repl->hook_entry[i]);
865 if (newinfo->underflow[i] == 0xFFFFFFFF) {
866 duprintf("Invalid underflow %u %u\n",
867 i, repl->underflow[i]);
872 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
875 /* Finally, each sanity check must pass */
877 xt_entry_foreach(iter, entry0, newinfo->size) {
878 ret = find_check_entry(iter, net, repl->name, repl->size);
885 xt_entry_foreach(iter, entry0, newinfo->size) {
888 cleanup_entry(iter, net);
893 /* And one copy for every other CPU */
894 for_each_possible_cpu(i) {
895 if (newinfo->entries[i] && newinfo->entries[i] != entry0)
896 memcpy(newinfo->entries[i], entry0, newinfo->size);
903 get_counters(const struct xt_table_info *t,
904 struct xt_counters counters[])
906 struct ip6t_entry *iter;
911 /* Instead of clearing (by a previous call to memset())
912 * the counters and using adds, we set the counters
913 * with data used by 'current' CPU
915 * Bottom half has to be disabled to prevent deadlock
916 * if new softirq were to run and call ipt_do_table
919 curcpu = smp_processor_id();
922 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
923 SET_COUNTER(counters[i], iter->counters.bcnt,
924 iter->counters.pcnt);
928 for_each_possible_cpu(cpu) {
933 xt_entry_foreach(iter, t->entries[cpu], t->size) {
934 ADD_COUNTER(counters[i], iter->counters.bcnt,
935 iter->counters.pcnt);
938 xt_info_wrunlock(cpu);
943 static struct xt_counters *alloc_counters(const struct xt_table *table)
945 unsigned int countersize;
946 struct xt_counters *counters;
947 const struct xt_table_info *private = table->private;
949 /* We need atomic snapshot of counters: rest doesn't change
950 (other than comefrom, which userspace doesn't care
952 countersize = sizeof(struct xt_counters) * private->number;
953 counters = vmalloc_node(countersize, numa_node_id());
955 if (counters == NULL)
956 return ERR_PTR(-ENOMEM);
958 get_counters(private, counters);
964 copy_entries_to_user(unsigned int total_size,
965 const struct xt_table *table,
966 void __user *userptr)
968 unsigned int off, num;
969 const struct ip6t_entry *e;
970 struct xt_counters *counters;
971 const struct xt_table_info *private = table->private;
973 const void *loc_cpu_entry;
975 counters = alloc_counters(table);
976 if (IS_ERR(counters))
977 return PTR_ERR(counters);
979 /* choose the copy that is on our node/cpu, ...
980 * This choice is lazy (because current thread is
981 * allowed to migrate to another cpu)
983 loc_cpu_entry = private->entries[raw_smp_processor_id()];
984 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
989 /* FIXME: use iterator macros --RR */
990 /* ... then go back and fix counters and names */
991 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
993 const struct ip6t_entry_match *m;
994 const struct ip6t_entry_target *t;
996 e = (struct ip6t_entry *)(loc_cpu_entry + off);
997 if (copy_to_user(userptr + off
998 + offsetof(struct ip6t_entry, counters),
1000 sizeof(counters[num])) != 0) {
1005 for (i = sizeof(struct ip6t_entry);
1006 i < e->target_offset;
1007 i += m->u.match_size) {
1010 if (copy_to_user(userptr + off + i
1011 + offsetof(struct ip6t_entry_match,
1013 m->u.kernel.match->name,
1014 strlen(m->u.kernel.match->name)+1)
1021 t = ip6t_get_target_c(e);
1022 if (copy_to_user(userptr + off + e->target_offset
1023 + offsetof(struct ip6t_entry_target,
1025 t->u.kernel.target->name,
1026 strlen(t->u.kernel.target->name)+1) != 0) {
1037 #ifdef CONFIG_COMPAT
1038 static void compat_standard_from_user(void *dst, const void *src)
1040 int v = *(compat_int_t *)src;
1043 v += xt_compat_calc_jump(AF_INET6, v);
1044 memcpy(dst, &v, sizeof(v));
1047 static int compat_standard_to_user(void __user *dst, const void *src)
1049 compat_int_t cv = *(int *)src;
1052 cv -= xt_compat_calc_jump(AF_INET6, cv);
1053 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1056 static int compat_calc_entry(const struct ip6t_entry *e,
1057 const struct xt_table_info *info,
1058 const void *base, struct xt_table_info *newinfo)
1060 const struct xt_entry_match *ematch;
1061 const struct ip6t_entry_target *t;
1062 unsigned int entry_offset;
1065 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1066 entry_offset = (void *)e - base;
1067 xt_ematch_foreach(ematch, e)
1068 off += xt_compat_match_offset(ematch->u.kernel.match);
1069 t = ip6t_get_target_c(e);
1070 off += xt_compat_target_offset(t->u.kernel.target);
1071 newinfo->size -= off;
1072 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1076 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1077 if (info->hook_entry[i] &&
1078 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1079 newinfo->hook_entry[i] -= off;
1080 if (info->underflow[i] &&
1081 (e < (struct ip6t_entry *)(base + info->underflow[i])))
1082 newinfo->underflow[i] -= off;
1087 static int compat_table_info(const struct xt_table_info *info,
1088 struct xt_table_info *newinfo)
1090 struct ip6t_entry *iter;
1091 void *loc_cpu_entry;
1094 if (!newinfo || !info)
1097 /* we dont care about newinfo->entries[] */
1098 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1099 newinfo->initial_entries = 0;
1100 loc_cpu_entry = info->entries[raw_smp_processor_id()];
1101 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1102 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1110 static int get_info(struct net *net, void __user *user,
1111 const int *len, int compat)
1113 char name[IP6T_TABLE_MAXNAMELEN];
1117 if (*len != sizeof(struct ip6t_getinfo)) {
1118 duprintf("length %u != %zu\n", *len,
1119 sizeof(struct ip6t_getinfo));
1123 if (copy_from_user(name, user, sizeof(name)) != 0)
1126 name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
1127 #ifdef CONFIG_COMPAT
1129 xt_compat_lock(AF_INET6);
1131 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1132 "ip6table_%s", name);
1133 if (t && !IS_ERR(t)) {
1134 struct ip6t_getinfo info;
1135 const struct xt_table_info *private = t->private;
1136 #ifdef CONFIG_COMPAT
1137 struct xt_table_info tmp;
1140 ret = compat_table_info(private, &tmp);
1141 xt_compat_flush_offsets(AF_INET6);
1145 info.valid_hooks = t->valid_hooks;
1146 memcpy(info.hook_entry, private->hook_entry,
1147 sizeof(info.hook_entry));
1148 memcpy(info.underflow, private->underflow,
1149 sizeof(info.underflow));
1150 info.num_entries = private->number;
1151 info.size = private->size;
1152 strcpy(info.name, name);
1154 if (copy_to_user(user, &info, *len) != 0)
1162 ret = t ? PTR_ERR(t) : -ENOENT;
1163 #ifdef CONFIG_COMPAT
1165 xt_compat_unlock(AF_INET6);
1171 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1175 struct ip6t_get_entries get;
1178 if (*len < sizeof(get)) {
1179 duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1182 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1184 if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1185 duprintf("get_entries: %u != %zu\n",
1186 *len, sizeof(get) + get.size);
1190 t = xt_find_table_lock(net, AF_INET6, get.name);
1191 if (t && !IS_ERR(t)) {
1192 struct xt_table_info *private = t->private;
1193 duprintf("t->private->number = %u\n", private->number);
1194 if (get.size == private->size)
1195 ret = copy_entries_to_user(private->size,
1196 t, uptr->entrytable);
1198 duprintf("get_entries: I've got %u not %u!\n",
1199 private->size, get.size);
1205 ret = t ? PTR_ERR(t) : -ENOENT;
1211 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1212 struct xt_table_info *newinfo, unsigned int num_counters,
1213 void __user *counters_ptr)
1217 struct xt_table_info *oldinfo;
1218 struct xt_counters *counters;
1219 const void *loc_cpu_old_entry;
1220 struct ip6t_entry *iter;
1223 counters = vmalloc_node(num_counters * sizeof(struct xt_counters),
1230 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1231 "ip6table_%s", name);
1232 if (!t || IS_ERR(t)) {
1233 ret = t ? PTR_ERR(t) : -ENOENT;
1234 goto free_newinfo_counters_untrans;
1238 if (valid_hooks != t->valid_hooks) {
1239 duprintf("Valid hook crap: %08X vs %08X\n",
1240 valid_hooks, t->valid_hooks);
1245 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1249 /* Update module usage count based on number of rules */
1250 duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1251 oldinfo->number, oldinfo->initial_entries, newinfo->number);
1252 if ((oldinfo->number > oldinfo->initial_entries) ||
1253 (newinfo->number <= oldinfo->initial_entries))
1255 if ((oldinfo->number > oldinfo->initial_entries) &&
1256 (newinfo->number <= oldinfo->initial_entries))
1259 /* Get the old counters, and synchronize with replace */
1260 get_counters(oldinfo, counters);
1262 /* Decrease module usage counts and free resource */
1263 loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1264 xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1265 cleanup_entry(iter, net);
1267 xt_free_table_info(oldinfo);
1268 if (copy_to_user(counters_ptr, counters,
1269 sizeof(struct xt_counters) * num_counters) != 0)
1278 free_newinfo_counters_untrans:
1285 do_replace(struct net *net, const void __user *user, unsigned int len)
1288 struct ip6t_replace tmp;
1289 struct xt_table_info *newinfo;
1290 void *loc_cpu_entry;
1291 struct ip6t_entry *iter;
1293 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1296 /* overflow check */
1297 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1300 newinfo = xt_alloc_table_info(tmp.size);
1304 /* choose the copy that is on our node/cpu */
1305 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1306 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1312 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1316 duprintf("ip_tables: Translated table\n");
1318 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1319 tmp.num_counters, tmp.counters);
1321 goto free_newinfo_untrans;
1324 free_newinfo_untrans:
1325 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1326 cleanup_entry(iter, net);
1328 xt_free_table_info(newinfo);
1333 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1336 unsigned int i, curcpu;
1337 struct xt_counters_info tmp;
1338 struct xt_counters *paddc;
1339 unsigned int num_counters;
1344 const struct xt_table_info *private;
1346 const void *loc_cpu_entry;
1347 struct ip6t_entry *iter;
1348 #ifdef CONFIG_COMPAT
1349 struct compat_xt_counters_info compat_tmp;
1353 size = sizeof(struct compat_xt_counters_info);
1358 size = sizeof(struct xt_counters_info);
1361 if (copy_from_user(ptmp, user, size) != 0)
1364 #ifdef CONFIG_COMPAT
1366 num_counters = compat_tmp.num_counters;
1367 name = compat_tmp.name;
1371 num_counters = tmp.num_counters;
1375 if (len != size + num_counters * sizeof(struct xt_counters))
1378 paddc = vmalloc_node(len - size, numa_node_id());
1382 if (copy_from_user(paddc, user + size, len - size) != 0) {
1387 t = xt_find_table_lock(net, AF_INET6, name);
1388 if (!t || IS_ERR(t)) {
1389 ret = t ? PTR_ERR(t) : -ENOENT;
1395 private = t->private;
1396 if (private->number != num_counters) {
1398 goto unlock_up_free;
1402 /* Choose the copy that is on our node */
1403 curcpu = smp_processor_id();
1404 xt_info_wrlock(curcpu);
1405 loc_cpu_entry = private->entries[curcpu];
1406 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1407 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1410 xt_info_wrunlock(curcpu);
1422 #ifdef CONFIG_COMPAT
1423 struct compat_ip6t_replace {
1424 char name[IP6T_TABLE_MAXNAMELEN];
1428 u32 hook_entry[NF_INET_NUMHOOKS];
1429 u32 underflow[NF_INET_NUMHOOKS];
1431 compat_uptr_t counters; /* struct ip6t_counters * */
1432 struct compat_ip6t_entry entries[0];
/*
 * Copy one kernel ip6t_entry out to 32-bit userspace in compat layout,
 * advancing *dstptr and shrinking *size, and rewriting the entry's
 * target_offset/next_offset to account for the smaller compat structures.
 * NOTE(review): listing is elided; error-handling lines are not all visible.
 */
1436 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1437 unsigned int *size, struct xt_counters *counters,
1440 struct ip6t_entry_target *t;
1441 struct compat_ip6t_entry __user *ce;
1442 u_int16_t target_offset, next_offset;
1443 compat_uint_t origsize;
1444 const struct xt_entry_match *ematch;
1448 ce = (struct compat_ip6t_entry __user *)*dstptr;
/* Raw copy of the entry header plus the per-rule counters for index i. */
1449 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1450 copy_to_user(&ce->counters, &counters[i],
1451 sizeof(counters[i])) != 0)
/* Account for the native-vs-compat entry size difference. */
1454 *dstptr += sizeof(struct compat_ip6t_entry);
1455 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
/* Each match and the target are converted by the xt_compat helpers. */
1457 xt_ematch_foreach(ematch, e) {
1458 ret = xt_compat_match_to_user(ematch, dstptr, size);
/* Offsets stored in the compat entry must be re-based after shrinking. */
1462 target_offset = e->target_offset - (origsize - *size);
1463 t = ip6t_get_target(e);
1464 ret = xt_compat_target_to_user(t, dstptr, size);
1467 next_offset = e->next_offset - (origsize - *size);
1468 if (put_user(target_offset, &ce->target_offset) != 0 ||
1469 put_user(next_offset, &ce->next_offset) != 0)
/*
 * Resolve the xt_match named by a compat entry match and add its
 * native-vs-compat size delta to *size.  On success the match module's
 * refcount is held via xt_request_find_match; on failure the PTR_ERR is
 * returned.  NOTE(review): elided listing; not all parameters/lines shown.
 */
1475 compat_find_calc_match(struct ip6t_entry_match *m,
1477 const struct ip6t_ip6 *ipv6,
1478 unsigned int hookmask,
1481 struct xt_match *match;
1483 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1484 m->u.user.revision);
1485 if (IS_ERR(match)) {
1486 duprintf("compat_check_calc_match: `%s' not found\n",
1488 return PTR_ERR(match);
1490 m->u.kernel.match = match;
/* Accumulate how much larger the native layout is than the compat one. */
1491 *size += xt_compat_match_offset(match);
/*
 * Drop the module references taken on every match and on the target of a
 * compat entry.  Used on error paths where ->check was never called, so
 * only module_put (no ->destroy) is needed.
 */
1495 static void compat_release_entry(struct compat_ip6t_entry *e)
1497 struct ip6t_entry_target *t;
1498 struct xt_entry_match *ematch;
1500 /* Cleanup all matches */
1501 xt_ematch_foreach(ematch, e)
1502 module_put(ematch->u.kernel.match->me);
1503 t = compat_ip6t_get_target(e);
1504 module_put(t->u.kernel.target->me);
/*
 * Validate one compat entry: alignment, bounds against [base, limit),
 * minimum size, match/target lookup, and record the native-size offset for
 * this entry with xt_compat_add_offset.  Also fills newinfo->hook_entry /
 * ->underflow when this entry sits at a registered hook offset.
 * NOTE(review): elided listing; some checks and error labels not visible.
 */
1508 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1509 struct xt_table_info *newinfo,
1511 const unsigned char *base,
1512 const unsigned char *limit,
1513 const unsigned int *hook_entries,
1514 const unsigned int *underflows,
1517 struct xt_entry_match *ematch;
1518 struct ip6t_entry_target *t;
1519 struct xt_target *target;
1520 unsigned int entry_offset;
1524 duprintf("check_compat_entry_size_and_hooks %p\n", e);
/* Reject misaligned entries and entries whose header would overrun limit. */
1525 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1526 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1527 duprintf("Bad offset %p, limit = %p\n", e, limit);
/* next_offset must at least cover the entry plus a minimal target. */
1531 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1532 sizeof(struct compat_xt_entry_target)) {
1533 duprintf("checking: element %p size %u\n",
1538 /* For purposes of check_entry casting the compat entry is fine */
1539 ret = check_entry((struct ip6t_entry *)e, name);
/* off accumulates how much bigger the native entry will be. */
1543 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1544 entry_offset = (void *)e - (void *)base;
1546 xt_ematch_foreach(ematch, e) {
1547 ret = compat_find_calc_match(ematch, name,
1548 &e->ipv6, e->comefrom, &off);
1550 goto release_matches;
1554 t = compat_ip6t_get_target(e);
1555 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1556 t->u.user.revision);
1557 if (IS_ERR(target)) {
1558 duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1560 ret = PTR_ERR(target);
1561 goto release_matches;
1563 t->u.kernel.target = target;
1565 off += xt_compat_target_offset(target);
/* Remember the size delta so later passes can translate offsets. */
1567 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1571 /* Check hooks & underflows */
1572 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1573 if ((unsigned char *)e - base == hook_entries[h])
1574 newinfo->hook_entry[h] = hook_entries[h];
1575 if ((unsigned char *)e - base == underflows[h])
1576 newinfo->underflow[h] = underflows[h];
1579 /* Clear counters and comefrom */
1580 memset(&e->counters, 0, sizeof(e->counters));
/* Error unwinding: drop the target ref, then the match refs. */
1585 module_put(t->u.kernel.target->me);
1587 xt_ematch_foreach(ematch, e) {
1590 module_put(ematch->u.kernel.match->me);
/*
 * Expand one compat entry into its native ip6t_entry form at *dstptr,
 * growing *size by the layout difference and fixing up target_offset,
 * next_offset, and newinfo's hook/underflow offsets that lie beyond this
 * entry.  NOTE(review): elided listing; return/label lines not all visible.
 */
1596 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1597 unsigned int *size, const char *name,
1598 struct xt_table_info *newinfo, unsigned char *base)
1600 struct ip6t_entry_target *t;
1601 struct xt_target *target;
1602 struct ip6t_entry *de;
1603 unsigned int origsize;
1605 struct xt_entry_match *ematch;
1609 de = (struct ip6t_entry *)*dstptr;
/* Copy the fixed header, then the (zeroed) counters separately. */
1610 memcpy(de, e, sizeof(struct ip6t_entry));
1611 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1613 *dstptr += sizeof(struct ip6t_entry);
1614 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1616 xt_ematch_foreach(ematch, e) {
1617 ret = xt_compat_match_from_user(ematch, dstptr, size);
/* Re-base intra-entry offsets after the expansion so far. */
1621 de->target_offset = e->target_offset - (origsize - *size);
1622 t = compat_ip6t_get_target(e);
1623 target = t->u.kernel.target;
1624 xt_compat_target_from_user(t, dstptr, size);
1626 de->next_offset = e->next_offset - (origsize - *size);
/* Hook offsets past this entry shift by the amount we grew. */
1627 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1628 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1629 newinfo->hook_entry[h] -= origsize - *size;
1630 if ((unsigned char *)de - base < newinfo->underflow[h])
1631 newinfo->underflow[h] -= origsize - *size;
/*
 * Run the full ->checkentry pass (matches then target) on an already
 * translated native entry.  On match failure, unwinds only the matches
 * that were successfully checked.  NOTE(review): elided listing; loop
 * counters and braces are not all visible here.
 */
1636 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1641 struct xt_mtchk_param mtpar;
1642 struct xt_entry_match *ematch;
1647 mtpar.entryinfo = &e->ipv6;
1648 mtpar.hook_mask = e->comefrom;
1649 mtpar.family = NFPROTO_IPV6;
1650 xt_ematch_foreach(ematch, e) {
1651 ret = check_match(ematch, &mtpar);
1653 goto cleanup_matches;
1657 ret = check_target(e, net, name);
1659 goto cleanup_matches;
/* Unwind path: destroy only the matches checked so far. */
1663 xt_ematch_foreach(ematch, e) {
1666 cleanup_match(ematch, net);
/*
 * Translate a whole compat-format ruleset into native form:
 *   1) size/hook validation pass over the compat entries (under the
 *      per-family compat lock),
 *   2) allocation of a native-sized xt_table_info and entry-by-entry
 *      expansion, 3) mark_source_chains + per-entry ->check,
 *   4) replication of the translated blob to every possible CPU.
 * On failure each partially-initialized entry is released or cleaned up
 * depending on how far it got (see the i/j bookkeeping comment below).
 * NOTE(review): elided listing; several assignments, labels and braces
 * between the shown lines are not visible here.
 */
1672 translate_compat_table(struct net *net,
1674 unsigned int valid_hooks,
1675 struct xt_table_info **pinfo,
1677 unsigned int total_size,
1678 unsigned int number,
1679 unsigned int *hook_entries,
1680 unsigned int *underflows)
1683 struct xt_table_info *newinfo, *info;
1684 void *pos, *entry0, *entry1;
1685 struct compat_ip6t_entry *iter0;
1686 struct ip6t_entry *iter1;
1693 info->number = number;
1695 /* Init all hooks to impossible value. */
1696 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1697 info->hook_entry[i] = 0xFFFFFFFF;
1698 info->underflow[i] = 0xFFFFFFFF;
1701 duprintf("translate_compat_table: size %u\n", info->size);
1703 xt_compat_lock(AF_INET6);
1704 /* Walk through entries, checking offsets. */
1705 xt_entry_foreach(iter0, entry0, total_size) {
1706 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1708 entry0 + total_size,
1719 duprintf("translate_compat_table: %u not %u entries\n",
1724 /* Check hooks all assigned */
1725 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1726 /* Only hooks which are valid */
1727 if (!(valid_hooks & (1 << i)))
1729 if (info->hook_entry[i] == 0xFFFFFFFF) {
1730 duprintf("Invalid hook entry %u %u\n",
1731 i, hook_entries[i]);
1734 if (info->underflow[i] == 0xFFFFFFFF) {
1735 duprintf("Invalid underflow %u %u\n",
/* Native-sized table; hook offsets were already rebased in pass 1. */
1742 newinfo = xt_alloc_table_info(size);
1746 newinfo->number = number;
1747 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1748 newinfo->hook_entry[i] = info->hook_entry[i];
1749 newinfo->underflow[i] = info->underflow[i];
1751 entry1 = newinfo->entries[raw_smp_processor_id()];
1754 xt_entry_foreach(iter0, entry0, total_size) {
1755 ret = compat_copy_entry_from_user(iter0, &pos, &size,
1756 name, newinfo, entry1);
1760 xt_compat_flush_offsets(AF_INET6);
1761 xt_compat_unlock(AF_INET6);
1766 if (!mark_source_chains(newinfo, valid_hooks, entry1))
1770 xt_entry_foreach(iter1, entry1, newinfo->size) {
1771 ret = compat_check_entry(iter1, net, name);
1778 * The first i matches need cleanup_entry (calls ->destroy)
1779 * because they had called ->check already. The other j-i
1780 * entries need only release.
1784 xt_entry_foreach(iter0, entry0, newinfo->size) {
1789 compat_release_entry(iter0);
1791 xt_entry_foreach(iter1, entry1, newinfo->size) {
1794 cleanup_entry(iter1, net);
1796 xt_free_table_info(newinfo);
1800 /* And one copy for every other CPU */
1801 for_each_possible_cpu(i)
1802 if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1803 memcpy(newinfo->entries[i], entry1, newinfo->size);
1807 xt_free_table_info(info);
/* Common error exit: free the new table and release every compat entry. */
1811 xt_free_table_info(newinfo);
1813 xt_entry_foreach(iter0, entry0, total_size) {
1816 compat_release_entry(iter0);
1820 xt_compat_flush_offsets(AF_INET6);
1821 xt_compat_unlock(AF_INET6);
/*
 * Compat handler for IP6T_SO_SET_REPLACE: copy the 32-bit replace header
 * and rule blob from userspace, translate to native form, then swap the
 * table in via __do_replace.  NOTE(review): elided listing; some error
 * returns and labels are not visible here.
 */
1826 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1829 struct compat_ip6t_replace tmp;
1830 struct xt_table_info *newinfo;
1831 void *loc_cpu_entry;
1832 struct ip6t_entry *iter;
1834 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1837 /* overflow check */
1838 if (tmp.size >= INT_MAX / num_possible_cpus())
1840 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1843 newinfo = xt_alloc_table_info(tmp.size)
1847 /* choose the copy that is on our node/cpu */
1848 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1849 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1855 ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1856 &newinfo, &loc_cpu_entry, tmp.size,
1857 tmp.num_entries, tmp.hook_entry,
1862 duprintf("compat_do_replace: Translated table\n");
1864 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1865 tmp.num_counters, compat_ptr(tmp.counters));
1867 goto free_newinfo_untrans;
1870 free_newinfo_untrans:
/* Table was translated but not installed: destroy each entry, then free. */
1871 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1872 cleanup_entry(iter, net);
1874 xt_free_table_info(newinfo);
/*
 * Compat setsockopt dispatcher: CAP_NET_ADMIN gated, routes REPLACE to the
 * compat translator and ADD_COUNTERS to do_add_counters(compat=1).
 */
1879 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1884 if (!capable(CAP_NET_ADMIN))
1888 case IP6T_SO_SET_REPLACE:
1889 ret = compat_do_replace(sock_net(sk), user, len);
1892 case IP6T_SO_SET_ADD_COUNTERS:
/* Final '1' selects the compat counter layout in do_add_counters. */
1893 ret = do_add_counters(sock_net(sk), user, len, 1);
1897 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * 32-bit userspace layout for IP6T_SO_GET_ENTRIES: table name followed by
 * a flexible array of compat entries.  NOTE(review): the size field between
 * the shown lines is elided in this listing.
 */
1904 struct compat_ip6t_get_entries {
1905 char name[IP6T_TABLE_MAXNAMELEN];
1907 struct compat_ip6t_entry entrytable[0];
/*
 * Dump the whole table to userspace in compat format: snapshot counters,
 * then convert each entry via compat_copy_entry_to_user.
 * NOTE(review): elided listing; vfree/return lines are not visible here.
 */
1911 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1912 void __user *userptr)
1914 struct xt_counters *counters;
1915 const struct xt_table_info *private = table->private;
1919 const void *loc_cpu_entry;
1921 struct ip6t_entry *iter;
1923 counters = alloc_counters(table);
1924 if (IS_ERR(counters))
1925 return PTR_ERR(counters);
1927 /* choose the copy that is on our node/cpu, ...
1928 * This choice is lazy (because current thread is
1929 * allowed to migrate to another cpu)
1931 loc_cpu_entry = private->entries[raw_smp_processor_id()];
1934 xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1935 ret = compat_copy_entry_to_user(iter, &pos,
1936 &size, counters, i++);
/*
 * Compat handler for IP6T_SO_GET_ENTRIES: validate the request length
 * against the compat table size, then dump under the compat lock.
 * NOTE(review): elided listing; some returns/braces are not visible here.
 */
1946 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1950 struct compat_ip6t_get_entries get;
1953 if (*len < sizeof(get)) {
1954 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1958 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
/* Userspace must size its buffer exactly: header + reported table size. */
1961 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1962 duprintf("compat_get_entries: %u != %zu\n",
1963 *len, sizeof(get) + get.size);
1967 xt_compat_lock(AF_INET6);
1968 t = xt_find_table_lock(net, AF_INET6, get.name);
1969 if (t && !IS_ERR(t)) {
1970 const struct xt_table_info *private = t->private;
1971 struct xt_table_info info;
1972 duprintf("t->private->number = %u\n", private->number);
/* compat_table_info computes the compat-visible size for comparison. */
1973 ret = compat_table_info(private, &info);
1974 if (!ret && get.size == info.size) {
1975 ret = compat_copy_entries_to_user(private->size,
1976 t, uptr->entrytable);
1978 duprintf("compat_get_entries: I've got %u not %u!\n",
1979 private->size, get.size);
1982 xt_compat_flush_offsets(AF_INET6);
1986 ret = t ? PTR_ERR(t) : -ENOENT;
1988 xt_compat_unlock(AF_INET6);
1992 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
/*
 * Compat getsockopt dispatcher: INFO and ENTRIES get compat handling;
 * everything else falls through to the native do_ip6t_get_ctl.
 */
1995 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1999 if (!capable(CAP_NET_ADMIN))
2003 case IP6T_SO_GET_INFO:
2004 ret = get_info(sock_net(sk), user, len, 1);
2006 case IP6T_SO_GET_ENTRIES:
2007 ret = compat_get_entries(sock_net(sk), user, len);
/* Revision queries etc. are layout-independent; reuse the native path. */
2010 ret = do_ip6t_get_ctl(sk, cmd, user, len);
/*
 * Native setsockopt dispatcher: CAP_NET_ADMIN gated; REPLACE installs a
 * new ruleset, ADD_COUNTERS merges user counters (compat=0).
 */
2017 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2021 if (!capable(CAP_NET_ADMIN))
2025 case IP6T_SO_SET_REPLACE:
2026 ret = do_replace(sock_net(sk), user, len);
2029 case IP6T_SO_SET_ADD_COUNTERS:
2030 ret = do_add_counters(sock_net(sk), user, len, 0);
2034 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
/*
 * Native getsockopt dispatcher: table info, entry dump, and match/target
 * revision queries (the latter may autoload "ip6t_<name>" modules).
 * NOTE(review): elided listing; some returns/braces are not visible here.
 */
2042 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2046 if (!capable(CAP_NET_ADMIN))
2050 case IP6T_SO_GET_INFO:
2051 ret = get_info(sock_net(sk), user, len, 0);
2054 case IP6T_SO_GET_ENTRIES:
2055 ret = get_entries(sock_net(sk), user, len);
2058 case IP6T_SO_GET_REVISION_MATCH:
2059 case IP6T_SO_GET_REVISION_TARGET: {
2060 struct ip6t_get_revision rev;
2063 if (*len != sizeof(rev)) {
2067 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2072 if (cmd == IP6T_SO_GET_REVISION_TARGET)
/* Looks up the revision, requesting the module by name if absent. */
2077 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2080 "ip6t_%s", rev.name);
2085 duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
/*
 * Allocate and translate an initial ruleset (repl) and register it as an
 * xt_table for this netns.  Returns the registered table or ERR_PTR; on
 * any failure the newly-allocated table_info is freed.
 * NOTE(review): elided listing; some gotos/returns are not visible here.
 */
2092 struct xt_table *ip6t_register_table(struct net *net,
2093 const struct xt_table *table,
2094 const struct ip6t_replace *repl)
2097 struct xt_table_info *newinfo;
2098 struct xt_table_info bootstrap = {0};
2099 void *loc_cpu_entry;
2100 struct xt_table *new_table;
2102 newinfo = xt_alloc_table_info(repl->size);
2108 /* choose the copy on our node/cpu, but dont care about preemption */
2109 loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2110 memcpy(loc_cpu_entry, repl->entries, repl->size);
2112 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2116 new_table = xt_register_table(net, table, &bootstrap, newinfo);
2117 if (IS_ERR(new_table)) {
2118 ret = PTR_ERR(new_table);
2124 xt_free_table_info(newinfo);
2126 return ERR_PTR(ret);
/*
 * Tear down a registered table: unregister it, run cleanup_entry on every
 * rule (dropping match/target module refs), release the table-owner module
 * ref taken for user-added rules, and free the table_info.
 */
2129 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2131 struct xt_table_info *private;
2132 void *loc_cpu_entry;
2133 struct module *table_owner = table->me;
2134 struct ip6t_entry *iter;
2136 private = xt_unregister_table(table);
2138 /* Decrease module usage counts and free resources */
2139 loc_cpu_entry = private->entries[raw_smp_processor_id()];
2140 xt_entry_foreach(iter, loc_cpu_entry, private->size)
2141 cleanup_entry(iter, net);
/* An extra owner ref was held while user rules existed beyond the
 * initial built-in entries. */
2142 if (private->number > private->initial_entries)
2143 module_put(table_owner);
2144 xt_free_table_info(private);
2147 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
/* Inclusive code range check for a single ICMPv6 type; result is XORed
 * with the invert flag at the (elided) tail of the expression. */
2149 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2150 u_int8_t type, u_int8_t code,
2153 return (type == test_type && code >= min_code && code <= max_code)
/*
 * xt_match ->match for the built-in icmp6 match: pulls the ICMPv6 header
 * and compares type/code against the rule.  A packet we cannot inspect is
 * hot-dropped rather than passed.
 */
2158 icmp6_match(const struct sk_buff *skb, const struct xt_match_param *par)
2160 const struct icmp6hdr *ic;
2161 struct icmp6hdr _icmph;
2162 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2164 /* Must not be a fragment. */
2165 if (par->fragoff != 0)
2168 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2170 /* We've been asked to examine this packet, and we
2171 * can't. Hence, no choice but to drop.
2173 duprintf("Dropping evil ICMP tinygram.\n");
2174 *par->hotdrop = true;
2178 return icmp6_type_code_match(icmpinfo->type,
2181 ic->icmp6_type, ic->icmp6_code,
2182 !!(icmpinfo->invflags&IP6T_ICMP_INV));
2185 /* Called when user tries to insert an entry of this type. */
/* Reject any invert flag other than the single supported IP6T_ICMP_INV. */
2186 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2188 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2190 /* Must specify no unknown invflags */
2191 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2194 /* The built-in targets: standard (NULL) and error. */
/* Standard verdict target: the targetsize int holds the verdict itself;
 * compat hooks translate the int between 32/64-bit layouts. */
2195 static struct xt_target ip6t_standard_target __read_mostly = {
2196 .name = IP6T_STANDARD_TARGET,
2197 .targetsize = sizeof(int),
2198 .family = NFPROTO_IPV6,
2199 #ifdef CONFIG_COMPAT
2200 .compatsize = sizeof(compat_int_t),
2201 .compat_from_user = compat_standard_from_user,
2202 .compat_to_user = compat_standard_to_user,
/* Error target placed at chain ends; its handler (ip6t_error) fires only
 * if a packet falls off the end of a ruleset, which indicates corruption. */
2206 static struct xt_target ip6t_error_target __read_mostly = {
2207 .name = IP6T_ERROR_TARGET,
2208 .target = ip6t_error,
2209 .targetsize = IP6T_FUNCTION_MAXNAMELEN,
2210 .family = NFPROTO_IPV6,
/* sockopt registration: wires the IP6T_SO_SET_*/IP6T_SO_GET_* ranges to the
 * native and (when CONFIG_COMPAT) compat dispatchers above. */
2213 static struct nf_sockopt_ops ip6t_sockopts = {
2215 .set_optmin = IP6T_BASE_CTL,
2216 .set_optmax = IP6T_SO_SET_MAX+1,
2217 .set = do_ip6t_set_ctl,
2218 #ifdef CONFIG_COMPAT
2219 .compat_set = compat_do_ip6t_set_ctl,
2221 .get_optmin = IP6T_BASE_CTL,
2222 .get_optmax = IP6T_SO_GET_MAX+1,
2223 .get = do_ip6t_get_ctl,
2224 #ifdef CONFIG_COMPAT
2225 .compat_get = compat_do_ip6t_get_ctl,
2227 .owner = THIS_MODULE,
/* Built-in "icmp6" match registration; .proto restricts it to ICMPv6. */
2230 static struct xt_match icmp6_matchstruct __read_mostly = {
2232 .match = icmp6_match,
2233 .matchsize = sizeof(struct ip6t_icmp),
2234 .checkentry = icmp6_checkentry,
2235 .proto = IPPROTO_ICMPV6,
2236 .family = NFPROTO_IPV6,
/* Per-netns init: set up xt proto state (e.g. /proc entries) for IPv6. */
2239 static int __net_init ip6_tables_net_init(struct net *net)
2241 return xt_proto_init(net, NFPROTO_IPV6);
/* Per-netns teardown counterpart of ip6_tables_net_init. */
2244 static void __net_exit ip6_tables_net_exit(struct net *net)
2246 xt_proto_fini(net, NFPROTO_IPV6);
/* Registered with register_pernet_subsys() in ip6_tables_init below. */
2249 static struct pernet_operations ip6_tables_net_ops = {
2250 .init = ip6_tables_net_init,
2251 .exit = ip6_tables_net_exit,
/*
 * Module init: pernet subsys, built-in targets, icmp6 match, then the
 * sockopt interface -- unwound in reverse order on failure (the trailing
 * xt_unregister_* lines are the error labels of that unwind).
 * NOTE(review): elided listing; the goto/label lines are not visible here.
 */
2254 static int __init ip6_tables_init(void)
2258 ret = register_pernet_subsys(&ip6_tables_net_ops);
2262 /* Noone else will be downing sem now, so we won't sleep */
2263 ret = xt_register_target(&ip6t_standard_target);
2266 ret = xt_register_target(&ip6t_error_target);
2269 ret = xt_register_match(&icmp6_matchstruct);
2273 /* Register setsockopt */
2274 ret = nf_register_sockopt(&ip6t_sockopts);
2278 pr_info("(C) 2000-2006 Netfilter Core Team\n");
/* Error unwind path (reverse registration order). */
2282 xt_unregister_match(&icmp6_matchstruct);
2284 xt_unregister_target(&ip6t_error_target);
2286 xt_unregister_target(&ip6t_standard_target);
2288 unregister_pernet_subsys(&ip6_tables_net_ops);
/* Module exit: undo ip6_tables_init registrations in reverse order. */
2293 static void __exit ip6_tables_fini(void)
2295 nf_unregister_sockopt(&ip6t_sockopts);
2297 xt_unregister_match(&icmp6_matchstruct);
2298 xt_unregister_target(&ip6t_error_target);
2299 xt_unregister_target(&ip6t_standard_target);
2301 unregister_pernet_subsys(&ip6_tables_net_ops);
2305 * find the offset to specified header or the protocol number of last header
2306 * if target < 0. "last header" is transport protocol header, ESP, or
2309 * If target header is found, its offset is set in *offset and return protocol
2310 * number. Otherwise, return -1.
2312 * If the first fragment doesn't contain the final protocol header or
2313 * NEXTHDR_NONE it is considered invalid.
2315 * Note that non-1st fragment is special case that "the protocol number
2316 * of last header" is "next header" field in Fragment header. In this case,
2317 * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
/* NOTE(review): body below is elided in this listing (line numbers jump);
 * several bounds checks, returns and braces are not visible here. */
2321 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2322 int target, unsigned short *fragoff)
2324 unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2325 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2326 unsigned int len = skb->len - start;
/* Walk the extension-header chain until the requested protocol is found
 * or a non-extension header terminates the walk. */
2331 while (nexthdr != target) {
2332 struct ipv6_opt_hdr _hdr, *hp;
2333 unsigned int hdrlen;
2335 if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2341 hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2344 if (nexthdr == NEXTHDR_FRAGMENT) {
2345 unsigned short _frag_off;
2347 fp = skb_header_pointer(skb,
2348 start+offsetof(struct frag_hdr,
/* Mask off the M flag and reserved bits to get the fragment offset. */
2355 _frag_off = ntohs(*fp) & ~0x7;
2358 ((!ipv6_ext_hdr(hp->nexthdr)) ||
2359 hp->nexthdr == NEXTHDR_NONE)) {
2361 *fragoff = _frag_off;
/* AUTH header length is counted in 4-byte units, unlike other ext hdrs. */
2367 } else if (nexthdr == NEXTHDR_AUTH)
2368 hdrlen = (hp->hdrlen + 2) << 2;
2370 hdrlen = ipv6_optlen(hp);
2372 nexthdr = hp->nexthdr;
/* Public entry points consumed by the per-table modules (ip6table_filter,
 * ip6table_mangle, ...) and other IPv6 netfilter users. */
2381 EXPORT_SYMBOL(ip6t_register_table);
2382 EXPORT_SYMBOL(ip6t_unregister_table);
2383 EXPORT_SYMBOL(ip6t_do_table);
2384 EXPORT_SYMBOL(ip6t_ext_hdr);
2385 EXPORT_SYMBOL(ipv6_find_hdr);
2387 module_init(ip6_tables_init);
2388 module_exit(ip6_tables_fini);