/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_zones.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto __read_mostly;

#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
						__read_mostly;

static inline const struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}
const struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	const struct nf_nat_protocol *p;

	rcu_read_lock();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	rcu_read_unlock();

	return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(const struct nf_nat_protocol *p)
{
	module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *net, u16 zone,
	    const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Original src, to ensure we map it consistently if possible. */
	hash = jhash_3words((__force u32)tuple->src.u3.ip,
			    (__force u32)tuple->src.u.all ^ zone,
			    tuple->dst.protonum, 0);
	return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
}
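
/* A note on the bucket computation above (pure arithmetic, nothing
 * assumed): rather than "hash % nat_htable_size", the 32-bit hash is
 * scaled by the table size and the top 32 bits are kept, which maps
 * the hash uniformly onto [0, nat_htable_size) without a division.
 * E.g. with a table size of 4096, hash 0x80000000 lands in bucket
 * ((u64)0x80000000 * 4096) >> 32 == 2048.
 */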

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	   incoming ones.  NAT means they don't have a fixed mapping,
	   so we invert the tuple and look for the incoming reply.

	   We could keep a separate hash if this proves too slow. */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);
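
/* Worked example of the inversion above (addresses are illustrative):
 * a candidate SNAT tuple 1.2.3.4:1024 -> 8.8.8.8:53/udp inverts to
 * 8.8.8.8:53 -> 1.2.3.4:1024, which is exactly what replies to the
 * mapped connection would look like.  If nf_conntrack_tuple_taken()
 * finds another conntrack already owning that reply tuple, the
 * candidate mapping would collide and must be rejected.
 */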

/* If we source-map this tuple so that the reply looks like reply_tuple,
 * will that meet the constraints of the given range? */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	const struct nf_nat_protocol *proto;
	int ret = 0;

	/* If we are supposed to map IPs, then we must be in the
	   range specified, otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	rcu_read_lock();
	proto = __nf_nat_proto_find(tuple->dst.protonum);
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		ret = 1;
	rcu_read_unlock();

	return ret;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net, u16 zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(net, zone, tuple);
	const struct nf_conn_nat *nat;
	const struct nf_conn *ct;
	const struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				rcu_read_unlock();
				return 1;
			}
		}
	}
	rcu_read_unlock();
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 range->flags & IP_NAT_RANGE_PERSISTENT ?
				0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
	j = ((u64)j * (maxip - minip + 1)) >> 32;
	*var_ipp = htonl(minip + j);
}
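
/* Illustration of the selection above (addresses are made up): with a
 * range of 10.0.0.1 - 10.0.0.4, maxip - minip + 1 == 4, so the scaled
 * jhash value j lands in [0, 3] and each client is pinned to one of
 * the four addresses.  With IP_NAT_RANGE_PERSISTENT the destination is
 * excluded from the hash, so a client keeps the same mapped IP across
 * all destinations rather than per destination.
 */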

/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
 * we change the source to map into the range.  For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	const struct nf_nat_protocol *proto;
	u16 zone = nf_ct_zone(ct);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips are not an issue. */
	if (maniptype == IP_NAT_MANIP_SRC &&
	    !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
		if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Change protocol info to have some randomization */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
		proto->unique_tuple(tuple, range, maniptype, ct);
		goto out;
	}

	/* Only bother mapping if it's not already in range and unique */
	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
	    !nf_nat_used_tuple(tuple, ct))
		goto out;

	/* Last chance: have the protocol try to obtain a unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;
	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);

	/* A NAT helper or ctnetlink may have already added the extension. */
	nat = nfct_nat(ct);
	if (!nat) {
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC ||
		     maniptype == IP_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like the inverse of the reply.
	   Normally this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp =
	   conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	/* Place in source hash if this is the first time. */
	if (have_to_hash) {
		unsigned int srchash;

		srchash = hash_by_src(net, nf_ct_zone(ct),
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		spin_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might have reallocated the
		 * extension area, so reload the pointer. */
		nat = nfct_nat(ct);
		nat->ct = ct;
		hlist_add_head_rcu(&nat->bysource,
				   &net->ipv4.nat_bysource[srchash]);
		spin_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
	else
		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
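
/* A minimal caller sketch (illustrative only; modelled on how SNAT-style
 * targets use this API, with "newsrc" as a hypothetical local address):
 *
 *	struct nf_nat_range range = {
 *		.flags	= IP_NAT_RANGE_MAP_IPS,
 *		.min_ip	= newsrc,
 *		.max_ip	= newsrc,
 *	};
 *	return nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
 *
 * This pins the source address to newsrc and lets get_unique_tuple()
 * pick a free per-protocol id (e.g. a source port) automatically.
 */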

/* Returns true if succeeded. */
static bool
manip_pkt(u_int16_t proto,
	  struct sk_buff *skb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	const struct nf_nat_protocol *p;

	if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
		return false;

	iph = (void *)skb->data + iphdroff;

	/* Manipulate protocol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(skb, iphdroff, target, maniptype))
		return false;

	/* The per-protocol manip may have reallocated skb data, so
	 * recompute the header pointer. */
	iph = (void *)skb->data + iphdroff;

	if (maniptype == IP_NAT_MANIP_SRC) {
		csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return true;
}

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

/* Dir is the direction the ICMP is coming from (opposite to the packet
   it contains). */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff *skb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	const struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = ip_hdrlen(skb);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)skb->data + ip_hdrlen(skb);

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
		     skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished yet, assume it will apply and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	pr_debug("icmp_reply_translation: translating error %p manip %u "
		 "dir %s\n", skb, manip,
		 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* rcu_read_lock()ed by nf_hook_slow */
	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

	if (!nf_ct_get_tuple(skb,
			     ip_hdrlen(skb) + sizeof(struct icmphdr),
			     (ip_hdrlen(skb) +
			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
			     (u_int16_t)AF_INET,
			     inside->ip.protocol,
			     &inner, l3proto, l4proto))
		return 0;

	/* Change inner back to look like the incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider an incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, skb,
		       ip_hdrlen(skb) + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple,
		       !manip))
		return 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Reload "inside"; manip_pkt on the inner header may
		   have reallocated skb data. */
		inside = (void *)skb->data + ip_hdrlen(skb);
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(skb, hdrlen,
					       skb->len - hdrlen, 0));
	}

	/* Change the outer header to look like the reply to an incoming
	 * packet (proto 0 means don't invert the per-proto part). */
	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
		if (!manip_pkt(0, skb, 0, &target, manip))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

/* Protocol registration. */
int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
{
	int ret = 0;

	spin_lock_bh(&nf_nat_lock);
	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
 out:
	spin_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);
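
/* Registration sketch (illustrative only; "nf_nat_protocol_foo" and its
 * module hooks are hypothetical):
 *
 *	static int __init nf_nat_proto_foo_init(void)
 *	{
 *		return nf_nat_protocol_register(&nf_nat_protocol_foo);
 *	}
 *	module_init(nf_nat_proto_foo_init);
 *
 * Only one protocol may occupy a protonum slot; registering over an
 * existing one returns -EBUSY until it is unregistered.
 */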

/* No one stores the protocol pointer anywhere else; simply swap the
   unknown protocol back in. */
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
	spin_lock_bh(&nf_nat_lock);
	rcu_assign_pointer(nf_nat_protos[proto->protonum],
			   &nf_nat_unknown_protocol);
	spin_unlock_bh(&nf_nat_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(void *new, void *old)
{
	struct nf_conn_nat *new_nat = new;
	struct nf_conn_nat *old_nat = old;
	struct nf_conn *ct = old_nat->ct;

	if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
		return;

	spin_lock_bh(&nf_nat_lock);
	new_nat->ct = ct;
	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_protocol *npt;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
	if (err < 0)
		return err;

	npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
	if (npt->nlattr_to_range)
		err = npt->nlattr_to_range(tb, range);
	nf_nat_proto_put(npt);
	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_MINIP]		= { .type = NLA_U32 },
	[CTA_NAT_MAXIP]		= { .type = NLA_U32 },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
	if (err < 0)
		return err;

	if (tb[CTA_NAT_MINIP])
		range->min_ip = nla_get_be32(tb[CTA_NAT_MINIP]);

	if (!tb[CTA_NAT_MAXIP])
		range->max_ip = range->min_ip;
	else
		range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]);

	if (range->min_ip)
		range->flags |= IP_NAT_RANGE_MAP_IPS;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
	if (err < 0)
		return err;

	return 0;
}
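
/* Expected attribute layout for the parser above (a sketch; the nesting
 * follows the nla_policy tables here, and building the attributes is up
 * to the ctnetlink caller):
 *
 *	CTA_NAT
 *	 +- CTA_NAT_MINIP	(NLA_U32, big-endian IPv4 address)
 *	 +- CTA_NAT_MAXIP	(NLA_U32, optional; defaults to MINIP)
 *	 +- CTA_NAT_PROTO	(nested, optional)
 *	     +- CTA_PROTONAT_PORT_MIN	(NLA_U16)
 *	     +- CTA_PROTONAT_PORT_MAX	(NLA_U16)
 */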

static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;

	if (nfnetlink_parse_nat(attr, ct, &range) < 0)
		return -EINVAL;
	if (nf_nat_initialized(ct, manip))
		return -EEXIST;

	return nf_nat_setup_info(ct, &range, manip);
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static int __net_init nf_nat_net_init(struct net *net)
{
	/* Leave them the same for the moment. */
	net->ipv4.nat_htable_size = net->ct.htable_size;
	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
						       &net->ipv4.nat_vmalloced, 0);
	if (!net->ipv4.nat_bysource)
		return -ENOMEM;
	return 0;
}

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}

static void __net_exit nf_nat_net_exit(struct net *net)
{
	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
	synchronize_rcu();
	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
			     net->ipv4.nat_htable_size);
}

static struct pernet_operations nf_nat_net_ops = {
	.init = nf_nat_net_init,
	.exit = nf_nat_net_exit,
};

static int __init nf_nat_init(void)
{
	size_t i;
	int ret;

	need_ipv4_conntrack();

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	/* Sew in builtin protocols. */
	spin_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	spin_unlock_bh(&nf_nat_lock);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);

	BUG_ON(nf_nat_seq_adjust_hook != NULL);
	rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
			   nfnetlink_parse_nat_setup);
	BUG_ON(nf_ct_nat_offset != NULL);
	rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
	return 0;

 cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

static void __exit nf_nat_cleanup(void)
{
	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_l3proto_put(l3proto);
	nf_ct_extend_unregister(&nat_extend);
	rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
	rcu_assign_pointer(nf_ct_nat_offset, NULL);
	synchronize_net();
}

MODULE_LICENSE("GPL");
MODULE_ALIAS("nf-nat-ipv4");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);