/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_zones.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto __read_mostly;

#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
						__read_mostly;

static inline const struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}

const struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	const struct nf_nat_protocol *p;

	rcu_read_lock();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	rcu_read_unlock();

	return p;
}
EXPORT_SYMBOL_GPL(nf_nat_proto_find_get);

void
nf_nat_proto_put(const struct nf_nat_protocol *p)
{
	module_put(p->me);
}
EXPORT_SYMBOL_GPL(nf_nat_proto_put);

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *net, u16 zone,
	    const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash_3words((__force u32)tuple->src.u3.ip,
			    (__force u32)tuple->src.u.all ^ zone,
			    tuple->dst.protonum, 0);
	return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
}

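/* Editorial note: the return statement above folds the 32-bit jhash
 * value into [0, nat_htable_size) with a multiply-and-shift instead of
 * a modulo: ((u64)hash * size) >> 32 scales hash/2^32 onto the table.
 * For example, with nat_htable_size == 16384 and hash == 0x80000000,
 * the bucket is (0x80000000ULL * 16384) >> 32 == 8192.
 */
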
/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack tracking doesn't keep track of outgoing tuples; only
	   incoming ones.  NAT means they don't have a fixed mapping,
	   so we invert the tuple and look for the incoming reply.

	   We could keep a separate hash if this proves too slow. */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);

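/* Editorial example: for an original tuple 10.0.0.2:5555 -> 8.8.8.8:53,
 * nf_ct_invert_tuplepr() produces the reply tuple 8.8.8.8:53 ->
 * 10.0.0.2:5555; nf_conntrack_tuple_taken() then reports whether some
 * other connection already expects that incoming reply, i.e. whether
 * the proposed mapping would collide.
 */
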
/* If we source map this tuple so reply looks like reply_tuple, will
 * that meet the constraints of range. */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	const struct nf_nat_protocol *proto;
	int ret = 0;

	/* If we are supposed to map IPs, then we must be in the
	   range specified, otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	rcu_read_lock();
	proto = __nf_nat_proto_find(tuple->dst.protonum);
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		ret = 1;
	rcu_read_unlock();

	return ret;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net, u16 zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(net, zone, tuple);
	const struct nf_conn_nat *nat;
	const struct nf_conn *ct;
	const struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				rcu_read_unlock();
				return 1;
			}
		}
	}
	rcu_read_unlock();
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping?  Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 range->flags & IP_NAT_RANGE_PERSISTENT ?
				0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
	j = ((u64)j * (maxip - minip + 1)) >> 32;
	*var_ipp = htonl(minip + j);
}

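/* Editorial worked example: with a range of 10.0.0.1-10.0.0.4,
 * maxip - minip + 1 == 4, so the 32-bit jhash_2words() result j is
 * scaled by ((u64)j * 4) >> 32, yielding an offset in 0..3; the
 * tuple's address becomes 10.0.0.1 + offset.  Because j depends only
 * on the source IP (and, unless IP_NAT_RANGE_PERSISTENT is set, the
 * destination IP XOR zone), a given client maps to the same external
 * address every time.
 */
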
/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
 * we change the source to map into the range.  For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	const struct nf_nat_protocol *proto;
	u16 zone = nf_ct_zone(ct);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips are not an issue. */
	if (maniptype == IP_NAT_MANIP_SRC &&
	    !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
		if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Change protocol info to have some randomization */
	if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
		proto->unique_tuple(tuple, range, maniptype, ct);
		goto out;
	}

	/* Only bother mapping if it's not already in range and unique */
	if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	     proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
	    !nf_nat_used_tuple(tuple, ct))
		goto out;

	/* Last chance: get protocol to try to obtain unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

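/* Editorial note: step 1 above is deliberately skipped when
 * IP_NAT_RANGE_PROTO_RANDOM is set -- reusing an existing source
 * mapping would defeat the per-connection port randomization that the
 * range requests.
 */
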
unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;
	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);

	/* nat helper or nfctnetlink also sets up the binding */
	nat = nfct_nat(ct);
	if (!nat) {
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC ||
		     maniptype == IP_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like inverse of reply.  Normally
	   this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp = conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	/* Place in source hash if this is the first time. */
	if (have_to_hash) {
		unsigned int srchash;

		srchash = hash_by_src(net, nf_ct_zone(ct),
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		spin_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might re-allocate the extension area */
		nat = nfct_nat(ct);
		nat->ct = ct;
		hlist_add_head_rcu(&nat->bysource,
				   &net->ipv4.nat_bysource[srchash]);
		spin_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
	else
		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);

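/* Editorial usage sketch: a SNAT-style caller (e.g. a masquerade
 * target) pins both ends of the IP range to one chosen source address
 * and lets the per-protocol code pick a free port; "newsrc" below is
 * hypothetical:
 *
 *	struct nf_nat_range range = {
 *		.flags	= IP_NAT_RANGE_MAP_IPS,
 *		.min_ip	= newsrc,
 *		.max_ip	= newsrc,
 *	};
 *	return nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
 */
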
/* Returns true if succeeded. */
static bool
manip_pkt(u_int16_t proto,
	  struct sk_buff *skb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	const struct nf_nat_protocol *p;

	if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
		return false;

	iph = (void *)skb->data + iphdroff;

	/* Manipulate protocol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(skb, iphdroff, target, maniptype))
		return false;

	iph = (void *)skb->data + iphdroff;

	if (maniptype == IP_NAT_MANIP_SRC) {
		csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return true;
}

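/* Editorial note: csum_replace4() above updates the 16-bit IP header
 * checksum incrementally (per RFC 1624) for the single 4-byte address
 * that changed, instead of recomputing the checksum over the whole
 * header; the per-protocol manip_pkt() handlers do the same for the
 * TCP/UDP pseudo-header checksums.
 */
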
/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like inverse of other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);

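/* Editorial note on the statusbit inversion above: IPS_NAT_MASK is
 * IPS_SRC_NAT | IPS_DST_NAT, so XORing with it swaps the two bits:
 *
 *	manip	direction	bit tested
 *	SRC	ORIGINAL	IPS_SRC_NAT
 *	SRC	REPLY		IPS_DST_NAT
 *	DST	ORIGINAL	IPS_DST_NAT
 *	DST	REPLY		IPS_SRC_NAT
 *
 * e.g. the reply on a DNATed connection has its *source* rewritten on
 * the way back, so the SRC manip hook must act when IPS_DST_NAT is set.
 */
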
/* Dir is direction ICMP is coming from (opposite to packet it contains) */
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff *skb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	const struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = ip_hdrlen(skb);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)skb->data + ip_hdrlen(skb);

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
		     skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished, assume it and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	pr_debug("icmp_reply_translation: translating error %p manip %u "
		 "dir %s\n", skb, manip,
		 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* rcu_read_lock()ed by nf_hook_slow */
	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

	if (!nf_ct_get_tuple(skb,
			     ip_hdrlen(skb) + sizeof(struct icmphdr),
			     (ip_hdrlen(skb) +
			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
			     (u_int16_t)AF_INET,
			     inside->ip.protocol,
			     &inner, l3proto, l4proto))
		return 0;

	/* Change inner back to look like incoming packet.  We do the
	   opposite manip on this hook to normal, because it might not
	   pass all hooks (locally-generated ICMP).  Consider incoming
	   packet: PREROUTING (DST manip), routing produces ICMP, goes
	   through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, skb,
		       ip_hdrlen(skb) + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple,
		       !manip))
		return 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Reload "inside" here, since manip_pkt may have moved
		   the skb data while mangling the inner packet. */
		inside = (void *)skb->data + ip_hdrlen(skb);
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(skb, hdrlen,
					       skb->len - hdrlen, 0));
	}

	/* Change outer to look like the reply to an incoming packet
	 * (proto 0 means don't invert per-proto part). */
	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
		if (!manip_pkt(0, skb, 0, &target, manip))
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

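/* Editorial worked example (assumes a masqueraded connection): host
 * 10.0.0.2 sends a UDP packet that is SNATed to 1.2.3.4; a router on
 * the path emits an ICMP time-exceeded error back to 1.2.3.4, quoting
 * the translated packet inside.  This function first rewrites the
 * embedded (inner) header -- with the opposite manip, see the comment
 * above the first manip_pkt() call -- and then the outer IP header,
 * so the error reaches 10.0.0.2 quoting the packet exactly as that
 * host originally sent it.
 */
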
/* Protocol registration. */
int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
{
	int ret = 0;

	spin_lock_bh(&nf_nat_lock);
	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
 out:
	spin_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);

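/* Editorial usage sketch (hypothetical "foo" protocol; the members
 * shown are the ones this file itself dereferences):
 *
 *	static const struct nf_nat_protocol nf_nat_protocol_foo = {
 *		.protonum	= IPPROTO_FOO,
 *		.me		= THIS_MODULE,
 *		.manip_pkt	= foo_manip_pkt,
 *		.in_range	= foo_in_range,
 *		.unique_tuple	= foo_unique_tuple,
 *	};
 *
 * A protocol module would call
 * nf_nat_protocol_register(&nf_nat_protocol_foo) from its init
 * function and nf_nat_protocol_unregister() on exit.
 */
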
/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
	spin_lock_bh(&nf_nat_lock);
	rcu_assign_pointer(nf_nat_protos[proto->protonum],
			   &nf_nat_unknown_protocol);
	spin_unlock_bh(&nf_nat_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(void *new, void *old)
{
	struct nf_conn_nat *new_nat = new;
	struct nf_conn_nat *old_nat = old;
	struct nf_conn *ct = old_nat->ct;

	if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
		return;

	spin_lock_bh(&nf_nat_lock);
	new_nat->ct = ct;
	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_protocol *npt;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
	if (err < 0)
		return err;

	npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
	if (npt->nlattr_to_range)
		err = npt->nlattr_to_range(tb, range);
	nf_nat_proto_put(npt);
	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_MINIP]		= { .type = NLA_U32 },
	[CTA_NAT_MAXIP]		= { .type = NLA_U32 },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
	if (err < 0)
		return err;

	if (tb[CTA_NAT_MINIP])
		range->min_ip = nla_get_be32(tb[CTA_NAT_MINIP]);

	if (!tb[CTA_NAT_MAXIP])
		range->max_ip = range->min_ip;
	else
		range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]);

	if (range->min_ip)
		range->flags |= IP_NAT_RANGE_MAP_IPS;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
	if (err < 0)
		return err;

	return 0;
}

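/* Editorial note: the nested attribute layout consumed above is
 *
 *	CTA_NAT
 *	 +- CTA_NAT_MINIP		(NLA_U32, big-endian IPv4 address)
 *	 +- CTA_NAT_MAXIP		(NLA_U32, optional; defaults to MINIP)
 *	 +- CTA_NAT_PROTO		(nested, per-protocol)
 *	     +- CTA_PROTONAT_PORT_MIN	(NLA_U16)
 *	     +- CTA_PROTONAT_PORT_MAX	(NLA_U16)
 *
 * as sent by ctnetlink users requesting a NAT binding.
 */
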
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;

	if (nfnetlink_parse_nat(attr, ct, &range) < 0)
		return -EINVAL;
	if (nf_nat_initialized(ct, manip))
		return -EEXIST;

	return nf_nat_setup_info(ct, &range, manip);
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static int __net_init nf_nat_net_init(struct net *net)
{
	/* Leave them the same for the moment. */
	net->ipv4.nat_htable_size = net->ct.htable_size;
	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
						       &net->ipv4.nat_vmalloced, 0);
	if (!net->ipv4.nat_bysource)
		return -ENOMEM;
	return 0;
}

/* Clear NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}

static void __net_exit nf_nat_net_exit(struct net *net)
{
	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
	synchronize_rcu();
	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
			     net->ipv4.nat_htable_size);
}

static struct pernet_operations nf_nat_net_ops = {
	.init = nf_nat_net_init,
	.exit = nf_nat_net_exit,
};

static int __init nf_nat_init(void)
{
	size_t i;
	int ret;

	need_ipv4_conntrack();

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	/* Sew in builtin protocols. */
	spin_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	spin_unlock_bh(&nf_nat_lock);

	/* Initialize fake conntrack so that NAT will skip it */
	nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK;

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);

	BUG_ON(nf_nat_seq_adjust_hook != NULL);
	rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
			   nfnetlink_parse_nat_setup);
	BUG_ON(nf_ct_nat_offset != NULL);
	rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
	return 0;

 cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

static void __exit nf_nat_cleanup(void)
{
	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_l3proto_put(l3proto);
	nf_ct_extend_unregister(&nat_extend);
	rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
	rcu_assign_pointer(nf_ct_nat_offset, NULL);
	synchronize_net();
}

MODULE_LICENSE("GPL");
MODULE_ALIAS("nf-nat-ipv4");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);