/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

/* nf_conntrack_standalone needs this */
atomic_t nf_conntrack_count = ATOMIC_INIT(0);
EXPORT_SYMBOL_GPL(nf_conntrack_count);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct hlist_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

unsigned int nf_ct_log_invalid __read_mostly;
HLIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;
static struct kmem_cache *nf_conntrack_cachep __read_mostly;

DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int n;
	u_int32_t h;

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	h = jhash2((u32 *)tuple, n,
		   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
			  tuple->dst.protonum));

	return ((u64)h * size) >> 32;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}

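/* Example (hypothetical sketch, for illustration only, not part of the
 * kernel sources): the bucket mapping used by __hash_conntrack() above.
 * Multiplying the 32-bit jhash value by the table size and shifting
 * right by 32 scales the hash uniformly into [0, size), with no modulo,
 * so the table size need not be a power of two. */
#if 0	/* illustrative example, intentionally not compiled */
static unsigned int example_bucket(u_int32_t h, unsigned int size)
{
	/* equivalent to floor((h / 2^32) * size) */
	return ((u64)h * size) >> 32;
}
#endif
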
int
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(tuple);

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return 0;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

int nf_ct_get_tuplepr(const struct sk_buff *skb,
		      unsigned int nhoff,
		      u_int16_t l3num,
		      struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return 0;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
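
/* Example (hypothetical sketch, for illustration only): a caller that
 * fills a tuple straight from an IPv4 packet, letting the l3/l4
 * protocol modules be looked up internally by nf_ct_get_tuplepr(). */
#if 0	/* illustrative example, intentionally not compiled */
static int example_tuple_from_ipv4_skb(const struct sk_buff *skb,
				       struct nf_conntrack_tuple *tuple)
{
	return nf_ct_get_tuplepr(skb, skb_network_offset(skb),
				 PF_INET, tuple);
}
#endif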

int
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	NF_CT_TUPLE_U_BLANK(inverse);

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return 0;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	nf_conntrack_event(IPCT_DESTROY, ct);
	set_bit(IPS_DYING_BIT, &ct->status);

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
				       ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	nf_ct_ext_destroy(ct);

	rcu_read_unlock();

	spin_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
		hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
	}

	NF_CT_STAT_INC(delete);
	spin_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (help) {
		rcu_read_lock();
		helper = rcu_dereference(help->helper);
		if (helper && helper->destroy)
			helper->destroy(ct);
		rcu_read_unlock();
	}

	spin_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
	nf_ct_put(ct);
}

struct nf_conntrack_tuple_hash *
__nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_node *n;
	unsigned int hash = hash_conntrack(tuple);

	hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(found);
			return h;
		}
		NF_CT_STAT_INC(searched);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
	h = __nf_conntrack_find(tuple);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
	}
	rcu_read_unlock();

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
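
/* Example (hypothetical sketch, for illustration only): a caller of
 * nf_conntrack_find_get().  A successful lookup returns the entry with
 * the conntrack refcount raised, so it must always be balanced with
 * nf_ct_put(). */
#if 0	/* illustrative example, intentionally not compiled */
static int example_tuple_is_known(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;

	h = nf_conntrack_find_get(tuple);
	if (h == NULL)
		return 0;
	nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
	return 1;
}
#endif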

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
			   &nf_conntrack_hash[hash]);
	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
			   &nf_conntrack_hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	spin_lock_bh(&nf_conntrack_lock);
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_node *n;
	enum ip_conntrack_info ctinfo;

	ct = nf_ct_get(skb, &ctinfo);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in the other direction.  The actual packet
	   which created the connection will be IP_CT_NEW or, for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns.  But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */

	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost the race. */
	hlist_for_each_entry(h, n, &nf_conntrack_hash[hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
			goto out;
	hlist_for_each_entry(h, n, &nf_conntrack_hash[repl_hash], hnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple))
			goto out;

	/* Remove from unconfirmed list */
	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);
	NF_CT_STAT_INC(insert);
	spin_unlock_bh(&nf_conntrack_lock);
	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, skb);
#ifdef CONFIG_NF_NAT_NEEDED
	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_NATINFO, skb);
#endif
	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, skb);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
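
/* Example (hypothetical rough sketch, for illustration only): the
 * header-side wrapper (nf_conntrack_confirm() in nf_conntrack_core.h)
 * funnels packets here from the last netfilter hooks; only unconfirmed
 * conntracks take the slow path above.  Roughly: */
#if 0	/* illustrative example, intentionally not compiled */
static inline int example_confirm(struct sk_buff *skb)
{
	struct nf_conn *ct = (struct nf_conn *)skb->nfct;

	if (ct && !nf_ct_is_confirmed(ct))
		return __nf_conntrack_confirm(skb);
	return NF_ACCEPT;
}
#endif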

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_node *n;
	unsigned int hash = hash_conntrack(tuple);

	rcu_read_lock();
	hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(found);
			rcu_read_unlock();
			return 1;
		}
		NF_CT_STAT_INC(searched);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static noinline int early_drop(unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash],
					 hnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}

		if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
			ct = NULL;
		if (ct || cnt >= NF_CT_EVICTION_RANGE)
			break;
		hash = (hash + 1) % nf_conntrack_htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}

struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl)
{
	struct nf_conn *conntrack = NULL;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd, 4);
		nf_conntrack_hash_rnd_initted = 1;
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&nf_conntrack_count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&nf_conntrack_count) > nf_conntrack_max)) {
		unsigned int hash = hash_conntrack(orig);
		if (!early_drop(hash)) {
			atomic_dec(&nf_conntrack_count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	conntrack = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC);
	if (conntrack == NULL) {
		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
		atomic_dec(&nf_conntrack_count);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conntrack->ct_general.use, 1);
	conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&conntrack->timeout, death_by_timeout,
		    (unsigned long)conntrack);
	INIT_RCU_HEAD(&conntrack->rcu);

	return conntrack;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

static void nf_conntrack_free_rcu(struct rcu_head *head)
{
	struct nf_conn *ct = container_of(head, struct nf_conn, rcu);

	nf_ct_ext_free(ct);
	kmem_cache_free(nf_conntrack_cachep, ct);
	atomic_dec(&nf_conntrack_count);
}

void nf_conntrack_free(struct nf_conn *conntrack)
{
	call_rcu(&conntrack->rcu, nf_conntrack_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *conntrack;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	conntrack = nf_conntrack_alloc(tuple, &repl_tuple);
	if (conntrack == NULL || IS_ERR(conntrack)) {
		pr_debug("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)conntrack;
	}

	if (!l4proto->new(conntrack, skb, dataoff)) {
		nf_conntrack_free(conntrack);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	spin_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 conntrack, exp);
		/* Welcome, Mr. Bond.  We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &conntrack->status);
		conntrack->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		conntrack->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		conntrack->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&conntrack->master->ct_general);
		NF_CT_STAT_INC(expect_new);
	} else {
		struct nf_conntrack_helper *helper;

		helper = __nf_ct_helper_find(&repl_tuple);
		if (helper) {
			help = nf_ct_helper_ext_add(conntrack, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, helper);
		}
		NF_CT_STAT_INC(new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_add_head(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
		       &unconfirmed);

	spin_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(conntrack, exp);
		nf_ct_expect_put(exp);
	}

	return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(&tuple);
	if (!h) {
		h = init_conntrack(&tuple, l3proto, l4proto, skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}

unsigned int
nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)?  Ignore. */
	if (skb->nfct) {
		NF_CT_STAT_INC_ATOMIC(ignore);
		return NF_ACCEPT;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find((u_int16_t)pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(error);
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	l4proto = __nf_ct_l4proto_find((u_int16_t)pf, protonum);

	/* It may be a special packet, error, unclean...
	 * the inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL &&
	    (ret = l4proto->error(skb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
		NF_CT_STAT_INC_ATOMIC(error);
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	ct = resolve_normal_ct(skb, dataoff, pf, protonum, l3proto, l4proto,
			       &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(drop);
		return NF_DROP;
	}

	NF_CT_ASSERT(skb->nfct);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
	if (ret < 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(invalid);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_STATUS, skb);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
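
/* Example (hypothetical sketch, for illustration only):
 * nf_conntrack_in() is invoked from small per-family netfilter hook
 * functions (see nf_conntrack_l3proto_ipv4.c and the IPv6 equivalent).
 * An IPv4 hook would look roughly like this. */
#if 0	/* illustrative example, intentionally not compiled */
static unsigned int example_ipv4_conntrack_in(unsigned int hooknum,
					      struct sk_buff *skb,
					      const struct net_device *in,
					      const struct net_device *out,
					      int (*okfn)(struct sk_buff *))
{
	return nf_conntrack_in(PF_INET, hooknum, skb);
}
#endif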

int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			 const struct nf_conntrack_tuple *orig)
{
	int ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	NF_CT_DUMP_TUPLE(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && help->expecting != 0))
		return;

	rcu_read_lock();
	helper = __nf_ct_helper_find(newreply);
	if (helper == NULL) {
		if (help)
			rcu_assign_pointer(help->helper, NULL);
		goto out;
	}

	if (help == NULL) {
		help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
		if (help == NULL)
			goto out;
	} else {
		memset(&help->help, 0, sizeof(help->help));
	}

	rcu_assign_pointer(help->helper, helper);
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	int event = 0;

	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	spin_lock_bh(&nf_conntrack_lock);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
		event = IPCT_REFRESH;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout.  Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ
		    && del_timer(&ct->timeout)) {
			ct->timeout.expires = newtime;
			add_timer(&ct->timeout);
			event = IPCT_REFRESH;
		}
	}

acct:
#ifdef CONFIG_NF_CT_ACCT
	if (do_acct) {
		ct->counters[CTINFO2DIR(ctinfo)].packets++;
		ct->counters[CTINFO2DIR(ctinfo)].bytes +=
			skb->len - skb_network_offset(skb);

		if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
		    || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
			event |= IPCT_COUNTER_FILLING;
	}
#endif

	spin_unlock_bh(&nf_conntrack_lock);

	/* must be unlocked when calling event cache */
	if (event)
		nf_conntrack_event_cache(event, skb);
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
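
/* Example (hypothetical sketch, for illustration only): protocol
 * trackers normally reach this through the nf_ct_refresh_acct() and
 * nf_ct_refresh() wrappers in the header, which fix do_acct.  A caller
 * bumping an established connection's timeout on every packet might
 * look like this. */
#if 0	/* illustrative example, intentionally not compiled */
static void example_refresh(struct nf_conn *ct,
			    enum ip_conntrack_info ctinfo,
			    const struct sk_buff *skb)
{
	/* refresh to 5 seconds from now, with accounting */
	__nf_ct_refresh_acct(ct, ctinfo, skb, 5 * HZ, 1);
}
#endif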

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like.  This needs to
 * be in ip_conntrack_core, since we don't want the protocols to
 * autoload or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT] = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT] = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
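
/* Example (hypothetical sketch, for illustration only): a
 * ctnetlink-style consumer parsing the nested CTA_PROTO attributes
 * with the policy above and recovering the ports. */
#if 0	/* illustrative example, intentionally not compiled */
static int example_parse_ports(struct nlattr *attr,
			       struct nf_conntrack_tuple *t)
{
	struct nlattr *tb[CTA_PROTO_MAX+1];
	int err;

	err = nla_parse_nested(tb, CTA_PROTO_MAX, attr,
			       nf_ct_port_nla_policy);
	if (err < 0)
		return err;
	return nf_ct_port_nlattr_to_tuple(tb, t);
}
#endif
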
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}

static inline int
do_iter(const struct nf_conntrack_tuple_hash *i,
	int (*iter)(struct nf_conn *i, void *data),
	void *data)
{
	return iter(nf_ct_tuplehash_to_ctrack(i), data);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		hlist_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_for_each_entry(h, n, &unconfirmed, hnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void
nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush(void)
{
	nf_ct_iterate_cleanup(kill_all, NULL);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush);

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
	rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework.  Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_ct_event_cache_flush();
 i_see_dead_people:
	nf_conntrack_flush();
	if (atomic_read(&nf_conntrack_count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	rcu_assign_pointer(nf_ct_destroy, NULL);

	kmem_cache_destroy(nf_conntrack_cachep);
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);

	nf_conntrack_proto_fini();
	nf_conntrack_helper_fini();
	nf_conntrack_expect_fini();
}

struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
{
	struct hlist_head *hash;
	unsigned int size, i;

	*vmalloced = 0;

	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
	hash = (void *)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
					get_order(sizeof(struct hlist_head)
						  * size));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = vmalloc(sizeof(struct hlist_head) * size);
	}

	if (hash)
		for (i = 0; i < size; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, vmalloced, old_vmalloced;
	unsigned int hashsize, old_size;
	int rnd;
	struct hlist_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtoul(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we can also
	 * use a new random seed */
	get_random_bytes(&rnd, 4);

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the lock.
	 */
	spin_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!hlist_empty(&nf_conntrack_hash[i])) {
			h = hlist_entry(nf_conntrack_hash[i].first,
					struct nf_conntrack_tuple_hash, hnode);
			hlist_del_rcu(&h->hnode);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			hlist_add_head(&h->hnode, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = nf_conntrack_vmalloc;
	old_hash = nf_conntrack_hash;

	nf_conntrack_htable_size = hashsize;
	nf_conntrack_vmalloc = vmalloced;
	nf_conntrack_hash = hash;
	nf_conntrack_hash_rnd = rnd;
	spin_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);
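
/* Usage note (illustrative, not part of the original file): the
 * module_param_call() above makes the hash size writable at runtime
 * through sysfs, e.g.:
 *	echo 16384 > /sys/module/nf_conntrack/parameters/hashsize
 */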

int __init nf_conntrack_init(void)
{
	int max_factor = 8;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((num_physpages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
						  &nf_conntrack_vmalloc);
	if (!nf_conntrack_hash) {
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_out;
	}

	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
						sizeof(struct nf_conn),
						0, 0, NULL);
	if (!nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		goto err_free_hash;
	}

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_free_conntrack_slab;

	ret = nf_conntrack_expect_init();
	if (ret < 0)
		goto out_fini_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto out_fini_expect;

	/* For use by REJECT target */
	rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
	rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/*  - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return ret;

out_fini_expect:
	nf_conntrack_expect_fini();
out_fini_proto:
	nf_conntrack_proto_fini();
err_free_conntrack_slab:
	kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
	nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc,
			     nf_conntrack_htable_size);
err_out:
	return -ENOMEM;
}