/* Connection state tracking for netfilter. This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>

#define NF_CONNTRACK_VERSION	"0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
				      enum nf_nat_manip_type manip,
				      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

DEFINE_SPINLOCK(nf_conntrack_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

struct nf_conn nf_conntrack_untracked __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_untracked);

static struct kmem_cache *nf_conntrack_cachep __read_mostly;

static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;

static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
				  unsigned int size, unsigned int rnd)
{
	unsigned int n;
	u_int32_t h;

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	h = jhash2((u32 *)tuple, n,
		   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
			  tuple->dst.protonum));

	return ((u64)h * size) >> 32;
}

static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
{
	return __hash_conntrack(tuple, nf_conntrack_htable_size,
				nf_conntrack_hash_rnd);
}
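
/*
 * A note on the bucket mapping above: rather than the usual "h % size",
 * the 32-bit jhash value is scaled into [0, size) with a multiply and
 * shift:
 *
 *	bucket = ((u64)h * size) >> 32;
 *
 * This avoids an integer division on the fast path and works for table
 * sizes that are not powers of two. Worked example: with size = 16384,
 * h = 0x80000000 (halfway through the 32-bit range) maps to bucket
 * 8192, halfway through the table.
 */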

bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_l3proto *l3proto,
		const struct nf_conntrack_l4proto *l4proto)
{
	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
		return false;

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int protoff;
	u_int8_t protonum;
	int ret;

	rcu_read_lock();

	l3proto = __nf_ct_l3proto_find(l3num);
	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
	if (ret != NF_ACCEPT) {
		rcu_read_unlock();
		return false;
	}

	l4proto = __nf_ct_l4proto_find(l3num, protonum);

	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
			      l3proto, l4proto);

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig,
		   const struct nf_conntrack_l3proto *l3proto,
		   const struct nf_conntrack_l4proto *l4proto)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;
	if (l3proto->invert_tuple(inverse, orig) == 0)
		return false;

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;
	return l4proto->invert_tuple(inverse, orig);
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_l4proto *l4proto;

	pr_debug("destroy_conntrack(%p)\n", ct);
	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
	NF_CT_ASSERT(!timer_pending(&ct->timeout));

	/* To make sure we don't get any weird locking issues here:
	 * destroy_conntrack() MUST NOT be called with a write lock
	 * to nf_conntrack_lock!!! -HW */
	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	if (l4proto && l4proto->destroy)
		l4proto->destroy(ct);

	rcu_read_unlock();

	spin_lock_bh(&nf_conntrack_lock);
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too. */
	nf_ct_remove_expectations(ct);

	/* We overload first tuple to link into unconfirmed list. */
	if (!nf_ct_is_confirmed(ct)) {
		BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	}

	NF_CT_STAT_INC(net, delete);
	spin_unlock_bh(&nf_conntrack_lock);

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_helper_destroy(ct);
	spin_lock_bh(&nf_conntrack_lock);
	/* Inside lock so preempt is disabled on module removal path.
	 * Otherwise we can get spurious warnings. */
	NF_CT_STAT_INC(net, delete_list);
	clean_from_lists(ct);
	spin_unlock_bh(&nf_conntrack_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);

static void death_by_event(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;
	struct net *net = nf_ct_net(ct);

	if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
		/* bad luck, let's retry again */
		ct->timeout.expires = jiffies +
			(random32() % net->ct.sysctl_events_retry_timeout);
		add_timer(&ct->timeout);
		return;
	}
	/* we've got the event delivered, now it's dying */
	set_bit(IPS_DYING_BIT, &ct->status);
	spin_lock(&nf_conntrack_lock);
	hlist_nulls_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&nf_conntrack_lock);
	nf_ct_put(ct);
}

void nf_ct_insert_dying_list(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* add this conntrack to the dying list */
	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &net->ct.dying);
	spin_unlock_bh(&nf_conntrack_lock);
	/* set a new timer to retry event delivery */
	setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
	ct->timeout.expires = jiffies +
		(random32() % net->ct.sysctl_events_retry_timeout);
	add_timer(&ct->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);

static void death_by_timeout(unsigned long ul_conntrack)
{
	struct nf_conn *ct = (void *)ul_conntrack;

	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
		/* destroy event was not delivered */
		nf_ct_delete_from_lists(ct);
		nf_ct_insert_dying_list(ct);
		return;
	}
	set_bit(IPS_DYING_BIT, &ct->status);
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
}

/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 *   OR
 * - Caller must lock nf_conntrack_lock before calling this function
 */
struct nf_conntrack_tuple_hash *
__nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash = hash_conntrack(tuple);

	/* Disable BHs the entire time since we normally need to disable them
	 * at least once for the stats anyway.
	 */
	local_bh_disable();
begin:
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		if (nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(net, found);
			local_bh_enable();
			return h;
		}
		NF_CT_STAT_INC(net, searched);
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != hash)
		goto begin;
	local_bh_enable();

	return NULL;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_find);
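
/*
 * Why the "goto begin" above is needed: every chain in the conntrack
 * hash ends in a "nulls" marker that encodes the bucket index (see
 * nf_ct_alloc_hashtable() below, which initializes slot i with nulls
 * value i). Because conntrack objects come from a SLAB_DESTROY_BY_RCU
 * cache, an entry can be freed and reused under a lockless reader, and
 * the reader may silently be carried into a different chain. If the
 * nulls value found at the end of the walk does not match the bucket
 * we started in, the walk restarts.
 */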

/* Find a connection corresponding to a tuple. */
struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();
begin:
	h = __nf_conntrack_find(net, tuple);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(nf_ct_is_dying(ct) ||
			     !atomic_inc_not_zero(&ct->ct_general.use)))
			h = NULL;
		else {
			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
				nf_ct_put(ct);
				goto begin;
			}
		}
	}
	rcu_read_unlock();

	return h;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
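
/*
 * Sketch of the caller contract for the lockless lookup: the refcount
 * is taken with atomic_inc_not_zero() because the object may already
 * be mid-free (refcount zero), and the tuple is re-checked only after
 * the reference is held, because the SLAB_DESTROY_BY_RCU slab may have
 * recycled the object for a different connection in between. Any other
 * caller of __nf_conntrack_find() that does not hold nf_conntrack_lock
 * is expected to follow this same take-reference-then-recheck pattern.
 */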

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int repl_hash)
{
	struct net *net = nf_ct_net(ct);

	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
				 &net->ct.hash[repl_hash]);
}

void nf_conntrack_hash_insert(struct nf_conn *ct)
{
	unsigned int hash, repl_hash;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	__nf_conntrack_hash_insert(ct, hash, repl_hash);
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	unsigned int hash, repl_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction. Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	/* We're not in hash table, and we refuse to set up related
	   connections for unconfirmed conns. But packet copies and
	   REJECT will give spurious warnings here. */
	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
	/* No external references means no one else could have
	   confirmed us. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
	pr_debug("Confirming conntrack %p\n", ct);

	spin_lock_bh(&nf_conntrack_lock);

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash. If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				      &h->tuple))
			goto out;
	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				      &h->tuple))
			goto out;

	/* Remove from unconfirmed list */
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout.expires += jiffies;
	add_timer(&ct->timeout);
	atomic_inc(&ct->ct_general.use);
	set_bit(IPS_CONFIRMED_BIT, &ct->status);

	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, repl_hash);
	NF_CT_STAT_INC(net, insert);
	spin_unlock_bh(&nf_conntrack_lock);

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	NF_CT_STAT_INC(net, insert_failed);
	spin_unlock_bh(&nf_conntrack_lock);
	return NF_DROP;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
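
/*
 * Confirmation, in short: a conntrack created by init_conntrack() sits
 * only on the per-netns unconfirmed list while its first packet
 * traverses the remaining hooks. Just before the packet leaves,
 * __nf_conntrack_confirm() moves the entry into the real hash table.
 * The double scan of both chains under nf_conntrack_lock is what
 * resolves races with NAT and with packet copies: if another path
 * already installed a clashing tuple, this packet loses and is
 * dropped.
 */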

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int hash = hash_conntrack(tuple);

	/* Disable BHs the entire time since we need to disable them at
	 * least once for the stats anyway.
	 */
	rcu_read_lock_bh();
	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
		    nf_ct_tuple_equal(tuple, &h->tuple)) {
			NF_CT_STAT_INC(net, found);
			rcu_read_unlock_bh();
			return 1;
		}
		NF_CT_STAT_INC(net, searched);
	}
	rcu_read_unlock_bh();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection. Too bad: we're in trouble anyway. */
static noinline int early_drop(struct net *net, unsigned int hash)
{
	/* Use oldest entry, which is roughly LRU */
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL, *tmp;
	struct hlist_nulls_node *n;
	unsigned int i, cnt = 0;
	int dropped = 0;

	rcu_read_lock();
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
					       hnnode) {
			tmp = nf_ct_tuplehash_to_ctrack(h);
			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
				ct = tmp;
			cnt++;
		}

		if (ct != NULL) {
			if (likely(!nf_ct_is_dying(ct) &&
				   atomic_inc_not_zero(&ct->ct_general.use)))
				break;
			else
				ct = NULL;
		}

		if (cnt >= NF_CT_EVICTION_RANGE)
			break;

		hash = (hash + 1) % nf_conntrack_htable_size;
	}
	rcu_read_unlock();

	if (!ct)
		return dropped;

	if (del_timer(&ct->timeout)) {
		death_by_timeout((unsigned long)ct);
		dropped = 1;
		NF_CT_STAT_INC_ATOMIC(net, early_drop);
	}
	nf_ct_put(ct);
	return dropped;
}
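
/*
 * Eviction policy sketch: when the table is full, early_drop() scans
 * at most NF_CT_EVICTION_RANGE entries starting at the bucket the new
 * connection would hash into (wrapping into the following buckets if
 * needed) and keeps the last non-ASSURED entry it sees. Since
 * insertions happen at the chain head, the last entry is roughly the
 * oldest, which makes this an approximate per-chain LRU rather than a
 * global one. The victim is killed as if its timer had expired.
 */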

struct nf_conn *nf_conntrack_alloc(struct net *net,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	struct nf_conn *ct;

	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
		get_random_bytes(&nf_conntrack_hash_rnd,
				 sizeof(nf_conntrack_hash_rnd));
		nf_conntrack_hash_rnd_initted = 1;
	}

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		unsigned int hash = hash_conntrack(orig);
		if (!early_drop(net, hash)) {
			atomic_dec(&net->ct.count);
			if (net_ratelimit())
				printk(KERN_WARNING
				       "nf_conntrack: table full, dropping"
				       " packet.\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Do not use kmem_cache_zalloc(), as this cache uses
	 * SLAB_DESTROY_BY_RCU.
	 */
	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
	if (ct == NULL) {
		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
		atomic_dec(&net->ct.count);
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Let ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.next
	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
	 */
	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
	       sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
	/* Don't set timer yet: wait for confirmation */
	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
#ifdef CONFIG_NET_NS
	ct->ct_net = net;
#endif

	/*
	 * changes to lookup keys must be done before setting refcnt to 1
	 */
	smp_wmb();
	atomic_set(&ct->ct_general.use, 1);
	return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
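
/*
 * Why nf_conntrack_alloc() must not zero the whole object: with
 * SLAB_DESTROY_BY_RCU, a lockless reader may still be traversing this
 * object after it was freed and reallocated. Leaving the two
 * hnnode.next pointers untouched keeps that reader's chain walk
 * well-formed until it notices the wrong nulls value and restarts. The
 * smp_wmb() then orders the tuple stores before the final refcount
 * store, so a reader that observes use == 1 also observes the new
 * lookup keys.
 */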

void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	nf_ct_ext_destroy(ct);
	atomic_dec(&net->ct.count);
	nf_ct_ext_free(ct);
	kmem_cache_free(nf_conntrack_cachep, ct);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);

/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress. Otherwise it really is unclassifiable. */
static struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net,
	       const struct nf_conntrack_tuple *tuple,
	       struct nf_conntrack_l3proto *l3proto,
	       struct nf_conntrack_l4proto *l4proto,
	       struct sk_buff *skb,
	       unsigned int dataoff)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_expect *exp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
	if (IS_ERR(ct)) {
		pr_debug("Can't allocate conntrack.\n");
		return (struct nf_conntrack_tuple_hash *)ct;
	}

	if (!l4proto->new(ct, skb, dataoff)) {
		nf_conntrack_free(ct);
		pr_debug("init conntrack: can't track with proto module\n");
		return NULL;
	}

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);

	spin_lock_bh(&nf_conntrack_lock);
	exp = nf_ct_find_expectation(net, tuple);
	if (exp) {
		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
			 ct, exp);
		/* Welcome, Mr. Bond. We've been expecting you... */
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = exp->master;
		if (exp->helper) {
			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help)
				rcu_assign_pointer(help->helper, exp->helper);
		}

#ifdef CONFIG_NF_CONNTRACK_MARK
		ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
		ct->secmark = exp->master->secmark;
#endif
		nf_conntrack_get(&ct->master->ct_general);
		NF_CT_STAT_INC(net, expect_new);
	} else {
		__nf_ct_try_assign_helper(ct, GFP_ATOMIC);
		NF_CT_STAT_INC(net, new);
	}

	/* Overload tuple linked list to put us in unconfirmed list. */
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
				 &net->ct.unconfirmed);

	spin_unlock_bh(&nf_conntrack_lock);

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
static inline struct nf_conn *
resolve_normal_ct(struct net *net,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int16_t l3num,
		  u_int8_t protonum,
		  struct nf_conntrack_l3proto *l3proto,
		  struct nf_conntrack_l4proto *l4proto,
		  int *set_reply,
		  enum ip_conntrack_info *ctinfo)
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, l3num, protonum, &tuple, l3proto,
			     l4proto)) {
		pr_debug("resolve_normal_ct: Can't get tuple\n");
		return NULL;
	}

	/* look for tuple match */
	h = nf_conntrack_find_get(net, &tuple);
	if (!h) {
		h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
		if (!h)
			return NULL;
		if (IS_ERR(h))
			return (void *)h;
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
		/* Please set reply bit if this packet OK */
		*set_reply = 1;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
			*ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("nf_conntrack_in: related packet for %p\n",
				 ct);
			*ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
			*ctinfo = IP_CT_NEW;
		}
		*set_reply = 0;
	}
	skb->nfct = &ct->ct_general;
	skb->nfctinfo = *ctinfo;
	return ct;
}
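
/*
 * Classification summary for resolve_normal_ct(), combining the match
 * direction with the conntrack status bits:
 *
 *	reply direction		  -> IP_CT_ESTABLISHED + IP_CT_IS_REPLY
 *	original, IPS_SEEN_REPLY  -> IP_CT_ESTABLISHED
 *	original, IPS_EXPECTED	  -> IP_CT_RELATED
 *	original, otherwise	  -> IP_CT_NEW
 *
 * *set_reply is only raised for the reply direction; nf_conntrack_in()
 * turns it into the IPS_SEEN_REPLY bit once the packet survives the
 * l4proto handler.
 */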

unsigned int
nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
		struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_l3proto *l3proto;
	struct nf_conntrack_l4proto *l4proto;
	unsigned int dataoff;
	u_int8_t protonum;
	int set_reply = 0;
	int ret;

	/* Previously seen (loopback or untracked)?  Ignore. */
	if (skb->nfct) {
		NF_CT_STAT_INC_ATOMIC(net, ignore);
		return NF_ACCEPT;
	}

	/* rcu_read_lock()ed by nf_hook_slow */
	l3proto = __nf_ct_l3proto_find(pf);
	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
				   &dataoff, &protonum);
	if (ret <= 0) {
		pr_debug("not prepared to track yet or error occurred\n");
		NF_CT_STAT_INC_ATOMIC(net, error);
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		return -ret;
	}

	l4proto = __nf_ct_l4proto_find(pf, protonum);

	/* It may be a special packet: error, unclean...
	 * the inverse of the return code tells the netfilter
	 * core what to do with the packet. */
	if (l4proto->error != NULL) {
		ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
		if (ret <= 0) {
			NF_CT_STAT_INC_ATOMIC(net, error);
			NF_CT_STAT_INC_ATOMIC(net, invalid);
			return -ret;
		}
	}

	ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
			       l3proto, l4proto, &set_reply, &ctinfo);
	if (!ct) {
		/* Not valid part of a connection */
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		return NF_ACCEPT;
	}

	if (IS_ERR(ct)) {
		/* Too stressed to deal. */
		NF_CT_STAT_INC_ATOMIC(net, drop);
		return NF_DROP;
	}

	NF_CT_ASSERT(skb->nfct);

	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
	if (ret <= 0) {
		/* Invalid: inverse of the return code tells
		 * the netfilter core what to do */
		pr_debug("nf_conntrack_in: Can't track with proto module\n");
		nf_conntrack_put(skb->nfct);
		skb->nfct = NULL;
		NF_CT_STAT_INC_ATOMIC(net, invalid);
		if (ret == -NF_DROP)
			NF_CT_STAT_INC_ATOMIC(net, drop);
		return -ret;
	}

	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
		nf_conntrack_event_cache(IPCT_REPLY, ct);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_in);
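
/*
 * nf_conntrack_in() is the entry point wired into the netfilter hooks.
 * Its pipeline: skip packets that already carry conntrack state,
 * resolve the l3/l4 protocol handlers, let l4proto->error() weed out
 * malformed packets (ICMP errors and the like), look up or create the
 * conntrack via resolve_normal_ct(), then run the per-protocol state
 * machine in l4proto->packet(). A negative value from a protocol
 * module is inverted into the netfilter verdict: -NF_DROP comes back
 * as NF_DROP after the skb's conntrack reference has been released.
 */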

bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
			  const struct nf_conntrack_tuple *orig)
{
	bool ret;

	rcu_read_lock();
	ret = nf_ct_invert_tuple(inverse, orig,
				 __nf_ct_l3proto_find(orig->src.l3num),
				 __nf_ct_l4proto_find(orig->src.l3num,
						      orig->dst.protonum));
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);

/* Alter reply tuple (maybe alter helper). This is for NAT, and is
   implicitly racy: see __nf_conntrack_confirm */
void nf_conntrack_alter_reply(struct nf_conn *ct,
			      const struct nf_conntrack_tuple *newreply)
{
	struct nf_conn_help *help = nfct_help(ct);

	/* Should be unconfirmed, so not in hash table yet */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	pr_debug("Altering reply tuple of %p to ", ct);
	nf_ct_dump_tuple(newreply);

	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
	if (ct->master || (help && !hlist_empty(&help->expectations)))
		return;

	rcu_read_lock();
	__nf_ct_try_assign_helper(ct, GFP_ATOMIC);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  unsigned long extra_jiffies,
			  int do_acct)
{
	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
	NF_CT_ASSERT(skb);

	/* Only update if this is not a fixed timeout */
	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		goto acct;

	/* If not in hash table, timer will not be active yet */
	if (!nf_ct_is_confirmed(ct)) {
		ct->timeout.expires = extra_jiffies;
	} else {
		unsigned long newtime = jiffies + extra_jiffies;

		/* Only update the timeout if the new timeout is at least
		   HZ jiffies from the old timeout. Need del_timer for race
		   avoidance (may already be dying). */
		if (newtime - ct->timeout.expires >= HZ)
			mod_timer_pending(&ct->timeout, newtime);
	}

acct:
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
			spin_unlock_bh(&ct->lock);
		}
	}
}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);

bool __nf_ct_kill_acct(struct nf_conn *ct,
		       enum ip_conntrack_info ctinfo,
		       const struct sk_buff *skb,
		       int do_acct)
{
	if (do_acct) {
		struct nf_conn_counter *acct;

		acct = nf_conn_acct_find(ct);
		if (acct) {
			spin_lock_bh(&ct->lock);
			acct[CTINFO2DIR(ctinfo)].packets++;
			acct[CTINFO2DIR(ctinfo)].bytes +=
				skb->len - skb_network_offset(skb);
			spin_unlock_bh(&ct->lock);
		}
	}

	if (del_timer(&ct->timeout)) {
		ct->timeout.function((unsigned long)ct);
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and alike. This needs to be
 * in ip_conntrack_core, since we don't want the protocols to autoload
 * or depend on ctnetlink */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
		return -EINVAL;

	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);

	return 0;
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

int nf_ct_port_nlattr_tuple_size(void)
{
	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;

	/* This ICMP is in reverse direction to the packet which caused it */
	ct = nf_ct_get(skb, &ctinfo);
	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
	else
		ctinfo = IP_CT_RELATED;

	/* Attach to new skbuff, and increment count */
	nskb->nfct = &ct->ct_general;
	nskb->nfctinfo = ctinfo;
	nf_conntrack_get(nskb->nfct);
}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
		void *data, unsigned int *bucket)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
		hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
			ct = nf_ct_tuplehash_to_ctrack(h);
			if (iter(ct, data))
				goto found;
		}
	}
	hlist_nulls_for_each_entry(h, n, &net->ct.unconfirmed, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (iter(ct, data))
			set_bit(IPS_DYING_BIT, &ct->status);
	}
	spin_unlock_bh(&nf_conntrack_lock);
	return NULL;
found:
	atomic_inc(&ct->ct_general.use);
	spin_unlock_bh(&nf_conntrack_lock);
	return ct;
}

void nf_ct_iterate_cleanup(struct net *net,
			   int (*iter)(struct nf_conn *i, void *data),
			   void *data)
{
	struct nf_conn *ct;
	unsigned int bucket = 0;

	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
		/* Time to push up daisies... */
		if (del_timer(&ct->timeout))
			death_by_timeout((unsigned long)ct);
		/* ... else the timer will get him soon. */

		nf_ct_put(ct);
	}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);

struct __nf_ct_flush_report {
	u32 pid;
	int report;
};

static int kill_report(struct nf_conn *i, void *data)
{
	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;

	/* If we fail to deliver the event, death_by_timeout() will retry */
	if (nf_conntrack_event_report(IPCT_DESTROY, i,
				      fr->pid, fr->report) < 0)
		return 1;

	/* Avoid the delivery of the destroy event in death_by_timeout(). */
	set_bit(IPS_DYING_BIT, &i->status);
	return 1;
}

static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
{
	if (vmalloced)
		vfree(hash);
	else
		free_pages((unsigned long)hash,
			   get_order(sizeof(struct hlist_head) * size));
}
EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);

void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
{
	struct __nf_ct_flush_report fr = {
		.pid = pid,
		.report = report,
	};
	nf_ct_iterate_cleanup(net, kill_report, &fr);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);

static void nf_ct_release_dying_list(struct net *net)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct hlist_nulls_node *n;

	spin_lock_bh(&nf_conntrack_lock);
	hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		/* never fails to remove them, no listeners at this point */
		nf_ct_kill(ct);
	}
	spin_unlock_bh(&nf_conntrack_lock);
}

static void nf_conntrack_cleanup_init_net(void)
{
	nf_conntrack_helper_fini();
	nf_conntrack_proto_fini();
	kmem_cache_destroy(nf_conntrack_cachep);
}

static void nf_conntrack_cleanup_net(struct net *net)
{
 i_see_dead_people:
	nf_ct_iterate_cleanup(net, kill_all, NULL);
	nf_ct_release_dying_list(net);
	if (atomic_read(&net->ct.count) != 0) {
		schedule();
		goto i_see_dead_people;
	}
	/* wait until all references to nf_conntrack_untracked are dropped */
	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
		schedule();

	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
			     nf_conntrack_htable_size);
	nf_conntrack_ecache_fini(net);
	nf_conntrack_acct_fini(net);
	nf_conntrack_expect_fini(net);
	free_percpu(net->ct.stat);
}

/* Mishearing the voices in his head, our hero wonders how he's
   supposed to kill the mall. */
void nf_conntrack_cleanup(struct net *net)
{
	if (net_eq(net, &init_net))
		rcu_assign_pointer(ip_ct_attach, NULL);

	/* This makes sure all current packets have passed through
	   netfilter framework. Roll on, two-stage module
	   delete... */
	synchronize_net();

	nf_conntrack_cleanup_net(net);

	if (net_eq(net, &init_net)) {
		rcu_assign_pointer(nf_ct_destroy, NULL);
		nf_conntrack_cleanup_init_net();
	}
}

void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
{
	struct hlist_nulls_head *hash;
	unsigned int nr_slots, i;
	size_t sz;

	*vmalloced = 0;

	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
	sz = nr_slots * sizeof(struct hlist_nulls_head);
	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					get_order(sz));
	if (!hash) {
		*vmalloced = 1;
		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
		hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	}

	if (hash && nulls)
		for (i = 0; i < nr_slots; i++)
			INIT_HLIST_NULLS_HEAD(&hash[i], i);

	return hash;
}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
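
/*
 * Two details of the allocator above worth spelling out: the requested
 * size is rounded up so the bucket array fills whole pages (nothing is
 * wasted in the trailing page), and when "nulls" is set each empty
 * bucket head is initialized to a nulls marker carrying its own index
 * i. That marker is what __nf_conntrack_find() compares against the
 * expected bucket to detect that a lockless walk drifted into another
 * chain.
 */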

int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
{
	int i, bucket, vmalloced, old_vmalloced;
	unsigned int hashsize, old_size;
	int rnd;
	struct hlist_nulls_head *hash, *old_hash;
	struct nf_conntrack_tuple_hash *h;

	/* On boot, we can set this without any fancy locking. */
	if (!nf_conntrack_htable_size)
		return param_set_uint(val, kp);

	hashsize = simple_strtoul(val, NULL, 0);
	if (!hashsize)
		return -EINVAL;

	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
	if (!hash)
		return -ENOMEM;

	/* We have to rehash for the new table anyway, so we also can
	 * use a new random seed */
	get_random_bytes(&rnd, sizeof(rnd));

	/* Lookups in the old hash might happen in parallel, which means we
	 * might get false negatives during connection lookup. New connections
	 * created because of a false negative won't make it into the hash
	 * though since that required taking the lock.
	 */
	spin_lock_bh(&nf_conntrack_lock);
	for (i = 0; i < nf_conntrack_htable_size; i++) {
		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
			h = hlist_nulls_entry(init_net.ct.hash[i].first,
					struct nf_conntrack_tuple_hash, hnnode);
			hlist_nulls_del_rcu(&h->hnnode);
			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
		}
	}
	old_size = nf_conntrack_htable_size;
	old_vmalloced = init_net.ct.hash_vmalloc;
	old_hash = init_net.ct.hash;

	nf_conntrack_htable_size = hashsize;
	init_net.ct.hash_vmalloc = vmalloced;
	init_net.ct.hash = hash;
	nf_conntrack_hash_rnd = rnd;
	spin_unlock_bh(&nf_conntrack_lock);

	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);

module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);
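
/*
 * Because the parameter is registered with mode 0600 and a custom set
 * handler, the hash table can be resized on a live system by root,
 * e.g.:
 *
 *	echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * Before the table exists (boot/module load) the early path simply
 * calls param_set_uint() and the value is picked up at init time.
 */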

static int nf_conntrack_init_init_net(void)
{
	int max_factor = 8;
	int ret;

	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
	if (!nf_conntrack_htable_size) {
		nf_conntrack_htable_size
			= (((totalram_pages << PAGE_SHIFT) / 16384)
			   / sizeof(struct hlist_head));
		if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
			nf_conntrack_htable_size = 16384;
		if (nf_conntrack_htable_size < 32)
			nf_conntrack_htable_size = 32;

		/* Use a max. factor of four by default to get the same max as
		 * with the old struct list_heads. When a table size is given
		 * we use the old value of 8 to avoid reducing the max.
		 * entries. */
		max_factor = 4;
	}
	nf_conntrack_max = max_factor * nf_conntrack_htable_size;

	printk("nf_conntrack version %s (%u buckets, %d max)\n",
	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
	       nf_conntrack_max);

	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
						sizeof(struct nf_conn),
						0, SLAB_DESTROY_BY_RCU, NULL);
	if (!nf_conntrack_cachep) {
		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
		ret = -ENOMEM;
		goto err_cache;
	}

	ret = nf_conntrack_proto_init();
	if (ret < 0)
		goto err_proto;

	ret = nf_conntrack_helper_init();
	if (ret < 0)
		goto err_helper;

	return 0;

err_helper:
	nf_conntrack_proto_fini();
err_proto:
	kmem_cache_destroy(nf_conntrack_cachep);
err_cache:
	return ret;
}
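
/*
 * Worked example of the sizing above, assuming i386 with 4 KB pages
 * and 4-byte list heads: a 32 MB machine gives 32 MB / 16384 / 4 = 512
 * buckets, hence nf_conntrack_max = 4 * 512 = 2048 entries; anything
 * with 1 GB or more of RAM is capped at 16384 buckets and therefore
 * 65536 entries.
 */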

/*
 * We need to use special "null" values, not used in hash table
 */
#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
#define DYING_NULLS_VAL		((1<<30)+1)

static int nf_conntrack_init_net(struct net *net)
{
	int ret;

	atomic_set(&net->ct.count, 0);
	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
	if (!net->ct.stat) {
		ret = -ENOMEM;
		goto err_stat;
	}
	net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
					     &net->ct.hash_vmalloc, 1);
	if (!net->ct.hash) {
		ret = -ENOMEM;
		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
		goto err_hash;
	}
	ret = nf_conntrack_expect_init(net);
	if (ret < 0)
		goto err_expect;
	ret = nf_conntrack_acct_init(net);
	if (ret < 0)
		goto err_acct;
	ret = nf_conntrack_ecache_init(net);
	if (ret < 0)
		goto err_ecache;

	/* Set up fake conntrack:
	    - to never be deleted, not in any hashes */
#ifdef CONFIG_NET_NS
	nf_conntrack_untracked.ct_net = &init_net;
#endif
	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
	/* - and make it look like a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);

	return 0;

err_ecache:
	nf_conntrack_acct_fini(net);
err_acct:
	nf_conntrack_expect_fini(net);
err_expect:
	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
			     nf_conntrack_htable_size);
err_hash:
	free_percpu(net->ct.stat);
err_stat:
	return ret;
}

s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
			enum ip_conntrack_dir dir,
			u32 seq);
EXPORT_SYMBOL_GPL(nf_ct_nat_offset);

int nf_conntrack_init(struct net *net)
{
	int ret;

	if (net_eq(net, &init_net)) {
		ret = nf_conntrack_init_init_net();
		if (ret < 0)
			goto out_init_net;
	}
	ret = nf_conntrack_init_net(net);
	if (ret < 0)
		goto out_net;

	if (net_eq(net, &init_net)) {
		/* For use by REJECT target */
		rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
		rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);

		/* Howto get NAT offsets */
		rcu_assign_pointer(nf_ct_nat_offset, NULL);
	}
	return 0;

out_net:
	if (net_eq(net, &init_net))
		nf_conntrack_cleanup_init_net();
out_init_net:
	return ret;
}