]> bbs.cooldavid.org Git - net-next-2.6.git/blame - net/netfilter/nf_conntrack_expect.c
scm: lower SCM_MAX_FD
[net-next-2.6.git] / net / netfilter / nf_conntrack_expect.c
CommitLineData
77ab9cff
MJ
1/* Expectation handling for nf_conntrack. */
2
3/* (C) 1999-2001 Paul `Rusty' Russell
4 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
5 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/types.h>
13#include <linux/netfilter.h>
14#include <linux/skbuff.h>
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/stddef.h>
18#include <linux/slab.h>
19#include <linux/err.h>
20#include <linux/percpu.h>
21#include <linux/kernel.h>
a71c0855 22#include <linux/jhash.h>
457c4cbc 23#include <net/net_namespace.h>
77ab9cff
MJ
24
25#include <net/netfilter/nf_conntrack.h>
26#include <net/netfilter/nf_conntrack_core.h>
27#include <net/netfilter/nf_conntrack_expect.h>
28#include <net/netfilter/nf_conntrack_helper.h>
29#include <net/netfilter/nf_conntrack_tuple.h>
5d0aa2cc 30#include <net/netfilter/nf_conntrack_zones.h>
77ab9cff 31
a71c0855
PM
32unsigned int nf_ct_expect_hsize __read_mostly;
33EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);
34
35static unsigned int nf_ct_expect_hash_rnd __read_mostly;
f264a7df 36unsigned int nf_ct_expect_max __read_mostly;
a71c0855 37static int nf_ct_expect_hash_rnd_initted __read_mostly;
a71c0855 38
e9c1b084 39static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
77ab9cff 40
bc01befd
PNA
41static HLIST_HEAD(nf_ct_userspace_expect_list);
42
77ab9cff 43/* nf_conntrack_expect helper functions */
ebbf41df
PNA
44void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
45 u32 pid, int report)
77ab9cff
MJ
46{
47 struct nf_conn_help *master_help = nfct_help(exp->master);
9b03f38d 48 struct net *net = nf_ct_exp_net(exp);
77ab9cff 49
77ab9cff
MJ
50 NF_CT_ASSERT(!timer_pending(&exp->timeout));
51
7d0742da 52 hlist_del_rcu(&exp->hnode);
9b03f38d 53 net->ct.expect_count--;
a71c0855 54
b560580a 55 hlist_del(&exp->lnode);
bc01befd
PNA
56 if (!(exp->flags & NF_CT_EXPECT_USERSPACE))
57 master_help->expecting[exp->class]--;
58
ebbf41df 59 nf_ct_expect_event_report(IPEXP_DESTROY, exp, pid, report);
6823645d 60 nf_ct_expect_put(exp);
b560580a 61
0d55af87 62 NF_CT_STAT_INC(net, expect_delete);
77ab9cff 63}
ebbf41df 64EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
77ab9cff 65
6823645d 66static void nf_ct_expectation_timed_out(unsigned long ul_expect)
77ab9cff
MJ
67{
68 struct nf_conntrack_expect *exp = (void *)ul_expect;
69
f8ba1aff 70 spin_lock_bh(&nf_conntrack_lock);
77ab9cff 71 nf_ct_unlink_expect(exp);
f8ba1aff 72 spin_unlock_bh(&nf_conntrack_lock);
6823645d 73 nf_ct_expect_put(exp);
77ab9cff
MJ
74}
75
a71c0855
PM
76static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple)
77{
34498825
PM
78 unsigned int hash;
79
a71c0855 80 if (unlikely(!nf_ct_expect_hash_rnd_initted)) {
af07d241
HPP
81 get_random_bytes(&nf_ct_expect_hash_rnd,
82 sizeof(nf_ct_expect_hash_rnd));
a71c0855
PM
83 nf_ct_expect_hash_rnd_initted = 1;
84 }
85
34498825 86 hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
a71c0855 87 (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
34498825
PM
88 (__force __u16)tuple->dst.u.all) ^ nf_ct_expect_hash_rnd);
89 return ((u64)hash * nf_ct_expect_hsize) >> 32;
a71c0855
PM
90}
91
77ab9cff 92struct nf_conntrack_expect *
5d0aa2cc
PM
93__nf_ct_expect_find(struct net *net, u16 zone,
94 const struct nf_conntrack_tuple *tuple)
77ab9cff
MJ
95{
96 struct nf_conntrack_expect *i;
a71c0855
PM
97 struct hlist_node *n;
98 unsigned int h;
99
9b03f38d 100 if (!net->ct.expect_count)
a71c0855 101 return NULL;
77ab9cff 102
a71c0855 103 h = nf_ct_expect_dst_hash(tuple);
9b03f38d 104 hlist_for_each_entry_rcu(i, n, &net->ct.expect_hash[h], hnode) {
5d0aa2cc
PM
105 if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
106 nf_ct_zone(i->master) == zone)
77ab9cff
MJ
107 return i;
108 }
109 return NULL;
110}
6823645d 111EXPORT_SYMBOL_GPL(__nf_ct_expect_find);
77ab9cff
MJ
112
113/* Just find a expectation corresponding to a tuple. */
114struct nf_conntrack_expect *
5d0aa2cc
PM
115nf_ct_expect_find_get(struct net *net, u16 zone,
116 const struct nf_conntrack_tuple *tuple)
77ab9cff
MJ
117{
118 struct nf_conntrack_expect *i;
119
7d0742da 120 rcu_read_lock();
5d0aa2cc 121 i = __nf_ct_expect_find(net, zone, tuple);
7d0742da
PM
122 if (i && !atomic_inc_not_zero(&i->use))
123 i = NULL;
124 rcu_read_unlock();
77ab9cff
MJ
125
126 return i;
127}
6823645d 128EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);
77ab9cff
MJ
129
130/* If an expectation for this connection is found, it gets delete from
131 * global list then returned. */
132struct nf_conntrack_expect *
5d0aa2cc
PM
133nf_ct_find_expectation(struct net *net, u16 zone,
134 const struct nf_conntrack_tuple *tuple)
77ab9cff 135{
359b9ab6
PM
136 struct nf_conntrack_expect *i, *exp = NULL;
137 struct hlist_node *n;
138 unsigned int h;
139
9b03f38d 140 if (!net->ct.expect_count)
359b9ab6 141 return NULL;
ece00641 142
359b9ab6 143 h = nf_ct_expect_dst_hash(tuple);
9b03f38d 144 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
359b9ab6 145 if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
5d0aa2cc
PM
146 nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
147 nf_ct_zone(i->master) == zone) {
359b9ab6
PM
148 exp = i;
149 break;
150 }
151 }
ece00641
YK
152 if (!exp)
153 return NULL;
77ab9cff 154
77ab9cff
MJ
155 /* If master is not in hash table yet (ie. packet hasn't left
156 this machine yet), how can other end know about expected?
157 Hence these are not the droids you are looking for (if
158 master ct never got confirmed, we'd hold a reference to it
159 and weird things would happen to future packets). */
ece00641
YK
160 if (!nf_ct_is_confirmed(exp->master))
161 return NULL;
162
163 if (exp->flags & NF_CT_EXPECT_PERMANENT) {
164 atomic_inc(&exp->use);
165 return exp;
166 } else if (del_timer(&exp->timeout)) {
167 nf_ct_unlink_expect(exp);
168 return exp;
77ab9cff 169 }
ece00641 170
77ab9cff
MJ
171 return NULL;
172}
173
174/* delete all expectations for this conntrack */
175void nf_ct_remove_expectations(struct nf_conn *ct)
176{
77ab9cff 177 struct nf_conn_help *help = nfct_help(ct);
b560580a
PM
178 struct nf_conntrack_expect *exp;
179 struct hlist_node *n, *next;
77ab9cff
MJ
180
181 /* Optimization: most connection never expect any others. */
6002f266 182 if (!help)
77ab9cff
MJ
183 return;
184
b560580a
PM
185 hlist_for_each_entry_safe(exp, n, next, &help->expectations, lnode) {
186 if (del_timer(&exp->timeout)) {
187 nf_ct_unlink_expect(exp);
188 nf_ct_expect_put(exp);
601e68e1 189 }
77ab9cff
MJ
190 }
191}
13b18339 192EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);
77ab9cff
MJ
193
194/* Would two expected things clash? */
195static inline int expect_clash(const struct nf_conntrack_expect *a,
196 const struct nf_conntrack_expect *b)
197{
198 /* Part covered by intersection of masks must be unequal,
199 otherwise they clash */
d4156e8c 200 struct nf_conntrack_tuple_mask intersect_mask;
77ab9cff
MJ
201 int count;
202
77ab9cff 203 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
77ab9cff
MJ
204
205 for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
206 intersect_mask.src.u3.all[count] =
207 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
208 }
209
77ab9cff
MJ
210 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
211}
212
213static inline int expect_matches(const struct nf_conntrack_expect *a,
214 const struct nf_conntrack_expect *b)
215{
f64f9e71
JP
216 return a->master == b->master && a->class == b->class &&
217 nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
5d0aa2cc
PM
218 nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
219 nf_ct_zone(a->master) == nf_ct_zone(b->master);
77ab9cff
MJ
220}
221
222/* Generally a bad idea to call this: could have matched already. */
6823645d 223void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
77ab9cff 224{
f8ba1aff 225 spin_lock_bh(&nf_conntrack_lock);
4e1d4e6c
PM
226 if (del_timer(&exp->timeout)) {
227 nf_ct_unlink_expect(exp);
228 nf_ct_expect_put(exp);
77ab9cff 229 }
f8ba1aff 230 spin_unlock_bh(&nf_conntrack_lock);
77ab9cff 231}
6823645d 232EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);
77ab9cff
MJ
233
234/* We don't increase the master conntrack refcount for non-fulfilled
235 * conntracks. During the conntrack destruction, the expectations are
236 * always killed before the conntrack itself */
6823645d 237struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
77ab9cff
MJ
238{
239 struct nf_conntrack_expect *new;
240
6823645d 241 new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
77ab9cff
MJ
242 if (!new)
243 return NULL;
244
245 new->master = me;
246 atomic_set(&new->use, 1);
247 return new;
248}
6823645d 249EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
77ab9cff 250
6002f266 251void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
76108cea 252 u_int8_t family,
1d9d7522
PM
253 const union nf_inet_addr *saddr,
254 const union nf_inet_addr *daddr,
255 u_int8_t proto, const __be16 *src, const __be16 *dst)
d6a9b650
PM
256{
257 int len;
258
259 if (family == AF_INET)
260 len = 4;
261 else
262 len = 16;
263
264 exp->flags = 0;
6002f266 265 exp->class = class;
d6a9b650
PM
266 exp->expectfn = NULL;
267 exp->helper = NULL;
268 exp->tuple.src.l3num = family;
269 exp->tuple.dst.protonum = proto;
d6a9b650
PM
270
271 if (saddr) {
272 memcpy(&exp->tuple.src.u3, saddr, len);
273 if (sizeof(exp->tuple.src.u3) > len)
274 /* address needs to be cleared for nf_ct_tuple_equal */
275 memset((void *)&exp->tuple.src.u3 + len, 0x00,
276 sizeof(exp->tuple.src.u3) - len);
277 memset(&exp->mask.src.u3, 0xFF, len);
278 if (sizeof(exp->mask.src.u3) > len)
279 memset((void *)&exp->mask.src.u3 + len, 0x00,
280 sizeof(exp->mask.src.u3) - len);
281 } else {
282 memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
283 memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
284 }
285
d6a9b650 286 if (src) {
a34c4589
AV
287 exp->tuple.src.u.all = *src;
288 exp->mask.src.u.all = htons(0xFFFF);
d6a9b650
PM
289 } else {
290 exp->tuple.src.u.all = 0;
291 exp->mask.src.u.all = 0;
292 }
293
d4156e8c
PM
294 memcpy(&exp->tuple.dst.u3, daddr, len);
295 if (sizeof(exp->tuple.dst.u3) > len)
296 /* address needs to be cleared for nf_ct_tuple_equal */
297 memset((void *)&exp->tuple.dst.u3 + len, 0x00,
298 sizeof(exp->tuple.dst.u3) - len);
299
a34c4589 300 exp->tuple.dst.u.all = *dst;
d6a9b650 301}
6823645d 302EXPORT_SYMBOL_GPL(nf_ct_expect_init);
d6a9b650 303
7d0742da
PM
304static void nf_ct_expect_free_rcu(struct rcu_head *head)
305{
306 struct nf_conntrack_expect *exp;
307
308 exp = container_of(head, struct nf_conntrack_expect, rcu);
309 kmem_cache_free(nf_ct_expect_cachep, exp);
310}
311
6823645d 312void nf_ct_expect_put(struct nf_conntrack_expect *exp)
77ab9cff
MJ
313{
314 if (atomic_dec_and_test(&exp->use))
7d0742da 315 call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
77ab9cff 316}
6823645d 317EXPORT_SYMBOL_GPL(nf_ct_expect_put);
77ab9cff 318
6823645d 319static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
77ab9cff
MJ
320{
321 struct nf_conn_help *master_help = nfct_help(exp->master);
9b03f38d 322 struct net *net = nf_ct_exp_net(exp);
6002f266 323 const struct nf_conntrack_expect_policy *p;
a71c0855 324 unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
77ab9cff
MJ
325
326 atomic_inc(&exp->use);
b560580a 327
bc01befd
PNA
328 if (master_help) {
329 hlist_add_head(&exp->lnode, &master_help->expectations);
330 master_help->expecting[exp->class]++;
331 } else if (exp->flags & NF_CT_EXPECT_USERSPACE)
332 hlist_add_head(&exp->lnode, &nf_ct_userspace_expect_list);
a71c0855 333
9b03f38d
AD
334 hlist_add_head_rcu(&exp->hnode, &net->ct.expect_hash[h]);
335 net->ct.expect_count++;
77ab9cff 336
6823645d
PM
337 setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
338 (unsigned long)exp);
bc01befd
PNA
339 if (master_help) {
340 p = &master_help->helper->expect_policy[exp->class];
341 exp->timeout.expires = jiffies + p->timeout * HZ;
342 }
77ab9cff
MJ
343 add_timer(&exp->timeout);
344
77ab9cff 345 atomic_inc(&exp->use);
0d55af87 346 NF_CT_STAT_INC(net, expect_create);
77ab9cff
MJ
347}
348
349/* Race with expectations being used means we could have none to find; OK. */
6002f266
PM
350static void evict_oldest_expect(struct nf_conn *master,
351 struct nf_conntrack_expect *new)
77ab9cff 352{
b560580a 353 struct nf_conn_help *master_help = nfct_help(master);
6002f266 354 struct nf_conntrack_expect *exp, *last = NULL;
b560580a 355 struct hlist_node *n;
77ab9cff 356
6002f266
PM
357 hlist_for_each_entry(exp, n, &master_help->expectations, lnode) {
358 if (exp->class == new->class)
359 last = exp;
360 }
b560580a 361
6002f266
PM
362 if (last && del_timer(&last->timeout)) {
363 nf_ct_unlink_expect(last);
364 nf_ct_expect_put(last);
77ab9cff
MJ
365 }
366}
367
368static inline int refresh_timer(struct nf_conntrack_expect *i)
369{
370 struct nf_conn_help *master_help = nfct_help(i->master);
6002f266 371 const struct nf_conntrack_expect_policy *p;
77ab9cff
MJ
372
373 if (!del_timer(&i->timeout))
374 return 0;
375
6002f266
PM
376 p = &master_help->helper->expect_policy[i->class];
377 i->timeout.expires = jiffies + p->timeout * HZ;
77ab9cff
MJ
378 add_timer(&i->timeout);
379 return 1;
380}
381
19abb7b0 382static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
77ab9cff 383{
6002f266 384 const struct nf_conntrack_expect_policy *p;
77ab9cff
MJ
385 struct nf_conntrack_expect *i;
386 struct nf_conn *master = expect->master;
387 struct nf_conn_help *master_help = nfct_help(master);
9b03f38d 388 struct net *net = nf_ct_exp_net(expect);
a71c0855
PM
389 struct hlist_node *n;
390 unsigned int h;
83731671 391 int ret = 1;
77ab9cff 392
bc01befd
PNA
393 /* Don't allow expectations created from kernel-space with no helper */
394 if (!(expect->flags & NF_CT_EXPECT_USERSPACE) &&
395 (!master_help || (master_help && !master_help->helper))) {
3c158f7f
PM
396 ret = -ESHUTDOWN;
397 goto out;
398 }
a71c0855 399 h = nf_ct_expect_dst_hash(&expect->tuple);
9b03f38d 400 hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) {
77ab9cff
MJ
401 if (expect_matches(i, expect)) {
402 /* Refresh timer: if it's dying, ignore.. */
403 if (refresh_timer(i)) {
404 ret = 0;
405 goto out;
406 }
407 } else if (expect_clash(i, expect)) {
408 ret = -EBUSY;
409 goto out;
410 }
411 }
412 /* Will be over limit? */
bc01befd
PNA
413 if (master_help) {
414 p = &master_help->helper->expect_policy[expect->class];
415 if (p->max_expected &&
416 master_help->expecting[expect->class] >= p->max_expected) {
417 evict_oldest_expect(master, expect);
418 if (master_help->expecting[expect->class]
419 >= p->max_expected) {
420 ret = -EMFILE;
421 goto out;
422 }
6002f266
PM
423 }
424 }
77ab9cff 425
9b03f38d 426 if (net->ct.expect_count >= nf_ct_expect_max) {
f264a7df
PM
427 if (net_ratelimit())
428 printk(KERN_WARNING
3d89e9cf 429 "nf_conntrack: expectation table full\n");
f264a7df 430 ret = -EMFILE;
f264a7df 431 }
19abb7b0
PNA
432out:
433 return ret;
434}
435
83731671
PNA
436int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
437 u32 pid, int report)
19abb7b0
PNA
438{
439 int ret;
440
441 spin_lock_bh(&nf_conntrack_lock);
442 ret = __nf_ct_expect_check(expect);
83731671 443 if (ret <= 0)
19abb7b0 444 goto out;
f264a7df 445
83731671 446 ret = 0;
6823645d 447 nf_ct_expect_insert(expect);
f8ba1aff 448 spin_unlock_bh(&nf_conntrack_lock);
83731671 449 nf_ct_expect_event_report(IPEXP_NEW, expect, pid, report);
77ab9cff 450 return ret;
19abb7b0
PNA
451out:
452 spin_unlock_bh(&nf_conntrack_lock);
19abb7b0
PNA
453 return ret;
454}
455EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);
456
bc01befd
PNA
457void nf_ct_remove_userspace_expectations(void)
458{
459 struct nf_conntrack_expect *exp;
460 struct hlist_node *n, *next;
461
462 hlist_for_each_entry_safe(exp, n, next,
463 &nf_ct_userspace_expect_list, lnode) {
464 if (del_timer(&exp->timeout)) {
465 nf_ct_unlink_expect(exp);
466 nf_ct_expect_put(exp);
467 }
468 }
469}
470EXPORT_SYMBOL_GPL(nf_ct_remove_userspace_expectations);
471
77ab9cff 472#ifdef CONFIG_PROC_FS
5d08ad44 473struct ct_expect_iter_state {
dc5129f8 474 struct seq_net_private p;
5d08ad44
PM
475 unsigned int bucket;
476};
477
478static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
77ab9cff 479{
dc5129f8 480 struct net *net = seq_file_net(seq);
5d08ad44 481 struct ct_expect_iter_state *st = seq->private;
7d0742da 482 struct hlist_node *n;
77ab9cff 483
5d08ad44 484 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
9b03f38d 485 n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
7d0742da
PM
486 if (n)
487 return n;
5d08ad44
PM
488 }
489 return NULL;
490}
77ab9cff 491
5d08ad44
PM
492static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
493 struct hlist_node *head)
494{
dc5129f8 495 struct net *net = seq_file_net(seq);
5d08ad44 496 struct ct_expect_iter_state *st = seq->private;
77ab9cff 497
7d0742da 498 head = rcu_dereference(head->next);
5d08ad44
PM
499 while (head == NULL) {
500 if (++st->bucket >= nf_ct_expect_hsize)
77ab9cff 501 return NULL;
9b03f38d 502 head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
77ab9cff 503 }
5d08ad44 504 return head;
77ab9cff
MJ
505}
506
5d08ad44 507static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
77ab9cff 508{
5d08ad44 509 struct hlist_node *head = ct_expect_get_first(seq);
77ab9cff 510
5d08ad44
PM
511 if (head)
512 while (pos && (head = ct_expect_get_next(seq, head)))
513 pos--;
514 return pos ? NULL : head;
515}
77ab9cff 516
5d08ad44 517static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
7d0742da 518 __acquires(RCU)
5d08ad44 519{
7d0742da 520 rcu_read_lock();
5d08ad44
PM
521 return ct_expect_get_idx(seq, *pos);
522}
77ab9cff 523
5d08ad44
PM
524static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
525{
526 (*pos)++;
527 return ct_expect_get_next(seq, v);
77ab9cff
MJ
528}
529
5d08ad44 530static void exp_seq_stop(struct seq_file *seq, void *v)
7d0742da 531 __releases(RCU)
77ab9cff 532{
7d0742da 533 rcu_read_unlock();
77ab9cff
MJ
534}
535
536static int exp_seq_show(struct seq_file *s, void *v)
537{
5d08ad44 538 struct nf_conntrack_expect *expect;
b87921bd 539 struct nf_conntrack_helper *helper;
5d08ad44 540 struct hlist_node *n = v;
359b9ab6 541 char *delim = "";
5d08ad44
PM
542
543 expect = hlist_entry(n, struct nf_conntrack_expect, hnode);
77ab9cff
MJ
544
545 if (expect->timeout.function)
546 seq_printf(s, "%ld ", timer_pending(&expect->timeout)
547 ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
548 else
549 seq_printf(s, "- ");
550 seq_printf(s, "l3proto = %u proto=%u ",
551 expect->tuple.src.l3num,
552 expect->tuple.dst.protonum);
553 print_tuple(s, &expect->tuple,
554 __nf_ct_l3proto_find(expect->tuple.src.l3num),
605dcad6 555 __nf_ct_l4proto_find(expect->tuple.src.l3num,
77ab9cff 556 expect->tuple.dst.protonum));
4bb119ea 557
359b9ab6
PM
558 if (expect->flags & NF_CT_EXPECT_PERMANENT) {
559 seq_printf(s, "PERMANENT");
560 delim = ",";
561 }
bc01befd 562 if (expect->flags & NF_CT_EXPECT_INACTIVE) {
359b9ab6 563 seq_printf(s, "%sINACTIVE", delim);
bc01befd
PNA
564 delim = ",";
565 }
566 if (expect->flags & NF_CT_EXPECT_USERSPACE)
567 seq_printf(s, "%sUSERSPACE", delim);
4bb119ea 568
b87921bd
PM
569 helper = rcu_dereference(nfct_help(expect->master)->helper);
570 if (helper) {
571 seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
572 if (helper->expect_policy[expect->class].name)
573 seq_printf(s, "/%s",
574 helper->expect_policy[expect->class].name);
575 }
576
77ab9cff
MJ
577 return seq_putc(s, '\n');
578}
579
56b3d975 580static const struct seq_operations exp_seq_ops = {
77ab9cff
MJ
581 .start = exp_seq_start,
582 .next = exp_seq_next,
583 .stop = exp_seq_stop,
584 .show = exp_seq_show
585};
586
587static int exp_open(struct inode *inode, struct file *file)
588{
dc5129f8 589 return seq_open_net(inode, file, &exp_seq_ops,
e2da5913 590 sizeof(struct ct_expect_iter_state));
77ab9cff
MJ
591}
592
5d08ad44 593static const struct file_operations exp_file_ops = {
77ab9cff
MJ
594 .owner = THIS_MODULE,
595 .open = exp_open,
596 .read = seq_read,
597 .llseek = seq_lseek,
dc5129f8 598 .release = seq_release_net,
77ab9cff
MJ
599};
600#endif /* CONFIG_PROC_FS */
/* Register /proc/net/nf_conntrack_expect for @net (no-op without procfs). */
static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc;

	proc = proc_net_fops_create(net, "nf_conntrack_expect", 0440, &exp_file_ops);
	if (!proc)
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */
	return 0;
}
/* Remove the proc entry registered by exp_proc_init(). */
static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "nf_conntrack_expect");
#endif /* CONFIG_PROC_FS */
}
13ccdfc2 621module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
a71c0855 622
9b03f38d 623int nf_conntrack_expect_init(struct net *net)
e9c1b084 624{
a71c0855
PM
625 int err = -ENOMEM;
626
08f6547d
AD
627 if (net_eq(net, &init_net)) {
628 if (!nf_ct_expect_hsize) {
d696c7bd 629 nf_ct_expect_hsize = net->ct.htable_size / 256;
08f6547d
AD
630 if (!nf_ct_expect_hsize)
631 nf_ct_expect_hsize = 1;
632 }
633 nf_ct_expect_max = nf_ct_expect_hsize * 4;
a71c0855
PM
634 }
635
9b03f38d
AD
636 net->ct.expect_count = 0;
637 net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
ea781f19 638 &net->ct.expect_vmalloc, 0);
9b03f38d 639 if (net->ct.expect_hash == NULL)
a71c0855 640 goto err1;
e9c1b084 641
08f6547d
AD
642 if (net_eq(net, &init_net)) {
643 nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
e9c1b084 644 sizeof(struct nf_conntrack_expect),
20c2df83 645 0, 0, NULL);
08f6547d
AD
646 if (!nf_ct_expect_cachep)
647 goto err2;
648 }
e9c1b084 649
dc5129f8 650 err = exp_proc_init(net);
e9c1b084 651 if (err < 0)
a71c0855 652 goto err3;
e9c1b084
PM
653
654 return 0;
655
a71c0855 656err3:
08f6547d
AD
657 if (net_eq(net, &init_net))
658 kmem_cache_destroy(nf_ct_expect_cachep);
12293bf9 659err2:
9b03f38d 660 nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
a71c0855 661 nf_ct_expect_hsize);
a71c0855 662err1:
e9c1b084
PM
663 return err;
664}
665
9b03f38d 666void nf_conntrack_expect_fini(struct net *net)
e9c1b084 667{
dc5129f8 668 exp_proc_remove(net);
308ff823
JDB
669 if (net_eq(net, &init_net)) {
670 rcu_barrier(); /* Wait for call_rcu() before destroy */
08f6547d 671 kmem_cache_destroy(nf_ct_expect_cachep);
308ff823 672 }
9b03f38d 673 nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
a71c0855 674 nf_ct_expect_hsize);
e9c1b084 675}