/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>

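/* A cache entry binds one flow key to the object the resolver returned
 * for it.  ->genid is a snapshot of the global flow_cache_genid; once
 * the global counter moves on (e.g. after a policy change), the entry
 * is stale and gets re-resolved on the next lookup. */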
struct flow_cache_entry {
	struct flow_cache_entry		*next;
	u16				family;
	u8				dir;
	u32				genid;
	struct flowi			key;
	struct flow_cache_object	*object;
};

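/* The cache is strictly per-CPU: each CPU owns its hash table, hash
 * seed and entry count, so the fast path needs no locks; it only
 * disables softirqs (see flow_cache_lookup()). */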
struct flow_cache_percpu {
	struct flow_cache_entry		**hash_table;
	int				hash_count;
	u32				hash_rnd;
	int				hash_rnd_recalc;
	struct tasklet_struct		flush_tasklet;
};

struct flow_flush_info {
	struct flow_cache		*cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

struct flow_cache {
	u32				hash_shift;
	unsigned long			order;
	struct flow_cache_percpu	*percpu;
	struct notifier_block		hotcpu_notifier;
	int				low_watermark;
	int				high_watermark;
	struct timer_list		rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep;

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

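/* Runs from a timer every FLOW_HASH_RND_PERIOD (ten minutes).  It only
 * flags each CPU for a reseed; the new hash_rnd is drawn, and the
 * table emptied, lazily from that CPU's own lookup path, so no
 * cross-CPU synchronization is needed here. */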
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

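/* An entry is still usable only if its genid matches the global
 * generation counter and its cached object (if any) passes the
 * object's own ->check() method. */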
static int flow_entry_valid(struct flow_cache_entry *fle)
{
	if (atomic_read(&flow_cache_genid) != fle->genid)
		return 0;
	if (fle->object && !fle->object->ops->check(fle->object))
		return 0;
	return 1;
}

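/* Entries never free their object directly; teardown goes through the
 * object's virtual ->delete() method, so each object class keeps
 * control of its own reference counting. */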
static void flow_entry_kill(struct flow_cache *fc,
			    struct flow_cache_percpu *fcp,
			    struct flow_cache_entry *fle)
{
	if (fle->object)
		fle->object->ops->delete(fle->object);
	kmem_cache_free(flow_cachep, fle);
	fcp->hash_count--;
}

static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle, **flp;
	int i;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int saved = 0;

		flp = &fcp->hash_table[i];
		while ((fle = *flp) != NULL) {
			if (saved < shrink_to &&
			    flow_entry_valid(fle)) {
				saved++;
				flp = &fle->next;
			} else {
				*flp = fle->next;
				flow_entry_kill(fc, fcp, fle);
			}
		}
	}
}

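/* Shrink back to low_watermark entries overall, i.e. an average chain
 * depth of low_watermark / hash_size.  With the defaults set up in
 * flow_cache_init() (low = 2 * hash_size) this keeps the two most
 * recently added valid entries of each bucket. */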
static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

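/* Hash the whole flowi key, word by word, under this CPU's private
 * random seed.  The seed makes bucket placement unpredictable to
 * remote parties; reseeding (above) empties the table because old
 * placements are no longer valid. */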
static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  struct flowi *key)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
	flow_compare_t *k1, *k1_lim, *k2;
	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

	k1 = (flow_compare_t *) key1;
	k1_lim = k1 + n_elem;

	k2 = (flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

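/* flow_cache_lookup - look up (and, on miss, resolve) the object for a flow.
 *
 * Fast path: hash the key into this CPU's table and hand back the
 * cached object through its ->get() method.  Slow path (miss, stale
 * genid, or ->get() refusing the object): call @resolver to build a
 * fresh object and cache it.  On resolver error the entry's genid is
 * left behind the global counter so the error itself is never cached.
 * Runs with softirqs disabled and may return an ERR_PTR() from the
 * resolver.
 *
 * A caller supplies the resolver; schematically (hypothetical names,
 * for illustration only):
 *
 *	flo = flow_cache_lookup(net, &fl, family, dir,
 *				my_resolver, my_ctx);
 *	if (IS_ERR(flo))
 *		return PTR_ERR(flo);
 */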
struct flow_cache_object *
flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
		  flow_resolve_t resolver, void *ctx)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, **head;
	struct flow_cache_object *flo;
	unsigned int hash;

	local_bh_disable();
	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());

	fle = NULL;
	flo = NULL;
	/* Packet really early in init? Making flow_cache_init a
	 * pre-smp initcall would solve this. --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);

	hash = flow_hash_code(fc, fcp, key);
	head = &fcp->hash_table[hash];
	for (fle = *head; fle; fle = fle->next) {
		if (fle->family == family &&
		    fle->dir == dir &&
		    flow_key_compare(key, &fle->key) == 0)
			break;
	}

	if (unlikely(!fle)) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->next = *head;
			*head = fle;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, sizeof(*key));
			fle->object = NULL;
			fcp->hash_count++;
		}
	} else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
		flo = fle->object;
		if (!flo)
			goto ret_object;
		flo = flo->ops->get(flo);
		if (flo)
			goto ret_object;
	} else if (fle->object) {
		flo = fle->object;
		flo->ops->delete(flo);
		fle->object = NULL;
	}

nocache:
	flo = NULL;
	if (fle) {
		flo = fle->object;
		fle->object = NULL;
	}
	flo = resolver(net, key, family, dir, flo, ctx);
	if (fle) {
		fle->genid = atomic_read(&flow_cache_genid);
		if (!IS_ERR(flo))
			fle->object = flo;
		else
			fle->genid--;
	} else {
		if (flo && !IS_ERR(flo))
			flo->ops->delete(flo);
	}
ret_object:
	local_bh_enable();
	return flo;
}

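/* Per-CPU half of a flush: walk this CPU's table and release the
 * cached object of every entry that is no longer valid.  The entries
 * themselves stay allocated; only the objects are dropped.  The last
 * CPU to finish wakes the waiter in flow_cache_flush(). */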
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		struct flow_cache_entry *fle;

		fle = fcp->hash_table[i];
		for (; fle; fle = fle->next) {
			if (flow_entry_valid(fle))
				continue;

			if (fle->object)
				fle->object->ops->delete(fle->object);
			fle->object = NULL;
		}
	}

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();
	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

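/* Serialized, sleeping flush of the whole cache: pin CPU hotplug,
 * broadcast the flush tasklet to every other online CPU via
 * smp_call_function(), run the local copy directly, then sleep until
 * the last CPU's tasklet signals completion. */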
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
					  struct flow_cache_percpu *fcp)
{
	fcp->hash_table = (struct flow_cache_entry **)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
	if (!fcp->hash_table)
		panic("NET: failed to allocate flow cache order %lu\n", fc->order);

	fcp->hash_rnd_recalc = 1;
	fcp->hash_count = 0;
	tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
}

static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		__flow_cache_shrink(fc, fcp, 0);
	return NOTIFY_OK;
}

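/* With hash_shift = 10 each CPU gets 1024 buckets, so the watermarks
 * below work out to 2048 (shrink back to this) and 4096 (never grow
 * past this) cached entries per CPU.  The table itself is a page-order
 * allocation just big enough for 1024 chain-head pointers. */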
static int flow_cache_init(struct flow_cache *fc)
{
	unsigned long order;
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	for (order = 0;
	     (PAGE_SIZE << order) <
		     (sizeof(struct flow_cache_entry *)*flow_cache_hash_size(fc));
	     order++)
		/* NOTHING */;
	fc->order = order;
	fc->percpu = alloc_percpu(struct flow_cache_percpu);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	for_each_possible_cpu(i)
		flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));

	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	return 0;
}

static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);