/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <asm/atomic.h>
#include <linux/security.h>

struct flow_cache_entry {
	struct flow_cache_entry	*next;
	u16			family;
	u8			dir;
	u32			genid;
	struct flowi		key;
	void			*object;
	atomic_t		*object_ref;
};

struct flow_cache_percpu {
	struct flow_cache_entry **	hash_table;
	int				hash_count;
	u32				hash_rnd;
	int				hash_rnd_recalc;
	struct tasklet_struct		flush_tasklet;
};

struct flow_flush_info {
	struct flow_cache *		cache;
	atomic_t			cpuleft;
	struct completion		completion;
};

struct flow_cache {
	u32				hash_shift;
	unsigned long			order;
	struct flow_cache_percpu *	percpu;
	struct notifier_block		hotcpu_notifier;
	int				low_watermark;
	int				high_watermark;
	struct timer_list		rnd_timer;
};

atomic_t flow_cache_genid = ATOMIC_INIT(0);
static struct flow_cache flow_cache_global;
static struct kmem_cache *flow_cachep;

#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)

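/* Timer callback: flag every possible CPU to pick a fresh hash seed on
 * its next lookup, then re-arm the timer for the next period. */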
static void flow_cache_new_hashrnd(unsigned long arg)
{
	struct flow_cache *fc = (void *) arg;
	int i;

	for_each_possible_cpu(i)
		per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);
}

static void flow_entry_kill(struct flow_cache *fc,
			    struct flow_cache_percpu *fcp,
			    struct flow_cache_entry *fle)
{
	if (fle->object)
		atomic_dec(fle->object_ref);
	kmem_cache_free(flow_cachep, fle);
	fcp->hash_count--;
}

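/* Trim every hash chain down to at most shrink_to entries, killing the
 * rest; shrink_to == 0 empties the per-cpu cache completely. */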
static void __flow_cache_shrink(struct flow_cache *fc,
				struct flow_cache_percpu *fcp,
				int shrink_to)
{
	struct flow_cache_entry *fle, **flp;
	int i;

	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		int k = 0;

		flp = &fcp->hash_table[i];
		while ((fle = *flp) != NULL && k < shrink_to) {
			k++;
			flp = &fle->next;
		}
		while ((fle = *flp) != NULL) {
			*flp = fle->next;
			flow_entry_kill(fc, fcp, fle);
		}
	}
}

static void flow_cache_shrink(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

	__flow_cache_shrink(fc, fcp, shrink_to);
}

static void flow_new_hash_rnd(struct flow_cache *fc,
			      struct flow_cache_percpu *fcp)
{
	get_random_bytes(&fcp->hash_rnd, sizeof(u32));
	fcp->hash_rnd_recalc = 0;
	__flow_cache_shrink(fc, fcp, 0);
}

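/* Hash the flow key with this CPU's random seed and mask the result
 * down to a bucket index. */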
static u32 flow_hash_code(struct flow_cache *fc,
			  struct flow_cache_percpu *fcp,
			  struct flowi *key)
{
	u32 *k = (u32 *) key;

	return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
		& (flow_cache_hash_size(fc) - 1));
}

#if (BITS_PER_LONG == 64)
typedef u64 flow_compare_t;
#else
typedef u32 flow_compare_t;
#endif

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment and
 * constant size.
 */
static int flow_key_compare(struct flowi *key1, struct flowi *key2)
{
	flow_compare_t *k1, *k1_lim, *k2;
	const int n_elem = sizeof(struct flowi) / sizeof(flow_compare_t);

	BUILD_BUG_ON(sizeof(struct flowi) % sizeof(flow_compare_t));

	k1 = (flow_compare_t *) key1;
	k1_lim = k1 + n_elem;

	k2 = (flow_compare_t *) key2;

	do {
		if (*k1++ != *k2++)
			return 1;
	} while (k1 < k1_lim);

	return 0;
}

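/* Look up @key in this CPU's hash table.  A hit whose genid matches the
 * global flow_cache_genid is returned directly; on a miss or a stale
 * entry the flow is (re)resolved via @resolver and cached for next
 * time.  Runs with BHs disabled so per-cpu state is never touched
 * concurrently. */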
void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
			flow_resolve_t resolver)
{
	struct flow_cache *fc = &flow_cache_global;
	struct flow_cache_percpu *fcp;
	struct flow_cache_entry *fle, **head;
	unsigned int hash;

	local_bh_disable();
	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());

	fle = NULL;
	/* Packet really early in init?  Making flow_cache_init a
	 * pre-smp initcall would solve this.  --RR */
	if (!fcp->hash_table)
		goto nocache;

	if (fcp->hash_rnd_recalc)
		flow_new_hash_rnd(fc, fcp);
	hash = flow_hash_code(fc, fcp, key);

	head = &fcp->hash_table[hash];
	for (fle = *head; fle; fle = fle->next) {
		if (fle->family == family &&
		    fle->dir == dir &&
		    flow_key_compare(key, &fle->key) == 0) {
			if (fle->genid == atomic_read(&flow_cache_genid)) {
				void *ret = fle->object;

				if (ret)
					atomic_inc(fle->object_ref);
				local_bh_enable();

				return ret;
			}
			break;
		}
	}

	if (!fle) {
		if (fcp->hash_count > fc->high_watermark)
			flow_cache_shrink(fc, fcp);

		fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
		if (fle) {
			fle->next = *head;
			*head = fle;
			fle->family = family;
			fle->dir = dir;
			memcpy(&fle->key, key, sizeof(*key));
			fle->object = NULL;
			fcp->hash_count++;
		}
	}

nocache:
	{
		int err;
		void *obj;
		atomic_t *obj_ref;

		err = resolver(net, key, family, dir, &obj, &obj_ref);

		if (fle && !err) {
			fle->genid = atomic_read(&flow_cache_genid);

			if (fle->object)
				atomic_dec(fle->object_ref);

			fle->object = obj;
			fle->object_ref = obj_ref;
			if (obj)
				atomic_inc(fle->object_ref);
		}
		local_bh_enable();

		if (err)
			obj = ERR_PTR(err);
		return obj;
	}
}

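/* Per-cpu flush work: drop the object reference of every entry whose
 * genid is stale, then signal completion once the last CPU finishes. */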
static void flow_cache_flush_tasklet(unsigned long data)
{
	struct flow_flush_info *info = (void *)data;
	struct flow_cache *fc = info->cache;
	struct flow_cache_percpu *fcp;
	int i;

	fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
	for (i = 0; i < flow_cache_hash_size(fc); i++) {
		struct flow_cache_entry *fle;

		fle = fcp->hash_table[i];
		for (; fle; fle = fle->next) {
			unsigned genid = atomic_read(&flow_cache_genid);

			if (!fle->object || fle->genid == genid)
				continue;

			fle->object = NULL;
			atomic_dec(fle->object_ref);
		}
	}

	if (atomic_dec_and_test(&info->cpuleft))
		complete(&info->completion);
}

static void flow_cache_flush_per_cpu(void *data)
{
	struct flow_flush_info *info = data;
	int cpu;
	struct tasklet_struct *tasklet;

	cpu = smp_processor_id();

	tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
	tasklet->data = (unsigned long)info;
	tasklet_schedule(tasklet);
}

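/* Flush stale entries on every online CPU: schedule the flush tasklet
 * remotely via smp_call_function(), run it locally as well, and wait
 * until all CPUs have completed. */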
void flow_cache_flush(void)
{
	struct flow_flush_info info;
	static DEFINE_MUTEX(flow_flush_sem);

	/* Don't want cpus going down or up during this. */
	get_online_cpus();
	mutex_lock(&flow_flush_sem);
	info.cache = &flow_cache_global;
	atomic_set(&info.cpuleft, num_online_cpus());
	init_completion(&info.completion);

	local_bh_disable();
	smp_call_function(flow_cache_flush_per_cpu, &info, 0);
	flow_cache_flush_tasklet((unsigned long)&info);
	local_bh_enable();

	wait_for_completion(&info.completion);
	mutex_unlock(&flow_flush_sem);
	put_online_cpus();
}

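/* Allocate one CPU's zeroed hash table (2^fc->order pages) at boot and
 * initialise its flush tasklet. */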
static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
					  struct flow_cache_percpu *fcp)
{
	fcp->hash_table = (struct flow_cache_entry **)
		__get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
	if (!fcp->hash_table)
		panic("NET: failed to allocate flow cache order %lu\n", fc->order);

	fcp->hash_rnd_recalc = 1;
	fcp->hash_count = 0;
	tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
}

static int flow_cache_cpu(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
{
	struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
	int cpu = (unsigned long) hcpu;
	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
		__flow_cache_shrink(fc, fcp, 0);
	return NOTIFY_OK;
}

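/* Set up the global cache: size the hash table, start the rekey timer,
 * prepare every possible CPU and register the CPU hotplug notifier. */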
static int flow_cache_init(struct flow_cache *fc)
{
	unsigned long order;
	int i;

	fc->hash_shift = 10;
	fc->low_watermark = 2 * flow_cache_hash_size(fc);
	fc->high_watermark = 4 * flow_cache_hash_size(fc);

	for (order = 0;
	     (PAGE_SIZE << order) <
		     (sizeof(struct flow_cache_entry *)*flow_cache_hash_size(fc));
	     order++)
		/* NOTHING */;
	fc->order = order;
	fc->percpu = alloc_percpu(struct flow_cache_percpu);

	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
		    (unsigned long) fc);
	fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
	add_timer(&fc->rnd_timer);

	for_each_possible_cpu(i)
		flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));

	fc->hotcpu_notifier = (struct notifier_block){
		.notifier_call = flow_cache_cpu,
	};
	register_hotcpu_notifier(&fc->hotcpu_notifier);

	return 0;
}

static int __init flow_cache_init_global(void)
{
	flow_cachep = kmem_cache_create("flow_cache",
					sizeof(struct flow_cache_entry),
					0, SLAB_PANIC, NULL);

	return flow_cache_init(&flow_cache_global);
}

module_init(flow_cache_init_global);

EXPORT_SYMBOL(flow_cache_genid);
EXPORT_SYMBOL(flow_cache_lookup);