/*
 *	net/core/dst.c	Protocol independent destination cache.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>

#include <net/dst.h>

/* Locking strategy:
 * 1) Garbage collection state of dead destination cache
 *    entries is protected by dst_lock.
 * 2) GC is run only from BH context, and is the only remover
 *    of entries.
 * 3) Entries are added to the garbage list from both BH
 *    and non-BH context, so local BH disabling is needed.
 * 4) All operations modify state, so a spinlock is used.
 */
static struct dst_entry *dst_garbage_list;
#if RT_CACHE_DEBUG >= 2
static atomic_t dst_total = ATOMIC_INIT(0);
#endif
static DEFINE_SPINLOCK(dst_lock);

static unsigned long dst_gc_timer_expires;
static unsigned long dst_gc_timer_inc = DST_GC_MAX;
static void dst_run_gc(unsigned long);
static void ___dst_free(struct dst_entry *dst);

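/* The GC interval adapts between DST_GC_MIN and DST_GC_MAX (defined in
 * <net/dst.h>): each pass that finds no work stretches the next expiry
 * by dst_gc_timer_inc, which itself grows by DST_GC_INC.
 */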
static DEFINE_TIMER(dst_gc_timer, dst_run_gc, DST_GC_MIN, 0);

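/* Walk the garbage list, destroying every entry whose refcount has
 * dropped to zero, and reschedule the timer with a back-off that
 * grows while the list yields no work. Runs in BH context only.
 */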
static void dst_run_gc(unsigned long dummy)
{
        int delayed = 0;
        int work_performed;
        struct dst_entry *dst, **dstp;

        if (!spin_trylock(&dst_lock)) {
                mod_timer(&dst_gc_timer, jiffies + HZ/10);
                return;
        }

        del_timer(&dst_gc_timer);
        dstp = &dst_garbage_list;
        work_performed = 0;
        while ((dst = *dstp) != NULL) {
                if (atomic_read(&dst->__refcnt)) {
                        dstp = &dst->next;
                        delayed++;
                        continue;
                }
                *dstp = dst->next;
                work_performed = 1;

                dst = dst_destroy(dst);
                if (dst) {
                        /* NOHASH and still referenced. Unless it is already
                         * on the gc list, invalidate it and add it to the
                         * gc list.
                         *
                         * Note: this is temporary. Actually, NOHASH dst's
                         * must be obsoleted when the parent is obsoleted.
                         * But we do not have a state "obsoleted, but
                         * referenced by parent", so this is right.
                         */
                        if (dst->obsolete > 1)
                                continue;

                        ___dst_free(dst);
                        dst->next = *dstp;
                        *dstp = dst;
                        dstp = &dst->next;
                }
        }
        if (!dst_garbage_list) {
                dst_gc_timer_inc = DST_GC_MAX;
                goto out;
        }
        if (!work_performed) {
                if ((dst_gc_timer_expires += dst_gc_timer_inc) > DST_GC_MAX)
                        dst_gc_timer_expires = DST_GC_MAX;
                dst_gc_timer_inc += DST_GC_INC;
        } else {
                dst_gc_timer_inc = DST_GC_INC;
                dst_gc_timer_expires = DST_GC_MIN;
        }
#if RT_CACHE_DEBUG >= 2
        printk("dst_total: %d/%d %ld\n",
               atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
        /* If the next desired timer fire is more than 4 seconds in the
         * future, round the timer to whole seconds.
         */
        if (dst_gc_timer_expires > 4*HZ)
                mod_timer(&dst_gc_timer,
                          round_jiffies(jiffies + dst_gc_timer_expires));
        else
                mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);

out:
        spin_unlock(&dst_lock);
}

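/* Default input/output handler for an entry that is dead or not yet
 * wired up: drop the packet on the floor.
 */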
static int dst_discard(struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}

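/* Allocate a zeroed entry from the protocol's slab cache, giving the
 * protocol's gc() hook a chance to reclaim memory first once the entry
 * count exceeds gc_thresh.
 */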
void *dst_alloc(struct dst_ops *ops)
{
        struct dst_entry *dst;

        if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
                if (ops->gc())
                        return NULL;
        }
        dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;
        atomic_set(&dst->__refcnt, 0);
        dst->ops = ops;
        dst->lastuse = jiffies;
        dst->path = dst;
        dst->input = dst->output = dst_discard;
#if RT_CACHE_DEBUG >= 2
        atomic_inc(&dst_total);
#endif
        atomic_inc(&ops->entries);
        return dst;
}

static void ___dst_free(struct dst_entry *dst)
{
        /* The first case (dev == NULL) is required when the
         * protocol module is unloaded.
         */
        if (dst->dev == NULL || !(dst->dev->flags & IFF_UP)) {
                dst->input = dst->output = dst_discard;
        }
        dst->obsolete = 2;
}

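/* Queue an entry on the garbage list and, if the timer has backed off
 * to a longer interval, rearm it at DST_GC_MIN. Callable from both BH
 * and non-BH context.
 */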
void __dst_free(struct dst_entry *dst)
{
        spin_lock_bh(&dst_lock);
        ___dst_free(dst);
        dst->next = dst_garbage_list;
        dst_garbage_list = dst;
        if (dst_gc_timer_inc > DST_GC_INC) {
                dst_gc_timer_inc = DST_GC_INC;
                dst_gc_timer_expires = DST_GC_MIN;
                mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
        }
        spin_unlock_bh(&dst_lock);
}

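/* Tear one entry down: drop the cached hardware header and neighbour,
 * run the protocol destructor, release the device and free the memory.
 * A NOHASH child that is still referenced is handed back to the caller
 * so it can be queued for garbage collection.
 */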
struct dst_entry *dst_destroy(struct dst_entry *dst)
{
        struct dst_entry *child;
        struct neighbour *neigh;
        struct hh_cache *hh;

        smp_rmb();

again:
        neigh = dst->neighbour;
        hh = dst->hh;
        child = dst->child;

        dst->hh = NULL;
        if (hh && atomic_dec_and_test(&hh->hh_refcnt))
                kfree(hh);

        if (neigh) {
                dst->neighbour = NULL;
                neigh_release(neigh);
        }

        atomic_dec(&dst->ops->entries);

        if (dst->ops->destroy)
                dst->ops->destroy(dst);
        if (dst->dev)
                dev_put(dst->dev);
#if RT_CACHE_DEBUG >= 2
        atomic_dec(&dst_total);
#endif
        kmem_cache_free(dst->ops->kmem_cachep, dst);

        dst = child;
        if (dst) {
                int nohash = dst->flags & DST_NOHASH;

                if (atomic_dec_and_test(&dst->__refcnt)) {
                        /* We were the real parent of this dst, so kill the child. */
                        if (nohash)
                                goto again;
                } else {
                        /* Child is still referenced, return it for freeing. */
                        if (nohash)
                                return dst;
                        /* Child is still in its hash table. */
                }
        }
        return NULL;
}

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                              int unregister)
{
        if (dst->ops->ifdown)
                dst->ops->ifdown(dst, dev, unregister);

        if (dev != dst->dev)
                return;

        if (!unregister) {
                dst->input = dst->output = dst_discard;
        } else {
                dst->dev = &loopback_dev;
                dev_hold(&loopback_dev);
                dev_put(dev);
                if (dst->neighbour && dst->neighbour->dev == dev) {
                        dst->neighbour->dev = &loopback_dev;
                        dev_put(dev);
                        dev_hold(&loopback_dev);
                }
        }
}

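/* Device event handler: on NETDEV_DOWN, point every garbage-listed
 * entry using the device at dst_discard; on NETDEV_UNREGISTER, migrate
 * such entries (and their neighbours) to the loopback device. Only
 * events in the initial network namespace are handled here.
 */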
static int dst_dev_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
{
        struct net_device *dev = ptr;
        struct dst_entry *dst;

        if (dev->nd_net != &init_net)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UNREGISTER:
        case NETDEV_DOWN:
                spin_lock_bh(&dst_lock);
                for (dst = dst_garbage_list; dst; dst = dst->next) {
                        dst_ifdown(dst, dev, event != NETDEV_DOWN);
                }
                spin_unlock_bh(&dst_lock);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
        .notifier_call = dst_dev_event,
};

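/* One-time boot initialization: hook dst into netdevice notifications. */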
void __init dst_init(void)
{
        register_netdevice_notifier(&dst_dev_notifier);
}

EXPORT_SYMBOL(__dst_free);
EXPORT_SYMBOL(dst_alloc);
EXPORT_SYMBOL(dst_destroy);