/*
 * net/dst.h	Protocol independent destination cache definitions.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#ifndef _NET_DST_H
#define _NET_DST_H

#include <net/dst_ops.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/jiffies.h>
#include <net/neighbour.h>
#include <asm/processor.h>

/*
 * 0 - no debugging messages
 * 1 - rare events and bugs (default)
 * 2 - trace mode.
 */
#define RT_CACHE_DEBUG		0

#define DST_GC_MIN	(HZ/10)
#define DST_GC_INC	(HZ/2)
#define DST_GC_MAX	(120*HZ)

/* Each dst_entry has a reference count and sits in some parent list(s).
 * When it is removed from a parent list, it is "freed" (dst_free).
 * After this it enters a dead state (dst->obsolete > 0) and, if its refcnt
 * is zero, it can be destroyed immediately; otherwise it is added
 * to the gc list and the garbage collector periodically checks the refcnt.
 */
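
/*
 * Illustrative sketch of this lifecycle (not part of the original header),
 * in terms of the helpers defined later in this file:
 *
 *	dst = dst_alloc(ops);	owner creates the entry
 *	dst_hold(dst);		each client takes a reference
 *	...
 *	dst_release(dst);	clients drop their references
 *	dst_free(dst);		owner unlinks the entry; it is destroyed now
 *				if unreferenced, otherwise handed to the gc
 */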

struct sk_buff;

struct dst_entry {
	struct rcu_head		rcu_head;
	struct dst_entry	*child;
	struct net_device	*dev;
	short			error;
	short			obsolete;
	int			flags;
#define DST_HOST		1
#define DST_NOXFRM		2
#define DST_NOPOLICY		4
#define DST_NOHASH		8
	unsigned long		expires;

	unsigned short		header_len;	/* more space at head required */
	unsigned short		trailer_len;	/* space to reserve at tail */

	unsigned int		rate_tokens;
	unsigned long		rate_last;	/* rate limiting for ICMP */

	struct dst_entry	*path;

	struct neighbour	*neighbour;
	struct hh_cache		*hh;
#ifdef CONFIG_XFRM
	struct xfrm_state	*xfrm;
#else
	void			*__pad1;
#endif
	int			(*input)(struct sk_buff *);
	int			(*output)(struct sk_buff *);

	struct dst_ops		*ops;

	u32			metrics[RTAX_MAX];

#ifdef CONFIG_NET_CLS_ROUTE
	__u32			tclassid;
#else
	__u32			__pad2;
#endif

	/*
	 * Align __refcnt to a 64 byte boundary
	 * (L1_CACHE_SIZE would be too much)
	 */
#ifdef CONFIG_64BIT
	long			__pad_to_align_refcnt[1];
#endif
	/*
	 * __refcnt wants to be on a different cache line from
	 * input/output/ops or performance tanks badly
	 */
	atomic_t		__refcnt;	/* client references */
	int			__use;
	unsigned long		lastuse;
	union {
		struct dst_entry	*next;
		struct rtable		*rt_next;
		struct rt6_info		*rt6_next;
		struct dn_route		*dn_next;
	};
};

#ifdef __KERNEL__

static inline u32
dst_metric(const struct dst_entry *dst, int metric)
{
	return dst->metrics[metric-1];
}

static inline u32
dst_feature(const struct dst_entry *dst, u32 feature)
{
	return dst_metric(dst, RTAX_FEATURES) & feature;
}

static inline u32 dst_mtu(const struct dst_entry *dst)
{
	u32 mtu = dst_metric(dst, RTAX_MTU);
	/*
	 * Alexey put it here, so ask him about it :)
	 */
	barrier();
	return mtu;
}

/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metric)
{
	return msecs_to_jiffies(dst_metric(dst, metric));
}

static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
				      unsigned long rtt)
{
	dst->metrics[metric-1] = jiffies_to_msecs(rtt);
}

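/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * caller that stores a freshly measured RTT, kept in jiffies inside the
 * kernel, and reads it back.  dst_update_rtt_example() and its use of
 * RTAX_RTT are illustrative assumptions only.
 */
static inline unsigned long dst_update_rtt_example(struct dst_entry *dst,
						   unsigned long rtt_jiffies)
{
	/* stored as milliseconds, as the user ABI expects */
	set_dst_metric_rtt(dst, RTAX_RTT, rtt_jiffies);
	/* converted back to jiffies for in-kernel use */
	return dst_metric_rtt(dst, RTAX_RTT);
}
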
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
	int ret = dst_feature(dst, RTAX_FEATURE_ALLFRAG);
	/* Yes, _exactly_. This is paranoia. */
	barrier();
	return ret;
}

static inline int
dst_metric_locked(struct dst_entry *dst, int metric)
{
	return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}

static inline void dst_hold(struct dst_entry *dst)
{
	/*
	 * If your kernel compilation stops here, please check
	 * __pad_to_align_refcnt declaration in struct dst_entry
	 */
	BUILD_BUG_ON(offsetof(struct dst_entry, __refcnt) & 63);
	atomic_inc(&dst->__refcnt);
}

static inline void dst_use(struct dst_entry *dst, unsigned long time)
{
	dst_hold(dst);
	dst->__use++;
	dst->lastuse = time;
}

/* Like dst_use(), but for callers that do not take their own reference,
 * e.g. when the skb carries a noref dst under rcu_read_lock().
 */
static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
{
	dst->__use++;
	dst->lastuse = time;
}

static inline
struct dst_entry *dst_clone(struct dst_entry *dst)
{
	if (dst)
		atomic_inc(&dst->__refcnt);
	return dst;
}

extern void dst_release(struct dst_entry *dst);

static inline void refdst_drop(unsigned long refdst)
{
	if (!(refdst & SKB_DST_NOREF))
		dst_release((struct dst_entry *)(refdst & SKB_DST_PTRMASK));
}

/**
 * skb_dst_drop - drops skb dst
 * @skb: buffer
 *
 * Drops the dst reference count if a reference was taken (i.e. the dst is
 * not noref) and clears the skb's dst pointer.
 */
static inline void skb_dst_drop(struct sk_buff *skb)
{
	if (skb->_skb_refdst) {
		refdst_drop(skb->_skb_refdst);
		skb->_skb_refdst = 0UL;
	}
}
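
/*
 * Illustrative sketch (not part of the original header): attaching a held
 * dst to an skb and releasing it again.  example_attach_route() is a
 * hypothetical helper; the reference taken by dst_hold() is handed to the
 * skb and later given back through skb_dst_drop() -> dst_release().
 */
static inline void example_attach_route(struct sk_buff *skb,
					struct dst_entry *dst)
{
	dst_hold(dst);		/* reference now owned by the skb */
	skb_dst_set(skb, dst);
	/* ... later, skb_dst_drop(skb) (or kfree_skb()) drops it ... */
}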

static inline void skb_dst_copy(struct sk_buff *nskb, const struct sk_buff *oskb)
{
	nskb->_skb_refdst = oskb->_skb_refdst;
	if (!(nskb->_skb_refdst & SKB_DST_NOREF))
		dst_clone(skb_dst(nskb));
}

/**
 * skb_dst_force - makes sure skb dst is refcounted
 * @skb: buffer
 *
 * If the skb only holds a noref (RCU-protected) dst, take a real reference
 * so the dst can safely outlive the current rcu_read_lock() section.
 */
static inline void skb_dst_force(struct sk_buff *skb)
{
	if (skb_dst_is_noref(skb)) {
		WARN_ON(!rcu_read_lock_held());
		skb->_skb_refdst &= ~SKB_DST_NOREF;
		dst_clone(skb_dst(skb));
	}
}
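
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * queueing helper.  An skb whose dst is only RCU-protected (noref) must not
 * leave the rcu_read_lock() section, so a real reference is taken before
 * the skb is parked on a queue.  example_enqueue() is a made-up name.
 */
static inline void example_enqueue(struct sk_buff_head *list,
				   struct sk_buff *skb)
{
	skb_dst_force(skb);	/* upgrade a noref dst to a counted one */
	__skb_queue_tail(list, skb);
}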

/**
 * skb_tunnel_rx - prepare skb for rx reinsert
 * @skb: buffer
 * @dev: tunnel device
 *
 * After decapsulation, the packet is going to re-enter (netif_rx()) our
 * stack, so perform some cleanups and account it to the tunnel device.
 */
static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
	/* TODO : stats should be SMP safe */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
	skb->rxhash = 0;
	skb_dst_drop(skb);
	nf_reset(skb);
}
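
/*
 * Illustrative sketch (not part of the original header): how a tunnel
 * driver's receive path might use skb_tunnel_rx() once the outer header has
 * been stripped.  example_tunnel_rcv() is a hypothetical function and the
 * inner protocol is assumed to be IPv4.
 */
static inline int example_tunnel_rcv(struct sk_buff *skb,
				     struct net_device *tunnel_dev)
{
	skb->protocol = htons(ETH_P_IP);	/* assumed inner protocol */
	skb_tunnel_rx(skb, tunnel_dev);		/* drop dst, clear rxhash, account */
	return netif_rx(skb);
}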

/* Child entries define the path of the packet through the Linux
 * networking stack.  Thus, destinations are stackable.
 */

static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child = skb_dst(skb)->child;

	skb_dst_drop(skb);
	return child;
}
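
/*
 * Illustrative sketch (not part of the original header): moving an skb one
 * level down a stacked dst chain, roughly as a transformation output path
 * would.  example_advance_dst() is a hypothetical helper; it takes its own
 * reference on the child explicitly instead of relying on skb_dst_pop()'s
 * transfer semantics.
 */
static inline void example_advance_dst(struct sk_buff *skb)
{
	struct dst_entry *child = dst_clone(skb_dst(skb)->child);

	skb_dst_drop(skb);		/* release the current (top) dst */
	skb_dst_set(skb, child);	/* continue along the stacked path */
}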

extern int dst_discard(struct sk_buff *skb);
extern void *dst_alloc(struct dst_ops *ops);
extern void __dst_free(struct dst_entry *dst);
extern struct dst_entry *dst_destroy(struct dst_entry *dst);

static inline void dst_free(struct dst_entry *dst)
{
	if (dst->obsolete > 1)		/* already scheduled for destruction */
		return;
	if (!atomic_read(&dst->__refcnt)) {
		/* unreferenced: destroy it now; dst_destroy() may hand back
		 * a child entry that still has to be freed */
		dst = dst_destroy(dst);
		if (!dst)
			return;
	}
	__dst_free(dst);
}

static inline void dst_rcu_free(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
	dst_free(dst);
}
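
/*
 * Illustrative sketch (not part of the original header): a cache that
 * publishes dst entries to RCU readers can defer the final dst_free() until
 * after a grace period.  example_cache_unlink() is a hypothetical helper;
 * the entry is assumed to be already unlinked from the reader-visible list.
 */
static inline void example_cache_unlink(struct dst_entry *dst)
{
	call_rcu(&dst->rcu_head, dst_rcu_free);
}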

static inline void dst_confirm(struct dst_entry *dst)
{
	if (dst)
		neigh_confirm(dst->neighbour);
}

static inline void dst_link_failure(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	if (dst && dst->ops && dst->ops->link_failure)
		dst->ops->link_failure(skb);
}

static inline void dst_set_expires(struct dst_entry *dst, int timeout)
{
	unsigned long expires = jiffies + timeout;

	if (expires == 0)
		expires = 1;

	if (dst->expires == 0 || time_before(expires, dst->expires))
		dst->expires = expires;
}

/* Output packet to network from transport. */
static inline int dst_output(struct sk_buff *skb)
{
	return skb_dst(skb)->output(skb);
}

/* Input packet from network to transport. */
static inline int dst_input(struct sk_buff *skb)
{
	return skb_dst(skb)->input(skb);
}

static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
{
	if (dst->obsolete)
		dst = dst->ops->check(dst, cookie);
	return dst;
}
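
/*
 * Illustrative sketch (not part of the original header): revalidating a
 * cached route.  A protocol that stored @dst together with an owner-defined
 * validity @cookie asks the owner to re-check the entry once it has been
 * obsoleted.  example_revalidate() is a hypothetical helper.
 */
static inline struct dst_entry *example_revalidate(struct dst_entry *dst,
						   u32 cookie)
{
	if (dst == NULL)
		return NULL;

	/* the owner's ->check() returns NULL if the entry is no longer valid */
	return dst_check(dst, cookie);
}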

extern void dst_init(void);

/* Flags for the flags argument of xfrm_lookup(). */
enum {
	XFRM_LOOKUP_WAIT = 1 << 0,
	XFRM_LOOKUP_ICMP = 1 << 1,
};

struct flowi;
#ifndef CONFIG_XFRM
static inline int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			      struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
static inline int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
				struct flowi *fl, struct sock *sk, int flags)
{
	return 0;
}
#else
extern int xfrm_lookup(struct net *net, struct dst_entry **dst_p,
		       struct flowi *fl, struct sock *sk, int flags);
extern int __xfrm_lookup(struct net *net, struct dst_entry **dst_p,
			 struct flowi *fl, struct sock *sk, int flags);
#endif /* CONFIG_XFRM */
#endif /* __KERNEL__ */

#endif /* _NET_DST_H */