/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
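
/*
 * Illustrative sketch of the locking convention described in the NOTE
 * above (queue_two() is hypothetical, not part of this file):
 * skb_queue_tail() takes the queue lock itself, while its
 * __skb_queue_tail() twin must only be called with that lock, or an
 * equivalent guarantee, already held.
 *
 *	static void queue_two(struct sk_buff_head *q,
 *			      struct sk_buff *a, struct sk_buff *b)
 *	{
 *		unsigned long flags;
 *
 *		skb_queue_tail(q, a);			// locks internally
 *
 *		spin_lock_irqsave(&q->lock, flags);	// caller holds lock
 *		__skb_queue_tail(q, b);
 *		spin_unlock_irqrestore(&q->lock, flags);
 *	}
 */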

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
				  struct pipe_buffer *buf)
{
	put_page(buf->page);
}

static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	get_page(buf->page);
}

static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
			       struct pipe_buffer *buf)
{
	return 1;
}


/* Pipe buffer operations for a socket. */
static struct pipe_buf_operations sock_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = sock_pipe_buf_release,
	.steal = sock_pipe_buf_steal,
	.get = sock_pipe_buf_get,
};
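
/*
 * Note (sketch of how these ops are used): they back the splice()
 * receive path -- skb_splice_bits() points its struct splice_pipe_desc
 * at &sock_pipe_buf_ops.  A .steal return of 1 means "refused": the
 * page may still be referenced by the skb, so it can never be handed
 * over to the pipe reader exclusively.
 */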

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
EXPORT_SYMBOL(skb_over_panic);

/**
 *	skb_under_panic - private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */
void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}
EXPORT_SYMBOL(skb_under_panic);
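
/*
 * Illustrative sketch (not part of this file): the two panics above
 * are the out-of-line halves of skb_put() and skb_push(), reached when
 * a caller runs past the buffer, e.g.:
 *
 *	skb = alloc_skb(16, GFP_ATOMIC);
 *	if (skb)
 *		skb_put(skb, 32);	// tail > end: skb_over_panic()
 */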

void skb_truesize_bug(struct sk_buff *skb)
{
	WARN(net_ratelimit(), KERN_ERR "SKB BUG: Invalid truesize (%u) "
	     "len=%u, sizeof(sk_buff)=%Zd\n",
	     skb->truesize, skb->len, sizeof(struct sk_buff));
}
EXPORT_SYMBOL(skb_truesize_bug);

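/*
 * Note: truesize is the memory cost charged to a socket for this
 * buffer -- the data area plus sizeof(struct sk_buff), as set in
 * __alloc_skb() below -- so an invalid truesize usually means some
 * path resized the skb without fixing up the accounting.
 */
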
/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb - allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@fclone: allocate from fclone cache instead of head cache
 *		and allocate a cloned (child) skb
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of size bytes. The object has a reference count of one.
 *	The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
					 gfp_mask, node);
	if (!data)
		goto nodata;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->tx_flags.flags = 0;
	shinfo->frag_list = NULL;
	memset(&shinfo->hwtstamps, 0, sizeof(shinfo->hwtstamps));

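	/*
	 * Note: objects in skbuff_fclone_cache are laid out as
	 * [parent skb][child skb][atomic_t refcount], so the child sits
	 * at skb + 1 and the shared refcount just past it (seen as
	 * skb + 2 from the parent in kfree_skbmem() below).
	 */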
	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
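
/*
 * Illustrative sketch (not part of this file): typical use of the
 * allocator above through the alloc_skb() wrapper -- reserve headroom
 * first, then append payload (len and data are hypothetical):
 *
 *	struct sk_buff *skb = alloc_skb(128 + NET_IP_ALIGN, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_IP_ALIGN);		// headroom
 *		memcpy(skb_put(skb, len), data, len);	// fill tailroom
 *	}
 */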

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
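
/*
 * Illustrative sketch (not part of this file): rx ring refill in a
 * driver via the netdev_alloc_skb() wrapper, which calls the function
 * above with GFP_ATOMIC (RX_BUF_SIZE is hypothetical):
 *
 *	skb = netdev_alloc_skb(dev, RX_BUF_SIZE + NET_IP_ALIGN);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);
 */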

struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, 0);
	return page;
}
EXPORT_SYMBOL(__netdev_alloc_page);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
EXPORT_SYMBOL(skb_add_rx_frag);
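
/*
 * Illustrative sketch (not part of this file): a page-split rx driver
 * pairing __netdev_alloc_page() (via its netdev_alloc_page() wrapper)
 * with skb_add_rx_frag() to attach received data without copying
 * (frag_len is hypothetical):
 *
 *	page = netdev_alloc_page(dev);
 *	if (page)
 *		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 *				page, 0, frag_len);
 */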

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
struct sk_buff *dev_alloc_skb(unsigned int length)
{
	/*
	 * There is more code here than it seems:
	 * __dev_alloc_skb is an inline
	 */
	return __dev_alloc_skb(length, GFP_ATOMIC);
}
EXPORT_SYMBOL(dev_alloc_skb);

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

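/*
 * Note on the dataref test below (see the dataref comments in
 * skbuff.h): skb_shinfo(skb)->dataref packs two counts into one
 * atomic_t -- the bits above SKB_DATAREF_SHIFT count payload-only
 * references taken via skb_header_release(), the low bits count all
 * references.  A nohdr skb therefore gives back one of each, and the
 * data is freed only when the whole counter reaches zero.
 */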
static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */
void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

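/*
 * Note: kfree_skb(), the public entry point, drops skb->users and
 * calls __kfree_skb() above only when that count reaches zero -- which
 * is why users should call kfree_skb() rather than this helper.
 */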