/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/poll.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */
#define SLAB_SKB		/* Slabified skbuffs	   */

#define CHECKSUM_NONE 0
#define CHECKSUM_HW 1
#define CHECKSUM_UNNECESSARY 2

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
				  sizeof(struct skb_shared_info)) & \
				  ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 * NONE: device failed to checksum this packet.
 *	skb->csum is undefined.
 *
 * UNNECESSARY: device parsed the packet and claims to have verified the
 *	checksum. skb->csum is undefined.
 *	It is a bad option, but, unfortunately, many vendors do this.
 *	Apparently with the secret goal of selling you a new device
 *	when you add a new protocol to your host, e.g. IPv6. 8)
 *
 * HW: the most generic way. Device supplied checksum of _all_
 *	the packet as seen by netif_rx in skb->csum.
 *	NOTE: Even if a device supports only some protocols but
 *	is able to produce some skb->csum, it MUST use HW,
 *	not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 * NONE: skb is checksummed by protocol or csum is not required.
 *
 * HW: device is required to csum the packet as seen by hard_start_xmit
 *	from skb->h.raw to the end and to record the checksum
 *	at skb->h.raw+skb->csum.
 *
 * Device must show its capabilities in dev->features, set
 * at device setup time.
 * NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 * NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 * NETIF_F_IP_CSUM	- device is dumb, it is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for unknown reasons. Though, see the comment
 *			  above about CHECKSUM_UNNECESSARY. 8)
 *
 * Any questions? No questions, good.		--ANK
 */

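/*
 * Illustrative sketch (not part of this header): a receive handler for a
 * hypothetical NIC that can verify TCP/UDP checksums in hardware might
 * report the result roughly like this (the rx_desc fields are made up):
 *
 *	if (rx_desc->csum_verified)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else if (rx_desc->csum_valid) {
 *		skb->csum = rx_desc->csum;
 *		skb->ip_summed = CHECKSUM_HW;
 *	} else
 *		skb->ip_summed = CHECKSUM_NONE;
 */
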
struct net_device;

#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frames to be packed as a single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned int	nr_frags;
	unsigned short	tso_size;
	unsigned short	tso_segs;
	unsigned short	ufo_size;
	unsigned int	ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

struct skb_timeval {
	u32 off_sec;
	u32 off_usec;
};

enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@input_dev: Device we arrived on
 *	@h: Transport layer header
 *	@nh: Network layer header
 *	@mac: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@nfmark: Can be used for communication between hooks
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	struct skb_timeval	tstamp;
	struct net_device	*dev;
	struct net_device	*input_dev;

	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	union {
		unsigned char	*raw;
	} mac;

	struct dst_entry	*dst;
	struct sec_path		*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[40];

	unsigned int		len,
				data_len,
				mac_len,
				csum;
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	__u32			nfmark;
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif


	/* These elements must be at the end, see alloc_skb() for details. */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};

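/*
 * Illustrative sketch (not part of this header): once a layer has set up
 * the header pointers above, later code reaches the headers through the
 * unions, e.g. for a parsed IPv4/TCP packet:
 *
 *	struct iphdr  *iph = skb->nh.iph;
 *	struct tcphdr *th  = skb->h.th;
 *
 *	if (iph->protocol == IPPROTO_TCP)
 *		... th->source and th->dest are the TCP ports ...
 *
 * The pointers are only valid after the owning layer has set them;
 * nothing here parses the packet for you.
 */
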
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1);
}

extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
					    unsigned int size,
					    gfp_t priority);
extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern struct sk_buff *skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	       skb_over_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	       skb_under_panic(struct sk_buff *skb, int len,
				       void *here);

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state
{
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

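/*
 * Illustrative sketch (not part of this header): the sequential reader
 * above yields one contiguous block at a time, covering linear data,
 * paged frags and the frag_list without linearizing the skb:
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int len, consumed = 0;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		... process len bytes at data ...
 *		consumed += len;
 *	}
 *
 * skb_abort_seq_read() is only needed when the walk is abandoned before
 * skb_seq_read() has returned 0.
 */
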
extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)((SKB)->end))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
static inline void kfree_skb(struct sk_buff *skb)
{
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	__kfree_skb(skb);
}

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

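/*
 * Illustrative sketch (not part of this header): a protocol receive
 * handler usually makes sure it owns the buffer before modifying it:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	... skb is now safe to modify ...
 *
 * Returning NET_RX_DROP here is just one plausible convention for the
 * surrounding (assumed) rx handler.
 */
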
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	the packet, and a couple of other messy ones. The normal one is
 *	tcpdumping a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer at the list head
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

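/*
 * Illustrative sketch (not part of this header): attaching page data to
 * an skb means filling the next frag slot and accounting the length by
 * hand; skb_fill_page_desc() takes no page reference of its own:
 *
 *	int i = skb_shinfo(skb)->nr_frags;
 *
 *	get_page(page);
 *	skb_fill_page_desc(skb, i, page, offset, size);
 *	skb->len      += size;
 *	skb->data_len += size;
 *	skb->truesize += size;
 */
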
#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

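/*
 * Illustrative sketch (not part of this header): protocol input routines
 * use pskb_may_pull() to guarantee a header is in the linear area before
 * dereferencing it:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = skb->nh.iph;
 *	... iph fields are now safe to read ...
 *
 * A successful pull may have reallocated the header, so pointers into
 * skb->data cached before the call must be reloaded afterwards.
 */
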
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an &sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
{
	skb->data += len;
	skb->tail += len;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

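/*
 * Illustrative sketch (not part of this header): a typical Ethernet
 * receive path combines the helpers above; pkt_len and rx_buf are
 * assumed names for what the hardware handed us:
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		goto drop;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 *
 * The skb_reserve() shifts skb->data so that the IP header following the
 * 14-byte Ethernet header is 4-byte aligned.
 */
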
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (!skb->data_len) {
		skb->len  = len;
		skb->tail = skb->data + len;
	} else
		___pskb_trim(skb, len, 0);
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (!skb->data_len) {
		skb->len  = len;
		skb->tail = skb->data + len;
		return 0;
	}
	return ___pskb_trim(skb, len, 1);
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

#ifndef CONFIG_HAVE_ARCH_DEV_ALLOC_SKB
/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, 16);
	return skb;
}
#else
extern struct sk_buff *__dev_alloc_skb(unsigned int length, int gfp_mask);
#endif

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
	return 0;
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Returns the buffer, which may be a replacement
 *	for the original, or NULL for out of memory - in which case
 *	the original buffer is still freed.
 */

static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return skb;
	return skb_pad(skb, len - size);
}

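/*
 * Illustrative sketch (not part of this header): a driver's
 * hard_start_xmit typically pads runt frames to the 60-byte Ethernet
 * minimum (ETH_ZLEN, from <linux/if_ether.h>) before hitting hardware:
 *
 *	if (skb->len < ETH_ZLEN) {
 *		skb = skb_padto(skb, ETH_ZLEN);
 *		if (!skb)
 *			return 0;
 *	}
 *
 * On failure the original skb has already been freed, so returning 0
 * (packet consumed) without touching it again is the correct reaction.
 */
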
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
							    skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *	@gfp: allocation mode
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
{
	return __skb_linearize(skb, gfp);
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_HW checksum, or set ip_summed to CHECKSUM_NONE
 *	so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, int len)
{
	if (skb->ip_summed == CHECKSUM_HW)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksums of received packets are still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_HW)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#ifdef CONFIG_HIGHMEM
	BUG_ON(in_irq());

	local_bh_disable();
#endif
	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
}

static inline void kunmap_skb_frag(void *vaddr)
{
	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
#ifdef CONFIG_HIGHMEM
	local_bh_enable();
#endif
}

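/*
 * Illustrative sketch (not part of this header): frag pages may live in
 * highmem, so CPU access to paged data is bracketed by the pair above:
 *
 *	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *	u8 *vaddr = kmap_skb_frag(frag);
 *
 *	... read frag->size bytes at vaddr + frag->page_offset ...
 *
 *	kunmap_skb_frag(vaddr);
 *
 * The mapping is atomic, so the code in between must not sleep.
 */
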
#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)

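/*
 * Illustrative sketch (not part of this header): the walkers above do no
 * locking of their own, and the current skb must stay linked while the
 * loop runs:
 *
 *	struct sk_buff *skb;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	skb_queue_walk(list, skb) {
 *		... examine skb; do not unlink it here ...
 *	}
 *	spin_unlock_irqrestore(&list->lock, flags);
 */
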

extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
				    int len, unsigned int csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(const struct sk_buff *skb, int offset,
				      void *from, int len);
extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      unsigned int csum);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern void	       skb_release_data(struct sk_buff *skb);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

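/*
 * Illustrative sketch (not part of this header): skb_header_pointer()
 * reads a header whether or not it sits in the linear area, falling
 * back to a caller-supplied buffer; this is the idiom used by netfilter
 * match modules, among others:
 *
 *	struct tcphdr _tcph, *th;
 *
 *	th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
 *	if (th == NULL)
 *		goto drop;
 *	... th points at a readable copy of the TCP header ...
 */
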
extern void skb_init(void);
extern void skb_add_mtu(int mtu);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	stamp->tv_sec  = skb->tstamp.off_sec;
	stamp->tv_usec = skb->tstamp.off_usec;
}

/**
 *	skb_set_timestamp - set timestamp of a skb
 *	@skb: skb to set stamp of
 *	@stamp: pointer to struct timeval to get stamp from
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts a struct timeval to an offset and stores
 *	it in the skb.
 */
static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
{
	skb->tstamp.off_sec  = stamp->tv_sec;
	skb->tstamp.off_usec = stamp->tv_usec;
}

extern void __net_timestamp(struct sk_buff *skb);

extern unsigned int __skb_checksum_complete(struct sk_buff *skb);

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__skb_checksum_complete(skb);
}

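/*
 * Illustrative sketch (not part of this header): a receive path that has
 * loaded the pseudo-header checksum into skb->csum can then verify the
 * whole packet in one call (csum_pseudo_hdr is a hypothetical helper):
 *
 *	skb->csum = csum_pseudo_hdr(iph, skb->len);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 *	... checksum is good, or hardware already verified it ...
 */
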
#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nfct->destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
static inline void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
}

#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */