1/*
2 * Definitions for the 'struct sk_buff' memory handlers.
3 *
4 * Authors:
5 * Alan Cox, <gw4pts@gw4pts.ampr.org>
6 * Florian La Roche, <rzsfl@rz.uni-sb.de>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#ifndef _LINUX_SKBUFF_H
15#define _LINUX_SKBUFF_H
16
17#include <linux/kernel.h>
18#include <linux/kmemcheck.h>
19#include <linux/compiler.h>
20#include <linux/time.h>
21#include <linux/cache.h>
22
23#include <asm/atomic.h>
24#include <asm/types.h>
25#include <linux/spinlock.h>
26#include <linux/net.h>
27#include <linux/textsearch.h>
28#include <net/checksum.h>
29#include <linux/rcupdate.h>
30#include <linux/dmaengine.h>
31#include <linux/hrtimer.h>
32
33/* Don't change this without changing skb_csum_unnecessary! */
34#define CHECKSUM_NONE 0
35#define CHECKSUM_UNNECESSARY 1
36#define CHECKSUM_COMPLETE 2
37#define CHECKSUM_PARTIAL 3
38
39#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
40 ~(SMP_CACHE_BYTES - 1))
41#define SKB_WITH_OVERHEAD(X) \
42 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
43#define SKB_MAX_ORDER(X, ORDER) \
44 SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
45#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
46#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
47
48/* A. Checksumming of received packets by device.
49 *
50 * NONE: device failed to checksum this packet.
51 * skb->csum is undefined.
52 *
53 * UNNECESSARY: device parsed the packet and verified the checksum.
54 * skb->csum is undefined.
55 * It is a bad option, but, unfortunately, many vendors do this.
56 * Apparently with the secret goal of selling you a new device
57 * when you add a new protocol to your host, e.g. IPv6. 8)
58 *
59 * COMPLETE: the most generic way. Device supplied checksum of _all_
60 * the packet as seen by netif_rx in skb->csum.
61 * NOTE: Even if a device supports only some protocols but
62 * is able to produce some skb->csum, it MUST use COMPLETE,
63 * not UNNECESSARY.
64 *
65 * PARTIAL: identical to the case for output below. This may occur
66 * on a packet received directly from another Linux OS, e.g.,
67 * a virtualised Linux kernel on the same host. The packet can
68 * be treated in the same way as UNNECESSARY except that on
69 * output (i.e., forwarding) the checksum must be filled in
70 * by the OS or the hardware.
71 *
72 * B. Checksumming on output.
73 *
74 * NONE: skb is checksummed by protocol or csum is not required.
75 *
76 * PARTIAL: device is required to csum packet as seen by hard_start_xmit
77 * from skb->csum_start to the end and to record the checksum
78 * at skb->csum_start + skb->csum_offset.
79 *
80 * Device must show its capabilities in dev->features, set
81 * at device setup time.
82 * NETIF_F_HW_CSUM - a clever device, able to checksum
83 * everything.
84 * NETIF_F_NO_CSUM - loopback or reliable single-hop media.
85 * NETIF_F_IP_CSUM - the device is dumb. It is able to csum only
86 * TCP/UDP over IPv4. Sigh. Vendors like it this
87 * way for some unknown reason. Though, see the comment
88 * above about CHECKSUM_UNNECESSARY. 8)
89 * NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6 instead.
90 *
91 * Any questions? No questions, good. --ANK
92 */
93
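/* A sketch for illustration (not part of this header): a receive path
 * that trusts a full hardware checksum reported by the NIC (the
 * 'hw_csum' value here is hypothetical) would do:
 *
 *	skb->csum = hw_csum;
 *	skb->ip_summed = CHECKSUM_COMPLETE;
 *
 * On transmit, CHECKSUM_PARTIAL means the stack has set csum_start and
 * csum_offset and the driver or hardware must fill the checksum in.
 */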
94struct net_device;
95struct scatterlist;
96struct pipe_inode_info;
97
98#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
99struct nf_conntrack {
100 atomic_t use;
101};
102#endif
103
104#ifdef CONFIG_BRIDGE_NETFILTER
105struct nf_bridge_info {
106 atomic_t use;
107 struct net_device *physindev;
108 struct net_device *physoutdev;
109 unsigned int mask;
110 unsigned long data[32 / sizeof(unsigned long)];
111};
112#endif
113
114struct sk_buff_head {
115 /* These two members must be first. */
116 struct sk_buff *next;
117 struct sk_buff *prev;
118
119 __u32 qlen;
120 spinlock_t lock;
121};
122
123struct sk_buff;
124
125/* To allow a 64K frame to be packed as a single skb without a frag_list */
126#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
127
128typedef struct skb_frag_struct skb_frag_t;
129
130struct skb_frag_struct {
131 struct page *page;
132 __u32 page_offset;
133 __u32 size;
134};
135
136#define HAVE_HW_TIME_STAMP
137
138/**
139 * struct skb_shared_hwtstamps - hardware time stamps
140 * @hwtstamp: hardware time stamp transformed into duration
141 * since arbitrary point in time
142 * @syststamp: hwtstamp transformed to system time base
143 *
144 * Software time stamps generated by ktime_get_real() are stored in
145 * skb->tstamp. The relation between the different kinds of time
146 * stamps is as follows:
147 *
148 * syststamp and tstamp can be compared against each other in
149 * arbitrary combinations. The accuracy of a
150 * syststamp/tstamp/"syststamp from other device" comparison is
151 * limited by the accuracy of the transformation into system time
152 * base. This depends on the device driver and its underlying
153 * hardware.
154 *
155 * hwtstamps can only be compared against other hwtstamps from
156 * the same device.
157 *
158 * This structure is attached to packets as part of the
159 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
160 */
161struct skb_shared_hwtstamps {
162 ktime_t hwtstamp;
163 ktime_t syststamp;
164};
165
166/* Definitions for tx_flags in struct skb_shared_info */
167enum {
168 /* generate hardware time stamp */
169 SKBTX_HW_TSTAMP = 1 << 0,
170
171 /* generate software time stamp */
172 SKBTX_SW_TSTAMP = 1 << 1,
173
174 /* device driver is going to provide hardware time stamp */
175 SKBTX_IN_PROGRESS = 1 << 2,
176
177 /* ensure the originating sk reference is available on driver level */
178 SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
179};
180
181/* This data is invariant across clones and lives at
182 * the end of the header data, ie. at skb->end.
183 */
184struct skb_shared_info {
185 unsigned short nr_frags;
186 unsigned short gso_size;
187 /* Warning: this field is not always filled in (UFO)! */
188 unsigned short gso_segs;
189 unsigned short gso_type;
190 __be32 ip6_frag_id;
191 __u8 tx_flags;
192 struct sk_buff *frag_list;
193 struct skb_shared_hwtstamps hwtstamps;
194
195 /*
196 * Warning: all fields before dataref are cleared in __alloc_skb()
197 */
198 atomic_t dataref;
199
200 /* Intermediate layers must ensure that destructor_arg
201 * remains valid until the skb destructor runs */
202 void * destructor_arg;
203 /* must be last field, see pskb_expand_head() */
204 skb_frag_t frags[MAX_SKB_FRAGS];
205};
206
207/* We divide dataref into two halves. The higher 16 bits hold references
208 * to the payload part of skb->data. The lower 16 bits hold references to
209 * the entire skb->data. A clone of a headerless skb holds the length of
210 * the header in skb->hdr_len.
211 *
212 * All users must obey the rule that the skb->data reference count must be
213 * greater than or equal to the payload reference count.
214 *
215 * Holding a reference to the payload part means that the user does not
216 * care about modifications to the header part of skb->data.
217 */
218#define SKB_DATAREF_SHIFT 16
219#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
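/* How the two halves are read back out (a sketch; skb_header_cloned()
 * below performs this exact split):
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int all_refs = dataref & SKB_DATAREF_MASK;
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *
 * all_refs counts every reference to skb->data, payload_refs only those
 * holders that promised not to touch the header part.
 */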
220
221
222enum {
223 SKB_FCLONE_UNAVAILABLE,
224 SKB_FCLONE_ORIG,
225 SKB_FCLONE_CLONE,
226};
227
228enum {
229 SKB_GSO_TCPV4 = 1 << 0,
230 SKB_GSO_UDP = 1 << 1,
231
232 /* This indicates the skb is from an untrusted source. */
233 SKB_GSO_DODGY = 1 << 2,
234
235 /* This indicates the tcp segment has CWR set. */
236 SKB_GSO_TCP_ECN = 1 << 3,
237
238 SKB_GSO_TCPV6 = 1 << 4,
239
240 SKB_GSO_FCOE = 1 << 5,
241};
242
243#if BITS_PER_LONG > 32
244#define NET_SKBUFF_DATA_USES_OFFSET 1
245#endif
246
247#ifdef NET_SKBUFF_DATA_USES_OFFSET
248typedef unsigned int sk_buff_data_t;
249#else
250typedef unsigned char *sk_buff_data_t;
251#endif
252
253/**
254 * struct sk_buff - socket buffer
255 * @next: Next buffer in list
256 * @prev: Previous buffer in list
257 * @sk: Socket we are owned by
258 * @tstamp: Time we arrived
259 * @dev: Device we arrived on/are leaving by
260 * @transport_header: Transport layer header
261 * @network_header: Network layer header
262 * @mac_header: Link layer header
263 * @_skb_refdst: destination entry (with norefcount bit)
264 * @sp: the security path, used for xfrm
265 * @cb: Control buffer. Free for use by every layer. Put private vars here
266 * @len: Length of actual data
267 * @data_len: Data length
268 * @mac_len: Length of link layer header
269 * @hdr_len: writable header length of cloned skb
270 * @csum: Checksum (must include start/offset pair)
271 * @csum_start: Offset from skb->head where checksumming should start
272 * @csum_offset: Offset from csum_start where checksum should be stored
273 * @local_df: allow local fragmentation
274 * @cloned: Head may be cloned (check refcnt to be sure)
275 * @nohdr: Payload reference only, must not modify header
276 * @pkt_type: Packet class
277 * @fclone: skbuff clone status
278 * @ip_summed: Driver fed us an IP checksum
279 * @priority: Packet queueing priority
280 * @users: User count - see {datagram,tcp}.c
281 * @protocol: Packet protocol from driver
282 * @truesize: Buffer size
283 * @head: Head of buffer
284 * @data: Data head pointer
285 * @tail: Tail pointer
286 * @end: End pointer
287 * @destructor: Destruct function
288 * @mark: Generic packet mark
289 * @nfct: Associated connection, if any
290 * @ipvs_property: skbuff is owned by ipvs
291 * @peeked: this packet has been seen already, so stats have been
292 * done for it, don't do them again
293 * @nf_trace: netfilter packet trace flag
294 * @nfctinfo: Relationship of this skb to the connection
295 * @nfct_reasm: netfilter conntrack re-assembly pointer
296 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
297 * @skb_iif: ifindex of device we arrived on
298 * @rxhash: the packet hash computed on receive
299 * @queue_mapping: Queue mapping for multiqueue devices
300 * @tc_index: Traffic control index
301 * @tc_verd: traffic control verdict
302 * @ndisc_nodetype: router type (from link layer)
303 * @dma_cookie: a cookie to one of several possible DMA operations
304 * done by skb DMA functions
305 * @secmark: security marking
306 * @vlan_tci: vlan tag control information
307 */
308
309struct sk_buff {
310 /* These two members must be first. */
311 struct sk_buff *next;
312 struct sk_buff *prev;
313
314 ktime_t tstamp;
315
316 struct sock *sk;
317 struct net_device *dev;
318
319 /*
320 * This is the control buffer. It is free to use for every
321 * layer. Please put your private variables there. If you
322 * want to keep them across layers you have to do a skb_clone()
323 * first. This is owned by whoever has the skb queued ATM.
324 */
325 char cb[48] __aligned(8);
326
327 unsigned long _skb_refdst;
328#ifdef CONFIG_XFRM
329 struct sec_path *sp;
330#endif
331 unsigned int len,
332 data_len;
333 __u16 mac_len,
334 hdr_len;
335 union {
336 __wsum csum;
337 struct {
338 __u16 csum_start;
339 __u16 csum_offset;
340 };
341 };
342 __u32 priority;
343 kmemcheck_bitfield_begin(flags1);
344 __u8 local_df:1,
345 cloned:1,
346 ip_summed:2,
347 nohdr:1,
348 nfctinfo:3;
349 __u8 pkt_type:3,
350 fclone:2,
351 ipvs_property:1,
352 peeked:1,
353 nf_trace:1;
354 kmemcheck_bitfield_end(flags1);
355 __be16 protocol;
356
357 void (*destructor)(struct sk_buff *skb);
358#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
359 struct nf_conntrack *nfct;
360 struct sk_buff *nfct_reasm;
361#endif
362#ifdef CONFIG_BRIDGE_NETFILTER
363 struct nf_bridge_info *nf_bridge;
364#endif
365
366 int skb_iif;
367#ifdef CONFIG_NET_SCHED
368 __u16 tc_index; /* traffic control index */
369#ifdef CONFIG_NET_CLS_ACT
370 __u16 tc_verd; /* traffic control verdict */
371#endif
372#endif
373
374 __u32 rxhash;
375
376 kmemcheck_bitfield_begin(flags2);
377 __u16 queue_mapping:16;
378#ifdef CONFIG_IPV6_NDISC_NODETYPE
379 __u8 ndisc_nodetype:2,
380 deliver_no_wcard:1;
381#else
382 __u8 deliver_no_wcard:1;
383#endif
384 kmemcheck_bitfield_end(flags2);
385
386 /* 0/14 bit hole */
387
388#ifdef CONFIG_NET_DMA
389 dma_cookie_t dma_cookie;
390#endif
391#ifdef CONFIG_NETWORK_SECMARK
392 __u32 secmark;
393#endif
394 union {
395 __u32 mark;
396 __u32 dropcount;
397 };
398
399 __u16 vlan_tci;
400
401 sk_buff_data_t transport_header;
402 sk_buff_data_t network_header;
403 sk_buff_data_t mac_header;
404 /* These elements must be at the end, see alloc_skb() for details. */
405 sk_buff_data_t tail;
406 sk_buff_data_t end;
407 unsigned char *head,
408 *data;
409 unsigned int truesize;
410 atomic_t users;
411};
412
413#ifdef __KERNEL__
414/*
415 * Handling routines are only of interest to the kernel
416 */
417#include <linux/slab.h>
418
419#include <asm/system.h>
420
421/*
422 * skb might have a dst pointer attached, refcounted or not.
423 * _skb_refdst low order bit is set if refcount was _not_ taken
424 */
425#define SKB_DST_NOREF 1UL
426#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
427
428/**
429 * skb_dst - returns skb dst_entry
430 * @skb: buffer
431 *
432 * Returns skb dst_entry, regardless of reference taken or not.
433 */
434static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
435{
436 /* If refdst was not refcounted, check we still are in a
437 * rcu_read_lock section
438 */
439 WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
440 !rcu_read_lock_held() &&
441 !rcu_read_lock_bh_held());
442 return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
443}
444
445/**
446 * skb_dst_set - sets skb dst
447 * @skb: buffer
448 * @dst: dst entry
449 *
450 * Sets skb dst, assuming a reference was taken on dst and should
451 * be released by skb_dst_drop()
452 */
453static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
454{
455 skb->_skb_refdst = (unsigned long)dst;
456}
457
458/**
459 * skb_dst_set_noref - sets skb dst, without a reference
460 * @skb: buffer
461 * @dst: dst entry
462 *
463 * Sets skb dst, assuming a reference was not taken on dst.
464 * skb_dst_drop() should not dst_release() this dst.
465 */
466static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
467{
468 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
469 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
470}
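/* Usage sketch (illustrative; the lookup is hypothetical): a noref dst
 * is only valid inside the RCU read-side section that produced it:
 *
 *	rcu_read_lock();
 *	dst = my_lookup_dst(...);
 *	skb_dst_set_noref(skb, dst);
 *	... use skb_dst(skb) while still under rcu_read_lock() ...
 *	rcu_read_unlock();
 */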
471
472/**
473 * skb_dst_is_noref - Test if skb dst isn't refcounted
474 * @skb: buffer
475 */
476static inline bool skb_dst_is_noref(const struct sk_buff *skb)
477{
478 return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
479}
480
481static inline struct rtable *skb_rtable(const struct sk_buff *skb)
482{
483 return (struct rtable *)skb_dst(skb);
484}
485
486extern void kfree_skb(struct sk_buff *skb);
487extern void consume_skb(struct sk_buff *skb);
488extern void __kfree_skb(struct sk_buff *skb);
489extern struct sk_buff *__alloc_skb(unsigned int size,
490 gfp_t priority, int fclone, int node);
491static inline struct sk_buff *alloc_skb(unsigned int size,
492 gfp_t priority)
493{
494 return __alloc_skb(size, priority, 0, -1);
495}
496
497static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
498 gfp_t priority)
499{
500 return __alloc_skb(size, priority, 1, -1);
501}
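/* Minimal allocation sketch: GFP_KERNEL may be used in process
 * context; from interrupt context or under a spinlock it must be
 * GFP_ATOMIC:
 *
 *	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	...
 *	kfree_skb(skb);
 */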
502
503extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
504
505extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
506extern struct sk_buff *skb_clone(struct sk_buff *skb,
507 gfp_t priority);
508extern struct sk_buff *skb_copy(const struct sk_buff *skb,
509 gfp_t priority);
510extern struct sk_buff *pskb_copy(struct sk_buff *skb,
511 gfp_t gfp_mask);
512extern int pskb_expand_head(struct sk_buff *skb,
513 int nhead, int ntail,
514 gfp_t gfp_mask);
515extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
516 unsigned int headroom);
517extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
518 int newheadroom, int newtailroom,
519 gfp_t priority);
520extern int skb_to_sgvec(struct sk_buff *skb,
521 struct scatterlist *sg, int offset,
522 int len);
523extern int skb_cow_data(struct sk_buff *skb, int tailbits,
524 struct sk_buff **trailer);
525extern int skb_pad(struct sk_buff *skb, int pad);
526#define dev_kfree_skb(a) consume_skb(a)
527
528extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
529 int getfrag(void *from, char *to, int offset,
530 int len,int odd, struct sk_buff *skb),
531 void *from, int length);
532
533struct skb_seq_state {
534 __u32 lower_offset;
535 __u32 upper_offset;
536 __u32 frag_idx;
537 __u32 stepped_offset;
538 struct sk_buff *root_skb;
539 struct sk_buff *cur_skb;
540 __u8 *frag_data;
541};
542
543extern void skb_prepare_seq_read(struct sk_buff *skb,
544 unsigned int from, unsigned int to,
545 struct skb_seq_state *st);
546extern unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
547 struct skb_seq_state *st);
548extern void skb_abort_seq_read(struct skb_seq_state *st);
549
550extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
551 unsigned int to, struct ts_config *config,
552 struct ts_state *state);
553
554extern __u32 __skb_get_rxhash(struct sk_buff *skb);
555static inline __u32 skb_get_rxhash(struct sk_buff *skb)
556{
557 if (!skb->rxhash)
558 skb->rxhash = __skb_get_rxhash(skb);
559
560 return skb->rxhash;
561}
562
563#ifdef NET_SKBUFF_DATA_USES_OFFSET
564static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
565{
566 return skb->head + skb->end;
567}
568#else
569static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
570{
571 return skb->end;
572}
573#endif
574
575/* Internal */
576#define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
577
578static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
579{
580 return &skb_shinfo(skb)->hwtstamps;
581}
582
583/**
584 * skb_queue_empty - check if a queue is empty
585 * @list: queue head
586 *
587 * Returns true if the queue is empty, false otherwise.
588 */
589static inline int skb_queue_empty(const struct sk_buff_head *list)
590{
591 return list->next == (struct sk_buff *)list;
592}
593
594/**
595 * skb_queue_is_last - check if skb is the last entry in the queue
596 * @list: queue head
597 * @skb: buffer
598 *
599 * Returns true if @skb is the last buffer on the list.
600 */
601static inline bool skb_queue_is_last(const struct sk_buff_head *list,
602 const struct sk_buff *skb)
603{
604 return skb->next == (struct sk_buff *)list;
605}
606
607/**
608 * skb_queue_is_first - check if skb is the first entry in the queue
609 * @list: queue head
610 * @skb: buffer
611 *
612 * Returns true if @skb is the first buffer on the list.
613 */
614static inline bool skb_queue_is_first(const struct sk_buff_head *list,
615 const struct sk_buff *skb)
616{
617 return skb->prev == (struct sk_buff *)list;
618}
619
620/**
621 * skb_queue_next - return the next packet in the queue
622 * @list: queue head
623 * @skb: current buffer
624 *
625 * Return the next packet in @list after @skb. It is only valid to
626 * call this if skb_queue_is_last() evaluates to false.
627 */
628static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
629 const struct sk_buff *skb)
630{
631 /* This BUG_ON may seem severe, but if we just return then we
632 * are going to dereference garbage.
633 */
634 BUG_ON(skb_queue_is_last(list, skb));
635 return skb->next;
636}
637
638/**
639 * skb_queue_prev - return the prev packet in the queue
640 * @list: queue head
641 * @skb: current buffer
642 *
643 * Return the prev packet in @list before @skb. It is only valid to
644 * call this if skb_queue_is_first() evaluates to false.
645 */
646static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
647 const struct sk_buff *skb)
648{
649 /* This BUG_ON may seem severe, but if we just return then we
650 * are going to dereference garbage.
651 */
652 BUG_ON(skb_queue_is_first(list, skb));
653 return skb->prev;
654}
655
656/**
657 * skb_get - reference buffer
658 * @skb: buffer to reference
659 *
660 * Makes another reference to a socket buffer and returns a pointer
661 * to the buffer.
662 */
663static inline struct sk_buff *skb_get(struct sk_buff *skb)
664{
665 atomic_inc(&skb->users);
666 return skb;
667}
668
669/*
670 * If users == 1, we are the only owner and can avoid redundant
671 * atomic changes.
672 */
673
674/**
675 * skb_cloned - is the buffer a clone
676 * @skb: buffer to check
677 *
678 * Returns true if the buffer was generated with skb_clone() and is
679 * one of multiple shared copies of the buffer. Cloned buffers are
680 * shared data so must not be written to under normal circumstances.
681 */
682static inline int skb_cloned(const struct sk_buff *skb)
683{
684 return skb->cloned &&
685 (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
686}
687
688/**
689 * skb_header_cloned - is the header a clone
690 * @skb: buffer to check
691 *
692 * Returns true if modifying the header part of the buffer requires
693 * the data to be copied.
694 */
695static inline int skb_header_cloned(const struct sk_buff *skb)
696{
697 int dataref;
698
699 if (!skb->cloned)
700 return 0;
701
702 dataref = atomic_read(&skb_shinfo(skb)->dataref);
703 dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
704 return dataref != 1;
705}
706
707/**
708 * skb_header_release - release reference to header
709 * @skb: buffer to operate on
710 *
711 * Drop a reference to the header part of the buffer. This is done
712 * by acquiring a payload reference. You must not read from the header
713 * part of skb->data after this.
714 */
715static inline void skb_header_release(struct sk_buff *skb)
716{
717 BUG_ON(skb->nohdr);
718 skb->nohdr = 1;
719 atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
720}
721
722/**
723 * skb_shared - is the buffer shared
724 * @skb: buffer to check
725 *
726 * Returns true if more than one person has a reference to this
727 * buffer.
728 */
729static inline int skb_shared(const struct sk_buff *skb)
730{
731 return atomic_read(&skb->users) != 1;
732}
733
734/**
735 * skb_share_check - check if buffer is shared and if so clone it
736 * @skb: buffer to check
737 * @pri: priority for memory allocation
738 *
739 * If the buffer is shared the buffer is cloned and the old copy
740 * drops a reference. A new clone with a single reference is returned.
741 * If the buffer is not shared the original buffer is returned. When
742 * called from interrupt context or with spinlocks held, @pri must
743 * be %GFP_ATOMIC.
744 *
745 * NULL is returned on a memory allocation failure.
746 */
747static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
748 gfp_t pri)
749{
750 might_sleep_if(pri & __GFP_WAIT);
751 if (skb_shared(skb)) {
752 struct sk_buff *nskb = skb_clone(skb, pri);
753 kfree_skb(skb);
754 skb = nskb;
755 }
756 return skb;
757}
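/* Typical receive-path sketch: take ownership of a possibly shared skb
 * before modifying it (illustrative):
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return;
 *
 * On failure the original skb has already been freed; on success skb
 * has a single user, though its data may still be cloned.
 */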
758
759/*
760 * Copy shared buffers into a new sk_buff. We effectively do COW on
761 * packets to handle cases where we have a local reader and forward
762 * and a couple of other messy ones. The normal one is tcpdumping
763 * a packet that's being forwarded.
764 */
765
766/**
767 * skb_unshare - make a copy of a shared buffer
768 * @skb: buffer to check
769 * @pri: priority for memory allocation
770 *
771 * If the socket buffer is a clone then this function creates a new
772 * copy of the data, drops a reference count on the old copy and returns
773 * the new copy with the reference count at 1. If the buffer is not a clone
774 * the original buffer is returned. When called with a spinlock held or
775 * from interrupt context, @pri must be %GFP_ATOMIC.
776 *
777 * %NULL is returned on a memory allocation failure.
778 */
779static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
780 gfp_t pri)
781{
782 might_sleep_if(pri & __GFP_WAIT);
783 if (skb_cloned(skb)) {
784 struct sk_buff *nskb = skb_copy(skb, pri);
785 kfree_skb(skb); /* Free our shared copy */
786 skb = nskb;
787 }
788 return skb;
789}
790
791/**
792 * skb_peek - peek at the head of an &sk_buff_head
793 * @list_: list to peek at
794 *
795 * Peek an &sk_buff. Unlike most other operations you _MUST_
796 * be careful with this one. A peek leaves the buffer on the
797 * list and someone else may run off with it. You must hold
798 * the appropriate locks or have a private queue to do this.
799 *
800 * Returns %NULL for an empty list or a pointer to the head element.
801 * The reference count is not incremented and the reference is therefore
802 * volatile. Use with caution.
803 */
804static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
805{
806 struct sk_buff *list = ((struct sk_buff *)list_)->next;
807 if (list == (struct sk_buff *)list_)
808 list = NULL;
809 return list;
810}
811
812/**
813 * skb_peek_tail - peek at the tail of an &sk_buff_head
814 * @list_: list to peek at
815 *
816 * Peek an &sk_buff. Unlike most other operations you _MUST_
817 * be careful with this one. A peek leaves the buffer on the
818 * list and someone else may run off with it. You must hold
819 * the appropriate locks or have a private queue to do this.
820 *
821 * Returns %NULL for an empty list or a pointer to the tail element.
822 * The reference count is not incremented and the reference is therefore
823 * volatile. Use with caution.
824 */
825static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
826{
827 struct sk_buff *list = ((struct sk_buff *)list_)->prev;
828 if (list == (struct sk_buff *)list_)
829 list = NULL;
830 return list;
831}
832
833/**
834 * skb_queue_len - get queue length
835 * @list_: list to measure
836 *
837 * Return the length of an &sk_buff queue.
838 */
839static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
840{
841 return list_->qlen;
842}
843
844/**
845 * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
846 * @list: queue to initialize
847 *
848 * This initializes only the list and queue length aspects of
849 * an sk_buff_head object. This allows initializing the list
850 * aspects of an sk_buff_head without reinitializing things like
851 * the spinlock. It can also be used for on-stack sk_buff_head
852 * objects where the spinlock is known not to be used.
853 */
854static inline void __skb_queue_head_init(struct sk_buff_head *list)
855{
856 list->prev = list->next = (struct sk_buff *)list;
857 list->qlen = 0;
858}
859
860/*
861 * This function creates a split out lock class for each invocation;
862 * this is needed for now since a whole lot of users of the skb-queue
863 * infrastructure in drivers have different locking usage (in hardirq)
864 * than the networking core (in softirq only). In the long run either the
865 * network layer or the drivers should be annotated to consolidate the
866 * main types of usage into 3 classes.
867 */
868static inline void skb_queue_head_init(struct sk_buff_head *list)
869{
870 spin_lock_init(&list->lock);
871 __skb_queue_head_init(list);
872}
873
874static inline void skb_queue_head_init_class(struct sk_buff_head *list,
875 struct lock_class_key *class)
876{
877 skb_queue_head_init(list);
878 lockdep_set_class(&list->lock, class);
879}
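/* Sketch: a short-lived, on-stack queue (illustrative only):
 *
 *	struct sk_buff_head tmp;
 *
 *	skb_queue_head_init(&tmp);
 *	__skb_queue_tail(&tmp, skb);
 *	while ((skb = __skb_dequeue(&tmp)) != NULL)
 *		kfree_skb(skb);
 *
 * The lockless __skb_* variants are fine here because the queue is
 * private to this stack frame.
 */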
880
881/*
882 * Insert an sk_buff on a list.
883 *
884 * The "__skb_xxxx()" functions are the non-atomic ones that
885 * can only be called with interrupts disabled.
886 */
887extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
888static inline void __skb_insert(struct sk_buff *newsk,
889 struct sk_buff *prev, struct sk_buff *next,
890 struct sk_buff_head *list)
891{
892 newsk->next = next;
893 newsk->prev = prev;
894 next->prev = prev->next = newsk;
895 list->qlen++;
896}
897
898static inline void __skb_queue_splice(const struct sk_buff_head *list,
899 struct sk_buff *prev,
900 struct sk_buff *next)
901{
902 struct sk_buff *first = list->next;
903 struct sk_buff *last = list->prev;
904
905 first->prev = prev;
906 prev->next = first;
907
908 last->next = next;
909 next->prev = last;
910}
911
912/**
913 * skb_queue_splice - join two skb lists; this is designed for stacks
914 * @list: the new list to add
915 * @head: the place to add it in the first list
916 */
917static inline void skb_queue_splice(const struct sk_buff_head *list,
918 struct sk_buff_head *head)
919{
920 if (!skb_queue_empty(list)) {
921 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
922 head->qlen += list->qlen;
923 }
924}
925
926/**
927 * skb_queue_splice_init - join two skb lists and reinitialise the emptied list
928 * @list: the new list to add
929 * @head: the place to add it in the first list
930 *
931 * The list at @list is reinitialised
932 */
933static inline void skb_queue_splice_init(struct sk_buff_head *list,
934 struct sk_buff_head *head)
935{
936 if (!skb_queue_empty(list)) {
937 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
938 head->qlen += list->qlen;
939 __skb_queue_head_init(list);
940 }
941}
942
943/**
944 * skb_queue_splice_tail - join two skb lists, each list being a queue
945 * @list: the new list to add
946 * @head: the place to add it in the first list
947 */
948static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
949 struct sk_buff_head *head)
950{
951 if (!skb_queue_empty(list)) {
952 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
953 head->qlen += list->qlen;
954 }
955}
956
957/**
958 * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
959 * @list: the new list to add
960 * @head: the place to add it in the first list
961 *
962 * Each of the lists is a queue.
963 * The list at @list is reinitialised
964 */
965static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
966 struct sk_buff_head *head)
967{
968 if (!skb_queue_empty(list)) {
969 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
970 head->qlen += list->qlen;
971 __skb_queue_head_init(list);
972 }
973}
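/* Common bulk-dequeue sketch: drain a shared queue under its lock and
 * process the packets afterwards, lock-free ('shared' and 'flags' are
 * illustrative names):
 *
 *	struct sk_buff_head local;
 *
 *	__skb_queue_head_init(&local);
 *	spin_lock_irqsave(&shared->lock, flags);
 *	skb_queue_splice_tail_init(shared, &local);
 *	spin_unlock_irqrestore(&shared->lock, flags);
 *	while ((skb = __skb_dequeue(&local)) != NULL)
 *		... process skb ...
 */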
974
975/**
976 * __skb_queue_after - queue a buffer after a given buffer
977 * @list: list to use
978 * @prev: place after this buffer
979 * @newsk: buffer to queue
980 *
981 * Queue a buffer in the middle of a list. This function takes no locks
982 * and you must therefore hold required locks before calling it.
983 *
984 * A buffer cannot be placed on two lists at the same time.
985 */
986static inline void __skb_queue_after(struct sk_buff_head *list,
987 struct sk_buff *prev,
988 struct sk_buff *newsk)
989{
990 __skb_insert(newsk, prev, prev->next, list);
991}
992
993extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
994 struct sk_buff_head *list);
995
996static inline void __skb_queue_before(struct sk_buff_head *list,
997 struct sk_buff *next,
998 struct sk_buff *newsk)
999{
1000 __skb_insert(newsk, next->prev, next, list);
1001}
1002
1003/**
1004 * __skb_queue_head - queue a buffer at the list head
1005 * @list: list to use
1006 * @newsk: buffer to queue
1007 *
1008 * Queue a buffer at the start of a list. This function takes no locks
1009 * and you must therefore hold required locks before calling it.
1010 *
1011 * A buffer cannot be placed on two lists at the same time.
1012 */
1013extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
1014static inline void __skb_queue_head(struct sk_buff_head *list,
1015 struct sk_buff *newsk)
1016{
1017 __skb_queue_after(list, (struct sk_buff *)list, newsk);
1018}
1019
1020/**
1021 * __skb_queue_tail - queue a buffer at the list tail
1022 * @list: list to use
1023 * @newsk: buffer to queue
1024 *
1025 * Queue a buffer at the end of a list. This function takes no locks
1026 * and you must therefore hold required locks before calling it.
1027 *
1028 * A buffer cannot be placed on two lists at the same time.
1029 */
1030extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
1031static inline void __skb_queue_tail(struct sk_buff_head *list,
1032 struct sk_buff *newsk)
1033{
1034 __skb_queue_before(list, (struct sk_buff *)list, newsk);
1035}
1036
1037/*
1038 * remove sk_buff from list. _Must_ be called atomically, and with
1039 * the list known.
1040 */
1041extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
1042static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
1043{
1044 struct sk_buff *next, *prev;
1045
1046 list->qlen--;
1047 next = skb->next;
1048 prev = skb->prev;
1049 skb->next = skb->prev = NULL;
1050 next->prev = prev;
1051 prev->next = next;
1052}
1053
1054/**
1055 * __skb_dequeue - remove from the head of the queue
1056 * @list: list to dequeue from
1057 *
1058 * Remove the head of the list. This function does not take any locks
1059 * so must be used with appropriate locks held only. The head item is
1060 * returned or %NULL if the list is empty.
1061 */
1062extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
1063static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
1064{
1065 struct sk_buff *skb = skb_peek(list);
1066 if (skb)
1067 __skb_unlink(skb, list);
1068 return skb;
1069}
1070
1071/**
1072 * __skb_dequeue_tail - remove from the tail of the queue
1073 * @list: list to dequeue from
1074 *
1075 * Remove the tail of the list. This function does not take any locks
1076 * so must be used with appropriate locks held only. The tail item is
1077 * returned or %NULL if the list is empty.
1078 */
1079extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
1080static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
1081{
1082 struct sk_buff *skb = skb_peek_tail(list);
1083 if (skb)
1084 __skb_unlink(skb, list);
1085 return skb;
1086}
1087
1088
1089static inline int skb_is_nonlinear(const struct sk_buff *skb)
1090{
1091 return skb->data_len;
1092}
1093
1094static inline unsigned int skb_headlen(const struct sk_buff *skb)
1095{
1096 return skb->len - skb->data_len;
1097}
1098
1099static inline int skb_pagelen(const struct sk_buff *skb)
1100{
1101 int i, len = 0;
1102
1103 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
1104 len += skb_shinfo(skb)->frags[i].size;
1105 return len + skb_headlen(skb);
1106}
1107
1108static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
1109 struct page *page, int off, int size)
1110{
1111 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1112
1113 frag->page = page;
1114 frag->page_offset = off;
1115 frag->size = size;
1116 skb_shinfo(skb)->nr_frags = i + 1;
1117}
1118
1119extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1120 int off, int size);
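/* Sketch: attaching a page fragment ('page' and 'size' are assumed to
 * come from the caller). skb_fill_page_desc() only records the
 * fragment; the byte accounting is still the caller's job:
 *
 *	skb_fill_page_desc(skb, 0, page, 0, size);
 *	skb->len += size;
 *	skb->data_len += size;
 *	skb->truesize += size;
 */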
1121
1122#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
1123#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
1124#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
1125
1126#ifdef NET_SKBUFF_DATA_USES_OFFSET
1127static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1128{
1129 return skb->head + skb->tail;
1130}
1131
1132static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1133{
1134 skb->tail = skb->data - skb->head;
1135}
1136
1137static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1138{
1139 skb_reset_tail_pointer(skb);
1140 skb->tail += offset;
1141}
1142#else /* NET_SKBUFF_DATA_USES_OFFSET */
1143static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
1144{
1145 return skb->tail;
1146}
1147
1148static inline void skb_reset_tail_pointer(struct sk_buff *skb)
1149{
1150 skb->tail = skb->data;
1151}
1152
1153static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1154{
1155 skb->tail = skb->data + offset;
1156}
1157
1158#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1159
1160/*
1161 * Add data to an sk_buff
1162 */
1163extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
1164static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
1165{
1166 unsigned char *tmp = skb_tail_pointer(skb);
1167 SKB_LINEAR_ASSERT(skb);
1168 skb->tail += len;
1169 skb->len += len;
1170 return tmp;
1171}
1172
1173extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
1174static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
1175{
1176 skb->data -= len;
1177 skb->len += len;
1178 return skb->data;
1179}
1180
1181extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
1182static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
1183{
1184 skb->len -= len;
1185 BUG_ON(skb->len < skb->data_len);
1186 return skb->data += len;
1187}
1188
1189static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
1190{
1191 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1192}
1193
1194extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
1195
1196static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
1197{
1198 if (len > skb_headlen(skb) &&
1199 !__pskb_pull_tail(skb, len - skb_headlen(skb)))
1200 return NULL;
1201 skb->len -= len;
1202 return skb->data += len;
1203}
1204
1205static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
1206{
1207 return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
1208}
1209
1210static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
1211{
1212 if (likely(len <= skb_headlen(skb)))
1213 return 1;
1214 if (unlikely(len > skb->len))
1215 return 0;
1216 return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
1217}
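/* Header-parsing sketch: make sure the bytes about to be read are in
 * the linear area before dereferencing skb->data (illustrative):
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = (struct iphdr *)skb->data;
 */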
1218
1219/**
1220 * skb_headroom - bytes at buffer head
1221 * @skb: buffer to check
1222 *
1223 * Return the number of bytes of free space at the head of an &sk_buff.
1224 */
1225static inline unsigned int skb_headroom(const struct sk_buff *skb)
1226{
1227 return skb->data - skb->head;
1228}
1229
1230/**
1231 * skb_tailroom - bytes at buffer end
1232 * @skb: buffer to check
1233 *
1234 * Return the number of bytes of free space at the tail of an sk_buff
1235 */
1236static inline int skb_tailroom(const struct sk_buff *skb)
1237{
1238 return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
1239}
1240
1241/**
1242 * skb_reserve - adjust headroom
1243 * @skb: buffer to alter
1244 * @len: bytes to move
1245 *
1246 * Increase the headroom of an empty &sk_buff by reducing the tail
1247 * room. This is only allowed for an empty buffer.
1248 */
1249static inline void skb_reserve(struct sk_buff *skb, int len)
1250{
1251 skb->data += len;
1252 skb->tail += len;
1253}
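/* Packet-construction sketch: reserve headroom first, append payload
 * with skb_put(), then prepend headers with skb_push() (hlen/plen and
 * the source buffers are assumptions of this example):
 *
 *	skb = alloc_skb(hlen + plen, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);
 *	memcpy(skb_put(skb, plen), payload, plen);
 *	memcpy(skb_push(skb, hlen), header, hlen);
 */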
1254
1255#ifdef NET_SKBUFF_DATA_USES_OFFSET
1256static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1257{
1258 return skb->head + skb->transport_header;
1259}
1260
1261static inline void skb_reset_transport_header(struct sk_buff *skb)
1262{
1263 skb->transport_header = skb->data - skb->head;
1264}
1265
1266static inline void skb_set_transport_header(struct sk_buff *skb,
1267 const int offset)
1268{
1269 skb_reset_transport_header(skb);
1270 skb->transport_header += offset;
1271}
1272
1273static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1274{
1275 return skb->head + skb->network_header;
1276}
1277
1278static inline void skb_reset_network_header(struct sk_buff *skb)
1279{
1280 skb->network_header = skb->data - skb->head;
1281}
1282
1283static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1284{
1285 skb_reset_network_header(skb);
1286 skb->network_header += offset;
1287}
1288
1289static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1290{
1291 return skb->head + skb->mac_header;
1292}
1293
1294static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1295{
1296 return skb->mac_header != ~0U;
1297}
1298
1299static inline void skb_reset_mac_header(struct sk_buff *skb)
1300{
1301 skb->mac_header = skb->data - skb->head;
1302}
1303
1304static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1305{
1306 skb_reset_mac_header(skb);
1307 skb->mac_header += offset;
1308}
1309
1310#else /* NET_SKBUFF_DATA_USES_OFFSET */
1311
1312static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1313{
1314 return skb->transport_header;
1315}
1316
1317static inline void skb_reset_transport_header(struct sk_buff *skb)
1318{
1319 skb->transport_header = skb->data;
1320}
1321
1322static inline void skb_set_transport_header(struct sk_buff *skb,
1323 const int offset)
1324{
1325 skb->transport_header = skb->data + offset;
1326}
1327
1328static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1329{
1330 return skb->network_header;
1331}
1332
1333static inline void skb_reset_network_header(struct sk_buff *skb)
1334{
1335 skb->network_header = skb->data;
1336}
1337
1338static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1339{
1340 skb->network_header = skb->data + offset;
1341}
1342
1343static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1344{
1345 return skb->mac_header;
1346}
1347
1348static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1349{
1350 return skb->mac_header != NULL;
1351}
1352
1353static inline void skb_reset_mac_header(struct sk_buff *skb)
1354{
1355 skb->mac_header = skb->data;
1356}
1357
1358static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1359{
1360 skb->mac_header = skb->data + offset;
1361}
1362#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1363
1364static inline int skb_transport_offset(const struct sk_buff *skb)
1365{
1366 return skb_transport_header(skb) - skb->data;
1367}
1368
1369static inline u32 skb_network_header_len(const struct sk_buff *skb)
1370{
1371 return skb->transport_header - skb->network_header;
1372}
1373
1374static inline int skb_network_offset(const struct sk_buff *skb)
1375{
1376 return skb_network_header(skb) - skb->data;
1377}
1378
1379static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1380{
1381 return pskb_may_pull(skb, skb_network_offset(skb) + len);
1382}
1383
1384/*
1385 * CPUs often take a performance hit when accessing unaligned memory
1386 * locations. The actual performance hit varies, it can be small if the
1387 * hardware handles it or large if we have to take an exception and fix it
1388 * in software.
1389 *
1390 * Since an ethernet header is 14 bytes, network drivers often end up with
1391 * the IP header at an unaligned offset. The IP header can be aligned by
1392 * shifting the start of the packet by 2 bytes. Drivers should do this
1393 * with:
1394 *
1395 * skb_reserve(skb, NET_IP_ALIGN);
1396 *
1397 * The downside to this alignment of the IP header is that the DMA is now
1398 * unaligned. On some architectures the cost of an unaligned DMA is high
1399 * and this cost outweighs the gains made by aligning the IP header.
1400 *
1401 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
1402 * to be overridden.
1403 */
1404#ifndef NET_IP_ALIGN
1405#define NET_IP_ALIGN 2
1406#endif
1407
1408/*
1409 * The networking layer reserves some headroom in skb data (via
1410 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
1411 * the header has to grow. In the default case, if the header has to grow
1412 * 32 bytes or less we avoid the reallocation.
1413 *
1414 * Unfortunately this headroom changes the DMA alignment of the resulting
1415 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
1416 * on some architectures. An architecture can override this value,
1417 * perhaps setting it to a cacheline in size (since that will maintain
1418 * cacheline alignment of the DMA). It must be a power of 2.
1419 *
1420 * Various parts of the networking layer expect at least 32 bytes of
1421 * headroom; you should not reduce this.
1422 *
1423 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
1424 * to reduce the average number of cache lines per packet.
1425 * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
1426 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
1427 */
1428#ifndef NET_SKB_PAD
1429#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
1430#endif
1431
1432extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
1433
1434static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
1435{
1436 if (unlikely(skb->data_len)) {
1437 WARN_ON(1);
1438 return;
1439 }
1440 skb->len = len;
1441 skb_set_tail_pointer(skb, len);
1442}
1443
1444extern void skb_trim(struct sk_buff *skb, unsigned int len);
1445
1446static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
1447{
1448 if (skb->data_len)
1449 return ___pskb_trim(skb, len);
1450 __skb_trim(skb, len);
1451 return 0;
1452}
1453
1454static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
1455{
1456 return (len < skb->len) ? __pskb_trim(skb, len) : 0;
1457}
1458
1459/**
1460 * pskb_trim_unique - remove end from a paged unique (not cloned) buffer
1461 * @skb: buffer to alter
1462 * @len: new length
1463 *
1464 * This is identical to pskb_trim except that the caller knows that
1465 * the skb is not cloned so we should never get an error due to out-
1466 * of-memory.
1467 */
1468static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
1469{
1470 int err = pskb_trim(skb, len);
1471 BUG_ON(err);
1472}
1473
1474/**
1475 * skb_orphan - orphan a buffer
1476 * @skb: buffer to orphan
1477 *
1478 * If a buffer currently has an owner then we call the owner's
1479 * destructor function and make the @skb unowned. The buffer continues
1480 * to exist but is no longer charged to its former owner.
1481 */
1482static inline void skb_orphan(struct sk_buff *skb)
1483{
1484 if (skb->destructor)
1485 skb->destructor(skb);
1486 skb->destructor = NULL;
1487 skb->sk = NULL;
1488}
1489
1490/**
1491 * __skb_queue_purge - empty a list
1492 * @list: list to empty
1493 *
1494 * Delete all buffers on an &sk_buff list. Each buffer is removed from
1495 * the list and one reference dropped. This function does not take the
1496 * list lock and the caller must hold the relevant locks to use it.
1497 */
1498extern void skb_queue_purge(struct sk_buff_head *list);
1499static inline void __skb_queue_purge(struct sk_buff_head *list)
1500{
1501 struct sk_buff *skb;
1502 while ((skb = __skb_dequeue(list)) != NULL)
1503 kfree_skb(skb);
1504}
1505
1506/**
1507 * __dev_alloc_skb - allocate an skbuff for receiving
1508 * @length: length to allocate
1509 * @gfp_mask: get_free_pages mask, passed to alloc_skb
1510 *
1511 * Allocate a new &sk_buff and assign it a usage count of one. The
1512 * buffer has unspecified headroom built in. Users should allocate
1513 * the headroom they think they need without accounting for the
1514 * built-in space. The built-in space is used for optimisations.
1515 *
1516 * %NULL is returned if there is no free memory.
1517 */
1518static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1519 gfp_t gfp_mask)
1520{
1521 struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
1522 if (likely(skb))
1523 skb_reserve(skb, NET_SKB_PAD);
1524 return skb;
1525}
1526
1527extern struct sk_buff *dev_alloc_skb(unsigned int length);
1528
1529extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
1530 unsigned int length, gfp_t gfp_mask);
1531
1532/**
1533 * netdev_alloc_skb - allocate an skbuff for rx on a specific device
1534 * @dev: network device to receive on
1535 * @length: length to allocate
1536 *
1537 * Allocate a new &sk_buff and assign it a usage count of one. The
1538 * buffer has unspecified headroom built in. Users should allocate
1539 * the headroom they think they need without accounting for the
1540 * built-in space. The built-in space is used for optimisations.
1541 *
1542 * %NULL is returned if there is no free memory. Although this function
1543 * allocates memory it can be called from an interrupt.
1544 */
1545static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
1546 unsigned int length)
1547{
1548 return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
1549}
1550
1551static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
1552 unsigned int length)
1553{
1554 struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
1555
1556 if (NET_IP_ALIGN && skb)
1557 skb_reserve(skb, NET_IP_ALIGN);
1558 return skb;
1559}
1560
1561extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
1562
1563/**
1564 * netdev_alloc_page - allocate a page for ps-rx on a specific device
1565 * @dev: network device to receive on
1566 *
1567 * Allocate a new page node local to the specified device.
1568 *
1569 * %NULL is returned if there is no free memory.
1570 */
1571static inline struct page *netdev_alloc_page(struct net_device *dev)
1572{
1573 return __netdev_alloc_page(dev, GFP_ATOMIC);
1574}
1575
1576static inline void netdev_free_page(struct net_device *dev, struct page *page)
1577{
1578 __free_page(page);
1579}
1580
1581/**
1582 * skb_clone_writable - is the header of a clone writable
1583 * @skb: buffer to check
1584 * @len: length up to which to write
1585 *
1586 * Returns true if modifying the header part of the cloned buffer
1587 * does not require the data to be copied.
1588 */
1589static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
1590{
1591 return !skb_header_cloned(skb) &&
1592 skb_headroom(skb) + len <= skb->hdr_len;
1593}
1594
1595static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
1596 int cloned)
1597{
1598 int delta = 0;
1599
1600 if (headroom < NET_SKB_PAD)
1601 headroom = NET_SKB_PAD;
1602 if (headroom > skb_headroom(skb))
1603 delta = headroom - skb_headroom(skb);
1604
1605 if (delta || cloned)
1606 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
1607 GFP_ATOMIC);
1608 return 0;
1609}
1610
1611/**
1612 * skb_cow - copy header of skb when it is required
1613 * @skb: buffer to cow
1614 * @headroom: needed headroom
1615 *
1616 * If the skb passed lacks sufficient headroom or its data part
1617 * is shared, data is reallocated. If reallocation fails, an error
1618 * is returned and original skb is not changed.
1619 *
1620 * The result is skb with writable area skb->head...skb->tail
1621 * and at least @headroom of space at head.
1622 */
1623static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
1624{
1625 return __skb_cow(skb, headroom, skb_cloned(skb));
1626}
1627
1628/**
1629 * skb_cow_head - skb_cow but only making the head writable
1630 * @skb: buffer to cow
1631 * @headroom: needed headroom
1632 *
1633 * This function is identical to skb_cow except that we replace the
1634 * skb_cloned check by skb_header_cloned. It should be used when
1635 * you only need to push on some header and do not need to modify
1636 * the data.
1637 */
1638static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
1639{
1640 return __skb_cow(skb, headroom, skb_header_cloned(skb));
1641}
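/* Sketch: make the header area writable before prepending to it, e.g.
 * when inserting a VLAN tag (VLAN_HLEN and struct vlan_ethhdr come
 * from <linux/if_vlan.h>; illustrative only):
 *
 *	if (skb_cow_head(skb, VLAN_HLEN) < 0)
 *		goto drop;
 *	veth = (struct vlan_ethhdr *)__skb_push(skb, VLAN_HLEN);
 */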
1642
1643/**
1644 * skb_padto - pad an skbuff up to a minimal size
1645 * @skb: buffer to pad
1646 * @len: minimal length
1647 *
1648 * Pads up a buffer to ensure the trailing bytes exist and are
1649 * blanked. If the buffer already contains sufficient data it
1650 * is untouched. Otherwise it is extended. Returns zero on
1651 * success. The skb is freed on error.
1652 */
1653
1654static inline int skb_padto(struct sk_buff *skb, unsigned int len)
1655{
1656 unsigned int size = skb->len;
1657 if (likely(size >= len))
1658 return 0;
1659 return skb_pad(skb, len - size);
1660}
1661
1662static inline int skb_add_data(struct sk_buff *skb,
1663 char __user *from, int copy)
1664{
1665 const int off = skb->len;
1666
1667 if (skb->ip_summed == CHECKSUM_NONE) {
1668 int err = 0;
1669 __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
1670 copy, 0, &err);
1671 if (!err) {
1672 skb->csum = csum_block_add(skb->csum, csum, off);
1673 return 0;
1674 }
1675 } else if (!copy_from_user(skb_put(skb, copy), from, copy))
1676 return 0;
1677
1678 __skb_trim(skb, off);
1679 return -EFAULT;
1680}
1681
1682static inline int skb_can_coalesce(struct sk_buff *skb, int i,
1683 struct page *page, int off)
1684{
1685 if (i) {
1686 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1687
1688 return page == frag->page &&
1689 off == frag->page_offset + frag->size;
1690 }
1691 return 0;
1692}
1693
1694static inline int __skb_linearize(struct sk_buff *skb)
1695{
1696 return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
1697}
1698
1699/**
1700 * skb_linearize - convert paged skb to linear one
1701 * @skb: buffer to linearize
1702 *
1703 * If there is no free memory -ENOMEM is returned, otherwise zero
1704 * is returned and the old skb data released.
1705 */
1706static inline int skb_linearize(struct sk_buff *skb)
1707{
1708 return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
1709}
1710
1711/**
1712 * skb_linearize_cow - make sure skb is linear and writable
1713 * @skb: buffer to process
1714 *
1715 * If there is no free memory -ENOMEM is returned, otherwise zero
1716 * is returned and the old skb data released.
1717 */
1718static inline int skb_linearize_cow(struct sk_buff *skb)
1719{
1720 return skb_is_nonlinear(skb) || skb_cloned(skb) ?
1721 __skb_linearize(skb) : 0;
1722}
1723
1724/**
1725 * skb_postpull_rcsum - update checksum for received skb after pull
1726 * @skb: buffer to update
1727 * @start: start of data before pull
1728 * @len: length of data pulled
1729 *
1730 * After doing a pull on a received packet, you need to call this to
1731 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
1732 * CHECKSUM_NONE so that it can be recomputed from scratch.
1733 */
1734
1735static inline void skb_postpull_rcsum(struct sk_buff *skb,
1736 const void *start, unsigned int len)
1737{
1738 if (skb->ip_summed == CHECKSUM_COMPLETE)
1739 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1740}
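/* Sketch: keeping a CHECKSUM_COMPLETE value consistent across a manual
 * pull ('hlen' is assumed; skb_pull_rcsum() below bundles both steps):
 *
 *	const void *start = skb->data;
 *
 *	__skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);
 */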
1741
1742unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
1743
1744/**
1745 * pskb_trim_rcsum - trim received skb and update checksum
1746 * @skb: buffer to trim
1747 * @len: new length
1748 *
1749 * This is exactly the same as pskb_trim except that it ensures the
1750 * checksums of received packets are still valid after the operation.
1751 */
1752
1753static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1754{
1755 if (likely(len >= skb->len))
1756 return 0;
1757 if (skb->ip_summed == CHECKSUM_COMPLETE)
1758 skb->ip_summed = CHECKSUM_NONE;
1759 return __pskb_trim(skb, len);
1760}
1761
1762#define skb_queue_walk(queue, skb) \
1763 for (skb = (queue)->next; \
1764 prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
1765 skb = skb->next)
1766
1767#define skb_queue_walk_safe(queue, skb, tmp) \
1768 for (skb = (queue)->next, tmp = skb->next; \
1769 skb != (struct sk_buff *)(queue); \
1770 skb = tmp, tmp = skb->next)
1771
1772#define skb_queue_walk_from(queue, skb) \
1773 for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
1774 skb = skb->next)
1775
1776#define skb_queue_walk_from_safe(queue, skb, tmp) \
1777 for (tmp = skb->next; \
1778 skb != (struct sk_buff *)(queue); \
1779 skb = tmp, tmp = skb->next)
1780
1781#define skb_queue_reverse_walk(queue, skb) \
1782 for (skb = (queue)->prev; \
1783 prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
1784 skb = skb->prev)
1785
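/* Iteration sketch: walk a queue while holding its lock (illustrative):
 *
 *	spin_lock_irqsave(&list->lock, flags);
 *	skb_queue_walk(list, skb) {
 *		... inspect skb; do not unlink it inside this walk ...
 *	}
 *	spin_unlock_irqrestore(&list->lock, flags);
 *
 * Use skb_queue_walk_safe() when the current skb may be unlinked.
 */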
1786
1787static inline bool skb_has_frag_list(const struct sk_buff *skb)
1788{
1789 return skb_shinfo(skb)->frag_list != NULL;
1790}
1791
1792static inline void skb_frag_list_init(struct sk_buff *skb)
1793{
1794 skb_shinfo(skb)->frag_list = NULL;
1795}
1796
1797static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
1798{
1799 frag->next = skb_shinfo(skb)->frag_list;
1800 skb_shinfo(skb)->frag_list = frag;
1801}
1802
1803#define skb_walk_frags(skb, iter) \
1804 for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
1805
extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
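
/*
 * Example (an illustrative sketch; "thoff" is the transport header
 * offset within the packet): reading a TCP header that may or may not
 * lie in the linear area, e.g. from a packet classifier. A NULL return
 * means the packet is too short.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (th == NULL)
 *		return -EINVAL;
 */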

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}
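
/*
 * Example (an illustrative sketch; MY_HLEN is hypothetical): these
 * helpers are thin, intent-documenting wrappers around memcpy() on
 * skb->data, so the caller must first make sure the bytes really are
 * in the linear area, e.g. with pskb_may_pull().
 *
 *	u8 hdr[MY_HLEN];
 *
 *	if (!pskb_may_pull(skb, MY_HLEN))
 *		goto drop;
 *	skb_copy_from_linear_data(skb, hdr, MY_HLEN);
 */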

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}
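
/*
 * Example (an illustrative sketch): fetching a received skb's timestamp
 * as a struct timeval, e.g. when answering a SIOCGSTAMP-style query.
 *
 *	struct timeval tv;
 *
 *	skb_get_timestamp(skb, &tv);
 */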

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb: the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 * If the skb has an associated socket, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the hardware time stamps (if @hwtstamps is non-NULL) or generates a
 * software time stamp (otherwise), then queues the clone on the
 * socket's error queue. Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC drivers should call this function in their
 * hard_start_xmit() routine as soon as possible after giving the
 * sk_buff to the MAC hardware, but before freeing the sk_buff.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
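
/*
 * Example (an illustrative sketch; my_hw_queue_xmit() is hypothetical):
 * the expected call site in a driver's transmit routine.
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		...
 *		my_hw_queue_xmit(dev, skb);
 *		skb_tx_timestamp(skb);
 *		return NETDEV_TX_OK;
 *	}
 */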

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	/*
	 * Relies on the CHECKSUM_* values above: bit 0 is set for both
	 * CHECKSUM_UNNECESSARY (1) and CHECKSUM_PARTIAL (3), and on
	 * receive a PARTIAL packet may be treated like UNNECESSARY.
	 */
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum. The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP. It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets. In that case the function should return zero if the
 * checksum is correct. In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
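
/*
 * Example (an illustrative sketch): verifying a received packet whose
 * protocol carries a complete checksum, assuming the pseudo-header sum
 * has already been folded into skb->csum, as the TCP/UDP receive paths
 * arrange before calling this.
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */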

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: this doesn't put (release) the conntrack and bridge info already in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

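/*
 * The rx queue is recorded in queue_mapping biased by one, so that a
 * value of zero means "no rx queue recorded". Example (an illustrative
 * sketch; "rxq" is hypothetical) from a multiqueue driver's receive
 * path, with the later readback used when selecting a tx queue:
 *
 *	skb_record_rx_queue(skb, rxq->index);
 *	...
 *	if (skb_rx_queue_recorded(skb))
 *		txq = skb_get_rx_queue(skb);
 */
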
extern u16 skb_tx_hash(const struct net_device *dev,
		       const struct sk_buff *skb);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

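/*
 * Example (an illustrative sketch): skb_is_gso() keys off gso_size, so
 * it is true for any segmentation-offload packet; a driver that only
 * implements IPv4 TSO might branch like this in its xmit path:
 *
 *	if (skb_is_gso(skb) && !skb_is_gso_v6(skb))
 *		mss = skb_shinfo(skb)->gso_size;
 */
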
extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}
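
/*
 * Example (an illustrative sketch): a forwarding path refusing to
 * forward LRO-merged packets, since they cannot be re-segmented for
 * the outgoing link.
 *
 *	if (skb_warn_if_lro(skb))
 *		goto drop;
 */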

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */