/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall. <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
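
/* Illustrative sketch (not part of this header): how a caller of
 * dev_queue_xmit() can treat congestion notification as success while
 * still propagating real drops.  The helper name send_one_example() is
 * hypothetical.
 *
 *	static int send_one_example(struct sk_buff *skb)
 *	{
 *		int rc = dev_queue_xmit(skb);
 *
 *		// NET_XMIT_CN maps to 0 here; other codes pass through
 *		return net_xmit_eval(rc);
 *	}
 */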
#endif

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK	0		/* driver took care of packet */
#define NETDEV_TX_BUSY	1		/* driver tx path was busy */
#define NETDEV_TX_LOCKED -1		/* driver tx lock was already taken */

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
#define LL_MAX_HEADER	32
#else
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#define LL_MAX_HEADER	96
#else
#define LL_MAX_HEADER	48
#endif
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

struct net_device_subqueue
{
	/* Give a control state for each queue.  This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long	state;
};

/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error	*/
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache	*hh_next;	/* Next entry		*/
	atomic_t	hh_refcnt;	/* number of users	*/
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, f.e ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	(((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
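
/* Illustrative sketch (not part of this header): a protocol that allocates
 * an skb with enough headroom for the outgoing device's link-layer header.
 * The helper name build_probe_skb() and the length value are hypothetical.
 *
 *	static struct sk_buff *build_probe_skb(struct net_device *dev, int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
 *		if (!skb)
 *			return NULL;
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *		return skb;
 *	}
 */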

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_XOFF=0,
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_SCHED,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_RX_SCHED,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_QDISC_RUNNING,
};


/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	struct net_device	*next_sched;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;


	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */


	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	int			promiscuity;
	int			allmulti;


	/* Protocol specific pointers */

	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data	*/
	void			*ip6_ptr;	/* IPv6 specific data	*/
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	struct list_head	poll_list ____cacheline_aligned_in_smp;
					/* Link to poll list	*/

	int			(*poll) (struct net_device *dev, int *quota);
	int			quota;
	int			weight;
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast addr */

/*
 * Cache line mostly used on queue transmit path (qdisc)
 */
	/* device queue lock */
	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

	/* Partially transmitted GSO packet. */
	struct sk_buff		*gso_skb;

	/* ingress path synchronizer */
	spinlock_t		ingress_lock;
	struct Qdisc		*qdisc_ingress;

/*
 * One part is mostly used on xmit path (device)
 */
	/* hard_start_xmit synchronizer */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/* cpu id of the processor that entered hard_start_xmit, or -1
	 * if nobody is in there.
	 */
	int			xmit_lock_owner;
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines. */
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
	int			(*hard_header) (struct sk_buff *skb,
						struct net_device *dev,
						unsigned short type,
						void *daddr,
						void *saddr,
						unsigned len);
	int			(*rebuild_header)(struct sk_buff *skb);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_HEADER_CACHE
	int			(*hard_header_cache)(struct neighbour *neigh,
						     struct hh_cache *hh);
	void			(*header_cache_update)(struct hh_cache *hh,
						       struct net_device *dev,
						       unsigned char *haddr);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*hard_header_parse)(struct sk_buff *skb,
						     unsigned char *haddr);
	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

	/* bridge stuff */
	struct net_bridge_port	*br_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* The TX queue control structures */
	unsigned int			egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[0];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32
#define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

#define SET_MODULE_OWNER(dev) do { } while (0)
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

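/* Illustrative sketch (not part of this header): how a PCI driver typically
 * pairs alloc_etherdev()/netdev_priv() with SET_NETDEV_DEV() so sysfs links
 * the logical device to its physical parent.  The struct name foo_priv and
 * the probe helper are hypothetical.
 *
 *	struct foo_priv {
 *		void __iomem *regs;
 *	};
 *
 *	static int foo_probe(struct pci_dev *pdev)
 *	{
 *		struct net_device *dev = alloc_etherdev(sizeof(struct foo_priv));
 *		struct foo_priv *priv;
 *
 *		if (!dev)
 *			return -ENOMEM;
 *		priv = netdev_priv(dev);
 *		// stash hardware state in priv, then:
 *		SET_NETDEV_DEV(dev, &pdev->dev);
 *		return register_netdev(dev);
 *	}
 */
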
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	      */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern struct net_device	loopback_dev;		/* The loopback */
extern struct list_head		dev_base_head;		/* All devices */
extern rwlock_t			dev_base_lock;		/* Device list lock */

#define for_each_netdev(d)		\
		list_for_each_entry(d, &dev_base_head, dev_list)
#define for_each_netdev_safe(d, n)	\
		list_for_each_entry_safe(d, n, &dev_base_head, dev_list)
#define for_each_netdev_continue(d)	\
		list_for_each_entry_continue(d, &dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;

	lh = dev->dev_list.next;
	return lh == &dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(void)
{
	return list_empty(&dev_base_head) ? NULL :
		net_device_entry(dev_base_head.next);
}

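/* Illustrative sketch (not part of this header): walking the global device
 * list with for_each_netdev() while holding dev_base_lock for reading.  The
 * counting function is hypothetical.
 *
 *	static int count_running_devices(void)
 *	{
 *		struct net_device *dev;
 *		int count = 0;
 *
 *		read_lock(&dev_base_lock);
 *		for_each_netdev(dev)
 *			if (netif_running(dev))
 *				count++;
 *		read_unlock(&dev_base_lock);
 *		return count;
 *	}
 */
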
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags(unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(const char *name);
extern struct net_device	*__dev_get_by_name(const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, void *v);
extern struct net_device	*dev_get_by_index(int ifindex);
extern struct net_device	*__dev_get_by_index(int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */

struct softnet_data
{
	struct net_device	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct net_device	backlog_dev;	/* Sorry. 8) */
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data,softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}

static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__LINK_STATE_XOFF, &dev->state);
		return;
	}
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}

static inline int netif_queue_stopped(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}

static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

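/* Illustrative sketch (not part of this header): typical flow control in a
 * driver's hard_start_xmit path - stop the queue when the TX ring fills and
 * wake it again from the TX-completion handler.  The ring helpers are
 * hypothetical.
 *
 *	static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		foo_queue_to_ring(dev, skb);
 *		if (foo_ring_full(dev))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *	}
 *
 *	// later, in the TX-completion handler, once descriptors are reclaimed:
 *	//	if (netif_queue_stopped(dev) && foo_ring_has_room(dev))
 *	//		netif_wake_queue(dev);
 */
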
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}

static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(dev);
#endif
}

static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
#else
	return 0;
#endif
}
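
/* Illustrative sketch (not part of this header): a multiqueue-capable driver
 * picks a TX ring per packet and only stops the matching subqueue when that
 * ring fills, leaving the other queues running.  The ring-selection and ring
 * helpers are hypothetical.
 *
 *	static int foo_mq_start_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		u16 ring = foo_select_ring(dev, skb);
 *
 *		foo_queue_to_ring(dev, ring, skb);
 *		if (foo_ring_full(dev, ring))
 *			netif_stop_subqueue(dev, ring);
 *		return NETDEV_TX_OK;
 *	}
 */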

/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
static inline void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(unsigned int cmd, void __user *);
extern int		dev_ethtool(struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern void		dev_init(void);

extern int		netdev_budget;

835extern void netdev_run_todo(void);
836
837static inline void dev_put(struct net_device *dev)
838{
839 atomic_dec(&dev->refcnt);
840}
841
15333061
SH
842static inline void dev_hold(struct net_device *dev)
843{
844 atomic_inc(&dev->refcnt);
845}
1da177e4
LT
846
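/* Illustrative sketch (not part of this header): the usual reference-counting
 * pattern around a lookup that returns a held device.  The helper name is
 * hypothetical.
 *
 *	static void poke_device(int ifindex)
 *	{
 *		struct net_device *dev = dev_get_by_index(ifindex);
 *
 *		if (!dev)
 *			return;
 *		// ... use dev; the lookup took a reference for us ...
 *		dev_put(dev);
 *	}
 */
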
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/* Hot-plugging. */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
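
/* Illustrative sketch (not part of this header): how a driver typically
 * feeds a "debug" module parameter through netif_msg_init() at probe time
 * and then gates its printks on the netif_msg_*() helpers.  The private
 * struct and default mask are hypothetical.
 *
 *	static int debug = -1;			// -1 means "use defaults"
 *	module_param(debug, int, 0);
 *
 *	// in the probe routine:
 *	//	priv->msg_enable = netif_msg_init(debug,
 *	//					  NETIF_MSG_DRV | NETIF_MSG_LINK);
 *	// later:
 *	//	if (netif_msg_link(priv))
 *	//		printk(KERN_INFO "%s: link is up\n", dev->name);
 */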

/* Test if receive needs to be scheduled */
static inline int __netif_rx_schedule_prep(struct net_device *dev)
{
	return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev)
{
	return netif_running(dev) && __netif_rx_schedule_prep(dev);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */

extern void __netif_rx_schedule(struct net_device *dev);

/* Try to reschedule poll. Called by irq handler. */

static inline void netif_rx_schedule(struct net_device *dev)
{
	if (netif_rx_schedule_prep(dev))
		__netif_rx_schedule(dev);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete().
 * Do not inline this?
 */
static inline int netif_rx_reschedule(struct net_device *dev, int undo)
{
	if (netif_rx_schedule_prep(dev)) {
		unsigned long flags;

		dev->quota += undo;

		local_irq_save(flags);
		list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		local_irq_restore(flags);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev)
{
	BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state));
	list_del(&dev->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev);
	local_irq_restore(flags);
}

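/* Illustrative sketch (not part of this header): the classic ->poll() use of
 * this API - the interrupt handler disables RX interrupts and schedules the
 * device, and the poll routine drains the ring, then re-enables interrupts
 * once it has called netif_rx_complete().  The hardware helpers are
 * hypothetical and error handling is trimmed.
 *
 *	// in the interrupt handler:
 *	//	if (netif_rx_schedule_prep(dev)) {
 *	//		foo_disable_rx_irq(dev);
 *	//		__netif_rx_schedule(dev);
 *	//	}
 *
 *	static int foo_poll(struct net_device *dev, int *budget)
 *	{
 *		int done = foo_rx_ring_drain(dev, min(*budget, dev->quota));
 *
 *		*budget -= done;
 *		dev->quota -= done;
 *		if (foo_rx_ring_empty(dev)) {
 *			netif_rx_complete(dev);
 *			foo_enable_rx_irq(dev);
 *			return 0;		// done polling
 *		}
 *		return 1;			// more work, stay on poll list
 *	}
 */
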
static inline void netif_poll_disable(struct net_device *dev)
{
	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state))
		/* No hurry. */
		schedule_timeout_interruptible(1);
}

static inline void netif_poll_enable(struct net_device *dev)
{
	smp_mb__before_clear_bit();
	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}

static inline void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

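/* Illustrative sketch (not part of this header): serializing a slow-path
 * operation (e.g. a ring reset) against hard_start_xmit by taking the TX
 * lock from process context.  The reset helper is hypothetical.
 *
 *	static void foo_reset_rings(struct net_device *dev)
 *	{
 *		netif_tx_lock_bh(dev);	// no hard_start_xmit can run now
 *		foo_reinit_tx_ring(dev);
 *		netif_tx_unlock_bh(dev);
 *	}
 */
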
static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
					  void (*setup)(struct net_device *),
					  unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
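
/* Illustrative sketch (not part of this header): allocating a multiqueue
 * Ethernet device with per-device private data via alloc_netdev_mq().  The
 * private struct and queue count are hypothetical.
 *
 *	struct foo_priv { int dummy; };
 *
 *	static struct net_device *foo_alloc(void)
 *	{
 *		return alloc_netdev_mq(sizeof(struct foo_priv), "foo%d",
 *				       ether_setup, 8);
 *	}
 */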
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern void		dev_mc_discard(struct net_device *dev);
extern int		__dev_addr_delete(struct dev_addr_list **list, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, void *addr, int alen, int newonly);
extern void		__dev_addr_discard(struct dev_addr_list **list);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

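/* Illustrative sketch (not part of this header): the software fallback a
 * transmit path can take when the device cannot segment an skb itself -
 * roughly what dev_hard_start_xmit() does with skb_gso_segment().  Error
 * handling is trimmed and the helper name is hypothetical.
 *
 *	static int foo_xmit_one(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		if (netif_needs_gso(dev, skb)) {
 *			struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *			if (IS_ERR(segs))
 *				return NETDEV_TX_BUSY;
 *			// queue each segment on the segs->next chain instead
 *		}
 *		return dev->hard_start_xmit(skb, dev);
 *	}
 */
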
/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

#endif /* __KERNEL__ */

#endif	/* _LINUX_DEV_H */