/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall, <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority, prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

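/*
 * Illustrative sketch (not part of this header): how a caller might fold
 * the congestion-notification code from dev_queue_xmit() into a plain
 * success/failure return, treating NET_XMIT_CN as success since the
 * packet was not necessarily lost.  my_xmit_skb is a hypothetical name.
 *
 *	static int my_xmit_skb(struct sk_buff *skb)
 *	{
 *		int rc = dev_queue_xmit(skb);
 *
 *		return net_xmit_eval(rc);
 *	}
 */
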
#endif

#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK 0		/* driver took care of packet */
#define NETDEV_TX_BUSY 1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED -1	/* driver tx lock was already taken */

/*
 *	Compute the worst case header length according to the protocols
 *	used.
 */

#if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR)
#define LL_MAX_HEADER	32
#else
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#define LL_MAX_HEADER	96
#else
#define LL_MAX_HEADER	48
#endif
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

struct net_device_subqueue
{
	/* Give a control state for each queue.  This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long	state;
};

/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* received pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* received frame alignment error */
	unsigned long	rx_fifo_errors;		/* receiver fifo overrun	*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};

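/*
 * Illustrative sketch (not part of this header): since struct net_device
 * below embeds a struct net_device_stats ("stats"), a driver that updates
 * those counters in its rx/tx paths can implement its get_stats hook by
 * simply returning the embedded structure.  my_get_stats is a
 * hypothetical name.
 *
 *	static struct net_device_stats *my_get_stats(struct net_device *dev)
 *	{
 *		return &dev->stats;
 *	}
 */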

/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry			     */
	atomic_t	hh_refcnt;	/* number of users		     */
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs.	*/
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 */
#define LL_RESERVED_SPACE(dev) \
	(((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

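/*
 * Illustrative sketch (not part of this header): a sender allocating an
 * skb would reserve LL_RESERVED_SPACE(dev) bytes of headroom so that the
 * link-layer header can later be pushed in front of the payload without
 * a reallocation.  payload_len is a hypothetical variable.
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (skb == NULL)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 */
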
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_XOFF=0,
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_SCHED,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
	__LINK_STATE_QDISC_RUNNING,
};


/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

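/*
 * Illustrative sketch (not part of this header): a typical driver
 * interrupt handler masks its rx interrupts and hands the remaining work
 * to NAPI.  my_interrupt and struct my_priv are hypothetical; the
 * device-specific interrupt masking is elided.
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */
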
/* Try to reschedule poll. Called by dev->poll() after napi_complete().  */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	local_irq_disable();
	__napi_complete(n);
	local_irq_enable();
}

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers  */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)

#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	struct net_device	*next_sched;

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;


	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */


	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	int			promiscuity;
	int			allmulti;


	/* Protocol specific pointers */

	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data	*/
	void			*ip6_ptr;	/* IPv6 specific data	*/
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

	/* ingress path synchronizer */
	spinlock_t		ingress_lock;
	struct Qdisc		*qdisc_ingress;

/*
 * Cache line mostly used on queue transmit path (qdisc)
 */
	/* device queue lock */
	spinlock_t		queue_lock ____cacheline_aligned_in_smp;
	struct Qdisc		*qdisc;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

	/* Partially transmitted GSO packet. */
	struct sk_buff		*gso_skb;

/*
 * One part is mostly used on xmit path (device)
 */
	/* hard_start_xmit synchronizer */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	/* cpu id of processor entered to hard_start_xmit or -1,
	   if nobody entered there.
	 */
	int			xmit_lock_owner;
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines.	*/
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

	/* The TX queue control structures */
	unsigned int		egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32
#define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	dev->nd_net = net;
#endif
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, it will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}

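/*
 * Illustrative sketch (not part of this header): a driver poll routine
 * processes up to "budget" packets and calls napi_complete() once the
 * ring is drained (after re-enabling the device's rx interrupts);
 * netif_napi_add() wires it up at probe time.  my_poll, struct my_priv
 * and my_rx_ring_clean are hypothetical.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work_done = my_rx_ring_clean(priv, budget);
 *
 *		if (work_done < budget)
 *			napi_complete(napi);
 *		return work_done;
 *	}
 *
 *	in the probe routine:
 *		netif_napi_add(dev, &priv->napi, my_poll, 64);
 */
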
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t				dev_base_lock;		/* Device list lock */

#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)	\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

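/*
 * Illustrative sketch (not part of this header): walking all devices in a
 * namespace under dev_base_lock, which protects the device list for
 * readers.
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		printk(KERN_DEBUG "found %s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */
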
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

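/*
 * Illustrative sketch (not part of this header): building a link-layer
 * header via the device's header_ops, as eth_header() would do for
 * Ethernet.  daddr is assumed to point at the destination hardware
 * address; ETH_P_IP is just an example protocol.
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, daddr, dev->dev_addr,
 *			    skb->len) < 0)
 *		goto drop;
 */
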
static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct net_device	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct net_device *dev);

static inline void netif_schedule(struct net_device *dev)
{
	if (!test_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	clear_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__LINK_STATE_XOFF, &dev->state);
		return;
	}
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
		__netif_schedule(dev);
}

/**
 *	netif_stop_queue - stop the transmit queue
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	set_bit(__LINK_STATE_XOFF, &dev->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flow-blocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_XOFF, &dev->state);
}

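/*
 * Illustrative sketch (not part of this header): classic driver flow
 * control with the helpers above - stop the queue when the tx ring fills
 * in hard_start_xmit, and wake it from the tx-completion path once
 * descriptors are reclaimed.  my_tx_ring_full and my_tx_ring_has_room
 * are hypothetical.
 *
 *	in the hard_start_xmit routine:
 *		post the skb to the hardware ring, then
 *		if (my_tx_ring_full(priv))
 *			netif_stop_queue(dev);
 *		return NETDEV_TX_OK;
 *
 *	in the tx-completion handler:
 *		reclaim completed descriptors, then
 *		if (netif_queue_stopped(dev) && my_tx_ring_has_room(priv))
 *			netif_wake_queue(dev);
 */
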
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	clear_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__LINK_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
#endif
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return test_bit(__LINK_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
#else
	return 0;
#endif
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__LINK_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(dev);
#endif
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues.  Always returns 0 if
 * CONFIG_NETDEVICES_MULTIQUEUE is not configured.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
#else
	return 0;
#endif
}

/* Use this variant when it is known for sure that it
 * is executing from interrupt context.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * either from interrupt or non-interrupt context.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate, these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev) {
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}

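/*
 * Illustrative sketch (not part of this header): a driver typically takes
 * a "debug" module parameter (-1 meaning use the defaults), seeds its
 * msg_enable with netif_msg_init() at probe time, and then gates its
 * printks on the netif_msg_*() tests.  priv and debug are hypothetical.
 *
 *	priv->msg_enable = netif_msg_init(debug,
 *			NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);
 *
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link up\n", dev->name);
 */
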
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *	@cpu: cpu number of lock owner
 *
 * Get network device transmit lock
 */
static inline void __netif_tx_lock(struct net_device *dev, int cpu)
{
	spin_lock(&dev->_xmit_lock);
	dev->xmit_lock_owner = cpu;
}

static inline void netif_tx_lock(struct net_device *dev)
{
	__netif_tx_lock(dev, smp_processor_id());
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->_xmit_lock);
	dev->xmit_lock_owner = smp_processor_id();
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	int ok = spin_trylock(&dev->_xmit_lock);
	if (likely(ok))
		dev->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock(&dev->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	dev->xmit_lock_owner = -1;
	spin_unlock_bh(&dev->_xmit_lock);
}

#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(dev, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_unlock(dev);			\
	}						\
}

static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}

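/*
 * Illustrative sketch (not part of this header): code that must keep the
 * driver's hard_start_xmit out while touching tx state can bracket the
 * work with the tx lock helpers; the _bh variants also block softirqs,
 * exactly as netif_tx_disable() above does.
 *
 *	netif_tx_lock_bh(dev);
 *	modify tx ring or queue state with hard_start_xmit excluded
 *	netif_tx_unlock_bh(dev);
 */
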
/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
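/*
 * Illustrative sketch (not part of this header): the usual allocate,
 * register and error-unwind sequence for a driver.  struct my_priv, the
 * "myeth%d" name template and my_setup are hypothetical.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "myeth%d", my_setup);
 *	if (dev == NULL)
 *		return -ENOMEM;
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */
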
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		dev_set_promiscuity(struct net_device *dev, int inc);
extern void		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

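/*
 * Illustrative sketch (not part of this header): this mirrors what
 * dev_hard_start_xmit() does - if the device cannot handle this GSO skb
 * in hardware, segment it in software and transmit the resulting list.
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *		if (unlikely(IS_ERR(segs)))
 *			goto drop;
 *		queue each skb on the segs list to the device
 *	}
 */
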
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

#endif /* __KERNEL__ */

#endif /* _LINUX_NETDEVICE_H */