/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall, <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
					/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV		/* feature macro: alloc_xxxdev
					   functions are available. */
#define HAVE_FREE_NETDEV		/* free_netdev() */
#define HAVE_NETDEV_PRIV		/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped			*/
#define NET_XMIT_CN		2	/* congestion notification	*/
#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
#define NET_XMIT_BYPASS		4	/* packet does not leave via dequeue;
					   (TC use only - dev_queue_xmit
					   returns this as NET_XMIT_SUCCESS) */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)

#endif

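/*
 * Example (not part of the original header): a minimal sketch of how a
 * caller typically folds dev_queue_xmit()'s return code through
 * net_xmit_eval(), treating NET_XMIT_CN as success for accounting.
 */
#if 0	/* illustrative only */
static int example_xmit(struct sk_buff *skb)
{
	int rc = dev_queue_xmit(skb);

	/* 0 for NET_XMIT_SUCCESS and NET_XMIT_CN, the raw code otherwise */
	return net_xmit_eval(rc);
}
#endif
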
#define MAX_ADDR_LEN	32		/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK	 0	/* driver took care of packet */
#define NETDEV_TX_BUSY	 1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED -1	/* driver tx lock was already taken */

#ifdef __KERNEL__

/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif  /* __KERNEL__ */

struct net_device_subqueue
{
	/* Give a control state for each queue.  This struct may contain
	 * per-queue locks in the future.
	 */
	unsigned long	state;
};

/*
 *	Network device statistics. Akin to the 2.0 ether stats but
 *	with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;		/* total packets received	*/
	unsigned long	tx_packets;		/* total packets transmitted	*/
	unsigned long	rx_bytes;		/* total bytes received		*/
	unsigned long	tx_bytes;		/* total bytes transmitted	*/
	unsigned long	rx_errors;		/* bad packets received		*/
	unsigned long	tx_errors;		/* packet transmit problems	*/
	unsigned long	rx_dropped;		/* no space in linux buffers	*/
	unsigned long	tx_dropped;		/* no space available in linux	*/
	unsigned long	multicast;		/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recved pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error */
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};


/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 *	We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry			*/
	atomic_t	hh_refcnt;	/* number of users		*/
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, f.e ETH_P_IP
					 * NOTE:  For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)

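/*
 * Example (not from the original header): the usual allocation pattern
 * these macros support - size the skb with LL_ALLOCATED_SPACE() and
 * reserve LL_RESERVED_SPACE() of headroom before adding payload
 * (compare arp_create()).
 */
#if 0	/* illustrative only */
static struct sk_buff *example_alloc_ll_skb(struct net_device *dev, int payload)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_ALLOCATED_SPACE(dev) + payload, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* headroom for the link-layer header */
	skb->dev = dev;
	return skb;
}
#endif
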
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};

/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_SCHED,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit.  This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure that only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

static inline void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif

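/*
 * Example (hypothetical driver, not part of this header): the intended
 * NAPI flow.  The interrupt handler claims NAPI_STATE_SCHED via
 * napi_schedule_prep(), masks device interrupts and schedules the poll;
 * the poll routine calls napi_complete() and unmasks interrupts once it
 * runs out of work before exhausting its budget.  struct example_priv
 * and the hw_*() helpers are assumptions for illustration.
 */
#if 0	/* illustrative only */
struct example_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv, napi);
	int work_done = hw_rx_clean(priv, budget);	/* hypothetical rx processing */

	if (work_done < budget) {
		napi_complete(napi);
		hw_enable_irq(priv);			/* hypothetical */
	}
	return work_done;
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		hw_disable_irq(priv);			/* hypothetical */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif
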
enum netdev_queue_state_t
{
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_QDISC_RUNNING,
};

struct netdev_queue {
	spinlock_t		lock;
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	struct sk_buff		*gso_skb;
	spinlock_t		_xmit_lock;
	int			xmit_lock_owner;
	struct Qdisc		*qdisc_sleeping;
	struct list_head	qdisc_list;
	struct netdev_queue	*next_sched;
};

/*
 *	The DEVICE structure.
 *	Actually, this whole structure is a big mistake.  It mixes I/O
 *	data with strictly "high-level" data, and it has to know about
 *	almost every data structure used in the INET module.
 *
 *	FIXME: cleanup struct net_device such that network protocol info
 *	moves out.
 */

struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers  */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE	16384	/* Has multiple TX/RX queues */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)

	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;


	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */


	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data	*/
	void			*ip6_ptr;	/* IPv6 specific data	*/
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data	*/
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/

	struct netdev_queue	rx_queue;
	struct netdev_queue	tx_queue ____cacheline_aligned_in_smp;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */

/*
 * One part is mostly used on xmit path (device)
 */
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo;	/* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines. */
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						   unsigned short vid);

	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;
	/* GARP */
	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long		vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;

	/* The TX queue control structures */
	unsigned int		egress_subqueue_count;
	struct net_device_subqueue	egress_subqueue[1];
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define	NETDEV_ALIGN		32
#define	NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return dev->priv;
}

/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}

/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
static inline void netif_napi_del(struct napi_struct *napi)
{
#ifdef CONFIG_NETPOLL
	list_del(&napi->dev_list);
#endif
}

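/*
 * Example (hypothetical, continuing the earlier example_priv sketch):
 * register the NAPI context at probe time, then enable it from the
 * open() handler.  netif_napi_add() leaves NAPI_STATE_SCHED set, so the
 * context cannot be scheduled until napi_enable() clears it.
 */
#if 0	/* illustrative only */
static void example_setup_napi(struct net_device *dev, struct example_priv *priv)
{
	/* must run before any other NAPI call on this context */
	netif_napi_add(dev, &priv->napi, example_poll, 64);
}

static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);
	/* ... start the tx queue, unmask device interrupts ... */
	return 0;
}
#endif
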
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t				dev_base_lock;		/* Device list lock */


#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}

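/*
 * Example (not part of the original header): walking the per-namespace
 * device list with for_each_netdev().  Readers hold dev_base_lock (or
 * the RTNL) across the walk.
 */
#if 0	/* illustrative only */
static void example_dump_devices(struct net *net)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	for_each_netdev(net, dev)
		printk(KERN_DEBUG "%s: flags 0x%x\n", dev->name, dev->flags);
	read_unlock(&dev_base_lock);
}
#endif
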
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device	*dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device	*dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device	*__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device	*dev_get_by_flags(struct net *net, unsigned short flags,
						  unsigned short mask);
extern struct net_device	*dev_get_by_name(struct net *net, const char *name);
extern struct net_device	*__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}

typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t * gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct netdev_queue	*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct netdev_queue *txq);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq);
}

static inline void netif_schedule(struct net_device *dev)
{
	netif_schedule_queue(&dev->tx_queue);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(&dev->tx_queue);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue);
}

static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(&dev->tx_queue);
}

/**
 *	netif_stop_queue - stop the transmit queue
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(&dev->tx_queue);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(&dev->tx_queue);
}

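/*
 * Example (hypothetical driver): the flow-control pattern these helpers
 * exist for - stop the queue from hard_start_xmit() when the tx ring
 * fills, and wake it from the completion path once descriptors are
 * reclaimed.  The hw_*() helpers and example_priv are assumptions for
 * illustration.
 */
#if 0	/* illustrative only */
static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	hw_post_tx(priv, skb);			/* hypothetical descriptor setup */
	if (hw_tx_ring_full(priv))		/* hypothetical */
		netif_stop_queue(dev);
	return NETDEV_TX_OK;
}

static void example_tx_complete(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	hw_reclaim_tx(priv);			/* hypothetical */
	if (netif_queue_stopped(dev) && hw_tx_ring_space(priv))
		netif_wake_queue(dev);
}
#endif
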
/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__QUEUE_STATE_XOFF, &dev->egress_subqueue[queue_index].state);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	return test_bit(__QUEUE_STATE_XOFF,
			&dev->egress_subqueue[queue_index].state);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF,
			       &dev->egress_subqueue[queue_index].state))
		__netif_schedule(&dev->tx_queue);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if device has multiple transmit queues.
 * Always false unless the device advertises NETIF_F_MULTI_QUEUE.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return (!!(NETIF_F_MULTI_QUEUE & dev->features));
}

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern void		netif_nit_deliver(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if device is dormant.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if carrier is operational
 */
static inline int netif_oper_up(const struct net_device *dev) {
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 *	netif_device_present - is device available or removed
 *	@dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}

extern void netif_device_detach(struct net_device *dev);

extern void netif_device_attach(struct net_device *dev);

/*
 * Network interface message level settings
 */
#define HAVE_NETIF_MSG 1

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)

static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}

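/*
 * Example (hypothetical driver): the conventional msg_enable usage -
 * seed the mask from a module parameter via netif_msg_init(), then gate
 * log output on the per-category tests.  Assumes a msg_enable field in
 * the driver-private struct, which is all the netif_msg_*() macros need.
 */
#if 0	/* illustrative only */
static int debug = -1;	/* -1 selects the driver's defaults */

static void example_init_msg_level(struct example_priv *priv)
{
	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	if (netif_msg_link(priv))
		printk(KERN_INFO "link state messages enabled\n");
}
#endif
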
/* Test if receive needs to be scheduled but only if up */
static inline int netif_rx_schedule_prep(struct net_device *dev,
					 struct napi_struct *napi)
{
	return napi_schedule_prep(napi);
}

/* Add interface to tail of rx poll list. This assumes that _prep has
 * already been called and returned 1.
 */
static inline void __netif_rx_schedule(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_schedule(napi);
}

/* Try to reschedule poll. Called by irq handler. */
static inline void netif_rx_schedule(struct net_device *dev,
				     struct napi_struct *napi)
{
	if (netif_rx_schedule_prep(dev, napi))
		__netif_rx_schedule(dev, napi);
}

/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
static inline int netif_rx_reschedule(struct net_device *dev,
				      struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__netif_rx_schedule(dev, napi);
		return 1;
	}
	return 0;
}

/* same as netif_rx_complete, except that local_irq_save(flags)
 * has already been issued
 */
static inline void __netif_rx_complete(struct net_device *dev,
				       struct napi_struct *napi)
{
	__napi_complete(napi);
}

/* Remove interface from poll list: it must be in the poll list
 * on current cpu. This primitive is called by dev->poll(), when
 * it completes the work. The device cannot be out of poll list at this
 * moment, it is BUG().
 */
static inline void netif_rx_complete(struct net_device *dev,
				     struct napi_struct *napi)
{
	unsigned long flags;

	local_irq_save(flags);
	__netif_rx_complete(dev, napi);
	local_irq_restore(flags);
}

/**
 *	netif_tx_lock - grab network device transmit lock
 *	@dev: network device
 *	@cpu: cpu number of lock owner
 *
 * Get network device transmit lock
 */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

static inline void netif_tx_lock(struct net_device *dev)
{
	__netif_tx_lock(&dev->tx_queue, smp_processor_id());
}

static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	__netif_tx_lock_bh(&dev->tx_queue);
}

static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

static inline int netif_tx_trylock(struct net_device *dev)
{
	return __netif_tx_trylock(&dev->tx_queue);
}

static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	__netif_tx_unlock(&dev->tx_queue);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	__netif_tx_unlock_bh(&dev->tx_queue);
}

#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}
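
/*
 * Example (rough shape of the core xmit path; compare dev_queue_xmit()):
 * HARD_TX_LOCK serializes entry into the driver's hard_start_xmit unless
 * the driver does its own locking (NETIF_F_LLTX), in which case the
 * macro compiles away.  This is a sketch, not the actual core code.
 */
#if 0	/* illustrative only */
static int example_xmit_one(struct sk_buff *skb, struct net_device *dev,
			    struct netdev_queue *txq)
{
	int rc = NETDEV_TX_BUSY;

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_tx_queue_stopped(txq))
		rc = dev->hard_start_xmit(skb, dev);
	HARD_TX_UNLOCK(dev, txq);

	return rc;
}
#endif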

static inline void netif_tx_disable(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	netif_stop_queue(dev);
	netif_tx_unlock_bh(dev);
}

static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}

/* These functions live elsewhere (drivers/net/net_init.c, but related) */

extern void		ether_setup(struct net_device *dev);

/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
				       void (*setup)(struct net_device *),
				       unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
	alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int		register_netdev(struct net_device *dev);
extern void		unregister_netdev(struct net_device *dev);
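
/*
 * Example (hypothetical driver): minimal loadable-driver setup with
 * alloc_netdev()/netdev_priv()/register_netdev().  The "ex%d" name and
 * the example_open/example_stop/example_hard_start_xmit handlers are
 * assumptions for illustration.
 */
#if 0	/* illustrative only */
static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct example_priv), "ex%d", ether_setup);
	if (dev == NULL)
		return -ENOMEM;

	dev->open            = example_open;
	dev->stop            = example_stop;
	dev->hard_start_xmit = example_hard_start_xmit;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}
#endif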

/* Functions used for secondary unicast and multicast support */
extern void		dev_set_rx_mode(struct net_device *dev);
extern void		__dev_set_rx_mode(struct net_device *dev);
extern int		dev_unicast_delete(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_add(struct net_device *dev, void *addr, int alen);
extern int		dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void		dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int		dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
extern int		dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
extern int		dev_mc_sync(struct net_device *to, struct net_device *from);
extern void		dev_mc_unsync(struct net_device *to, struct net_device *from);
extern int		__dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
extern int		__dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
extern int		__dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern void		__dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
extern int		dev_set_promiscuity(struct net_device *dev, int inc);
extern int		dev_set_allmulti(struct net_device *dev, int inc);
extern void		netdev_state_change(struct net_device *dev);
extern void		netdev_bonding_change(struct net_device *dev);
extern void		netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void		dev_load(struct net *net, const char *name);
extern void		dev_mcast_init(void);
extern int		netdev_max_backlog;
extern int		weight_p;
extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
extern int		skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff	*skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void		net_enable_timestamp(void);
extern void		net_disable_timestamp(void);

#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif

extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);

extern void linkwatch_run_queue(void);

extern int netdev_compute_features(unsigned long all, unsigned long one);

static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

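/*
 * Example (not part of the original header): the software-GSO fallback
 * decision made on the xmit path (compare dev_hard_start_xmit()) -
 * segment only when the device cannot handle this skb's gso_type
 * itself.  A sketch only; segment queueing is elided.
 */
#if 0	/* illustrative only */
static int example_maybe_segment(struct sk_buff *skb, struct net_device *dev)
{
	if (netif_needs_gso(dev, skb)) {
		struct sk_buff *segs = skb_gso_segment(skb, dev->features);

		if (IS_ERR(segs))
			return PTR_ERR(segs);
		/* ... hand each segment to the driver in turn ... */
	}
	return 0;
}
#endif
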
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device *master = dev->master;

	if (master &&
	    (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __constant_htons(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __constant_htons(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}

#endif /* __KERNEL__ */

#endif	/* _LINUX_DEV_H */