/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the Interfaces handler.
 *
 * Version:	@(#)dev.h	1.0.10	08/12/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Bjorn Ekwall, <bj0rn@blox.se>
 *		Pekka Riikonen <priikone@poseidon.pspt.fi>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Moved to /usr/include/linux for NET3
 */
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H

#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

#ifdef __KERNEL__
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>

#include <net/net_namespace.h>

struct vlan_group;
struct ethtool_ops;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev, ops) \
	( (netdev)->ethtool_ops = (ops) )

#define HAVE_ALLOC_NETDEV	/* feature macro: alloc_xxxdev
				   functions are available. */
#define HAVE_FREE_NETDEV	/* free_netdev() */
#define HAVE_NETDEV_PRIV	/* netdev_priv() */

#define NET_XMIT_SUCCESS	0
#define NET_XMIT_DROP		1	/* skb dropped */
#define NET_XMIT_CN		2	/* congestion notification */
#define NET_XMIT_POLICED	3	/* skb is shot by police */
#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */

/* Backlog congestion levels */
#define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
#define NET_RX_DROP		1	/* packet dropped */
#define NET_RX_CN_LOW		2	/* storm alert, just in case */
#define NET_RX_CN_MOD		3	/* Storm on its way! */
#define NET_RX_CN_HIGH		4	/* The storm is here */
#define NET_RX_BAD		5	/* packet dropped due to kernel error */

/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
 * indicates that the device will soon be dropping packets, or already drops
 * some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
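
/*
 * Example (editor's illustrative sketch, not from the original header):
 * a tunnel-style transmit path typically folds the congestion code into
 * its accounting, treating NET_XMIT_CN as "sent" ("my_xmit" is a
 * hypothetical helper):
 *
 *	err = my_xmit(skb);
 *	if (net_xmit_eval(err) == 0)
 *		stats->tx_bytes += len;
 *	else
 *		stats->tx_errors++;
 */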

#endif

#define MAX_ADDR_LEN	32	/* Largest hardware address length */

/* Driver transmit return codes */
#define NETDEV_TX_OK		0	/* driver took care of packet */
#define NETDEV_TX_BUSY		1	/* driver tx path was busy */
#define NETDEV_TX_LOCKED	-1	/* driver tx lock was already taken */

#ifdef __KERNEL__

/*
 * Compute the worst case header length according to the protocols
 * used.
 */

#if defined(CONFIG_WLAN_80211) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
#  define LL_MAX_HEADER 128
# else
#  define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif

#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
    !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
    !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
    !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif

#endif /* __KERNEL__ */

/*
 * Network device statistics. Akin to the 2.0 ether stats but
 * with byte counters.
 */

struct net_device_stats
{
	unsigned long	rx_packets;	/* total packets received	*/
	unsigned long	tx_packets;	/* total packets transmitted	*/
	unsigned long	rx_bytes;	/* total bytes received		*/
	unsigned long	tx_bytes;	/* total bytes transmitted	*/
	unsigned long	rx_errors;	/* bad packets received		*/
	unsigned long	tx_errors;	/* packet transmit problems	*/
	unsigned long	rx_dropped;	/* no space in linux buffers	*/
	unsigned long	tx_dropped;	/* no space available in linux	*/
	unsigned long	multicast;	/* multicast packets received	*/
	unsigned long	collisions;

	/* detailed rx_errors: */
	unsigned long	rx_length_errors;
	unsigned long	rx_over_errors;		/* receiver ring buff overflow	*/
	unsigned long	rx_crc_errors;		/* recv'd pkt with crc error	*/
	unsigned long	rx_frame_errors;	/* recv'd frame alignment error	*/
	unsigned long	rx_fifo_errors;		/* recv'r fifo overrun		*/
	unsigned long	rx_missed_errors;	/* receiver missed packet	*/

	/* detailed tx_errors */
	unsigned long	tx_aborted_errors;
	unsigned long	tx_carrier_errors;
	unsigned long	tx_fifo_errors;
	unsigned long	tx_heartbeat_errors;
	unsigned long	tx_window_errors;

	/* for cslip etc */
	unsigned long	rx_compressed;
	unsigned long	tx_compressed;
};
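
/*
 * Example (editor's sketch): a driver that accounts into dev->stats can
 * implement its get_stats hook trivially ("my_get_stats" is hypothetical):
 *
 *	static struct net_device_stats *my_get_stats(struct net_device *dev)
 *	{
 *		return &dev->stats;
 *	}
 */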

/* Media selection options. */
enum {
	IF_PORT_UNKNOWN = 0,
	IF_PORT_10BASE2,
	IF_PORT_10BASET,
	IF_PORT_AUI,
	IF_PORT_100BASET,
	IF_PORT_100BASETX,
	IF_PORT_100BASEFX
};

#ifdef __KERNEL__

#include <linux/cache.h>
#include <linux/skbuff.h>

struct neighbour;
struct neigh_parms;
struct sk_buff;

struct netif_rx_stats
{
	unsigned total;
	unsigned dropped;
	unsigned time_squeeze;
	unsigned cpu_collision;
};

DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);

struct dev_addr_list
{
	struct dev_addr_list	*next;
	u8			da_addr[MAX_ADDR_LEN];
	u8			da_addrlen;
	u8			da_synced;
	int			da_users;
	int			da_gusers;
};

/*
 * We tag multicasts with these structures.
 */

#define dev_mc_list	dev_addr_list
#define dmi_addr	da_addr
#define dmi_addrlen	da_addrlen
#define dmi_users	da_users
#define dmi_gusers	da_gusers

struct hh_cache
{
	struct hh_cache *hh_next;	/* Next entry		*/
	atomic_t	hh_refcnt;	/* number of users	*/
/*
 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
 * cache line on SMP.
 * They are mostly read, but hh_refcnt may be changed quite frequently,
 * incurring cache line ping pongs.
 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, e.g. ETH_P_IP
					 * NOTE: For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len) + (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};

/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev, extra) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (extra)) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len + (dev)->needed_headroom + (dev)->needed_tailroom) & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
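
/*
 * Worked example (editor's note, assuming plain Ethernet with
 * hard_header_len == 14, needed_headroom == 0 and HH_DATA_MOD == 16):
 *
 *	HH_DATA_OFF(14)    == 2   (cached header starts 2 bytes into hh_data)
 *	HH_DATA_ALIGN(14)  == 16  (14 rounded up to a 16-byte boundary)
 *	LL_RESERVED_SPACE  == 16  ((14 & ~15) + 16)
 */
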
struct header_ops {
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
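
/*
 * Example (editor's sketch): an Ethernet driver normally inherits the
 * shared implementation that ether_setup() installs rather than
 * providing its own methods:
 *
 *	dev->header_ops = &eth_header_ops;
 */
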
/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

enum netdev_state_t
{
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};


/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];
	struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8

extern int __init netdev_boot_setup(char *str);

/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;
	int			weight;
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
	struct net_device	*dev;
	struct list_head	dev_list;
#endif
};

enum
{
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
};

extern void __napi_schedule(struct napi_struct *n);

static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Test if NAPI routine is already running, and if not mark
 * it as running.  This is used as a condition variable to
 * ensure that only one NAPI poll instance runs.  We also make
 * sure there is no pending NAPI disable.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	return !napi_disable_pending(n) &&
		!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Schedule NAPI poll routine to be called if it is not already
 * running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}
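
/*
 * Example (editor's sketch; "my_priv", "my_irq_handler" and
 * "my_hw_disable_irqs" are hypothetical): the usual NAPI pattern is to
 * mask device interrupts in the handler and defer rx work to the poll
 * routine:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_hw_disable_irqs(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */
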
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (napi_schedule_prep(napi)) {
		__napi_schedule(napi);
		return 1;
	}
	return 0;
}

static inline void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

/**
 *	napi_complete - NAPI processing complete
 *	@n: napi context
 *
 * Mark NAPI processing as complete.
 */
static inline void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}

/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}

/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 * Resume NAPI from being scheduled on this context.
 * Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}

#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
# define napi_synchronize(n)	barrier()
#endif
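
/*
 * Example (editor's sketch): a typical down path quiesces NAPI before
 * tearing down rx resources; the matching up path re-enables it:
 *
 *	napi_disable(&priv->napi);	(waits for an in-flight poll)
 *	... free rx ring ...
 *	napi_enable(&priv->napi);
 */
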
enum netdev_queue_state_t
{
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
};

struct netdev_queue {
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;
	spinlock_t		_xmit_lock;
	int			xmit_lock_owner;
	struct Qdisc		*qdisc_sleeping;
} ____cacheline_aligned_in_smp;

/*
 * The DEVICE structure.
 * Actually, this whole structure is a big mistake.  It mixes I/O
 * data with strictly "high-level" data, and it has to know about
 * almost every data structure used in the INET module.
 *
 * FIXME: cleanup struct net_device such that network protocol info
 * moves out.
 */

struct net_device
{

	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	/* device name hash chain */
	struct hlist_node	name_hlist;

	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/

	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */

	unsigned char		if_port;	/* Selectable AUI, TP, .. */
	unsigned char		dma;		/* DMA channel		*/

	unsigned long		state;

	struct list_head	dev_list;
#ifdef CONFIG_NETPOLL
	struct list_head	napi_list;
#endif

	/* The device initialization function. Called only once. */
	int			(*init)(struct net_device *dev);

	/* ------- Fields preinitialized in Space.c finish here ------- */

	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopback. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_LRO		32768	/* large receive offload */

	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0xffff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)

	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)


#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
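
/*
 * Example (editor's sketch): a driver advertises its offload abilities
 * before register_netdevice(), e.g. IPv4 checksum offload plus
 * scatter/gather and TSO:
 *
 *	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
 */
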
	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;


	struct net_device_stats* (*get_stats)(struct net_device *dev);
	struct net_device_stats	stats;

#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data	*wireless_data;
#endif
	const struct ethtool_ops *ethtool_ops;

	/* Hardware header description */
	const struct header_ops *header_ops;

	/*
	 * This marks the end of the "visible" part of the structure. All
	 * fields hereafter are internal to the system, and may change at
	 * will (read: may be cleaned up at will).
	 */


	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */

	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */

	unsigned		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/

	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;

	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */

	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */

	spinlock_t		addr_list_lock;
	struct dev_addr_list	*uc_list;	/* Secondary unicast mac addresses */
	int			uc_count;	/* Number of installed ucasts	*/
	int			uc_promisc;
	struct dev_addr_list	*mc_list;	/* Multicast mac addresses	*/
	int			mc_count;	/* Number of installed mcasts	*/
	unsigned int		promiscuity;
	unsigned int		allmulti;


	/* Protocol specific pointers */

	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */

/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		dev_addr[MAX_ADDR_LEN];	/* hw address, (before bcast
							   because most packets are unicast) */

	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast addr */

	struct netdev_queue	rx_queue;

	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;

	/* Number of TX queues allocated at alloc_netdev_mq() time */
	unsigned int		num_tx_queues;

	/* Number of TX queues currently active in device */
	unsigned int		real_num_tx_queues;

	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
	void			*priv;	/* pointer to private data	*/
	int			(*hard_start_xmit) (struct sk_buff *skb,
						    struct net_device *dev);
	/* These may be needed for future network-power-down code. */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/

	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;

/*
 * refcnt is a very hot point, so align it on SMP
 */
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;

	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;

	struct net_device	*link_watch_next;

	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	} reg_state;

	/* Called after device is detached from network. */
	void			(*uninit)(struct net_device *dev);
	/* Called after last user reference disappears. */
	void			(*destructor)(struct net_device *dev);

	/* Pointers to interface service routines. */
	int			(*open)(struct net_device *dev);
	int			(*stop)(struct net_device *dev);
#define HAVE_NETDEV_POLL
#define HAVE_CHANGE_RX_FLAGS
	void			(*change_rx_flags)(struct net_device *dev,
						   int flags);
#define HAVE_SET_RX_MODE
	void			(*set_rx_mode)(struct net_device *dev);
#define HAVE_MULTICAST
	void			(*set_multicast_list)(struct net_device *dev);
#define HAVE_SET_MAC_ADDR
	int			(*set_mac_address)(struct net_device *dev,
						   void *addr);
#define HAVE_VALIDATE_ADDR
	int			(*validate_addr)(struct net_device *dev);
#define HAVE_PRIVATE_IOCTL
	int			(*do_ioctl)(struct net_device *dev,
					    struct ifreq *ifr, int cmd);
#define HAVE_SET_CONFIG
	int			(*set_config)(struct net_device *dev,
					      struct ifmap *map);
#define HAVE_CHANGE_MTU
	int			(*change_mtu)(struct net_device *dev, int new_mtu);

#define HAVE_TX_TIMEOUT
	void			(*tx_timeout) (struct net_device *dev);

	void			(*vlan_rx_register)(struct net_device *dev,
						    struct vlan_group *grp);
	void			(*vlan_rx_add_vid)(struct net_device *dev,
						   unsigned short vid);
	void			(*vlan_rx_kill_vid)(struct net_device *dev,
						    unsigned short vid);

	int			(*neigh_setup)(struct net_device *dev, struct neigh_parms *);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*poll_controller)(struct net_device *dev);
#endif

	u16			(*select_queue)(struct net_device *dev,
						struct sk_buff *skb);

#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif

	/* mid-layer private */
	void			*ml_priv;

	/* bridge stuff */
	struct net_bridge_port	*br_port;
	/* macvlan */
	struct macvlan_port	*macvlan_port;
	/* GARP */
	struct garp_port	*garp_port;

	/* class/net/name entry */
	struct device		dev;
	/* space for optional statistics and wireless sysfs groups */
	struct attribute_group	*sysfs_groups[3];

	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;

	/* VLAN feature mask */
	unsigned long		vlan_features;

	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)

#define NETDEV_ALIGN		32
#define NETDEV_ALIGN_CONST	(NETDEV_ALIGN - 1)

static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}

static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		f(dev, &dev->_tx[i], arg);
}

/*
 * Net namespace inlines
 */
static inline
struct net *dev_net(const struct net_device *dev)
{
#ifdef CONFIG_NET_NS
	return dev->nd_net;
#else
	return &init_net;
#endif
}

static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}

/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	return (char *)dev + ((sizeof(struct net_device)
			       + NETDEV_ALIGN_CONST)
			      & ~NETDEV_ALIGN_CONST);
}
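
/*
 * Example (editor's sketch; "struct my_priv" is hypothetical): the
 * private area is laid out directly behind the aligned net_device, so a
 * driver that allocated with alloc_netdev(sizeof(struct my_priv), ...)
 * retrieves it with:
 *
 *	struct my_priv *priv = netdev_priv(dev);
 */
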
/* Set the sysfs physical device reference for the network logical device.
 * If set prior to registration, it will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))

/**
 *	netif_napi_add - initialize a napi context
 *	@dev:  network device
 *	@napi: napi context
 *	@poll: polling function
 *	@weight: default weight
 *
 * netif_napi_add() must be used to initialize a napi context prior to calling
 * *any* of the other napi related functions.
 */
static inline void netif_napi_add(struct net_device *dev,
				  struct napi_struct *napi,
				  int (*poll)(struct napi_struct *, int),
				  int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->poll = poll;
	napi->weight = weight;
#ifdef CONFIG_NETPOLL
	napi->dev = dev;
	list_add(&napi->dev_list, &dev->napi_list);
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
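
/*
 * Example (editor's sketch; "my_priv", "my_poll", "my_process_rx" and
 * "my_hw_enable_irqs" are hypothetical): a driver registers its poll
 * routine once at probe time, and the routine re-enables interrupts
 * only when it has consumed less than its budget:
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, 64);
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int done = my_process_rx(priv, budget);
 *
 *		if (done < budget) {
 *			netif_rx_complete(priv->netdev, napi);
 *			my_hw_enable_irqs(priv);
 *		}
 *		return done;
 *	}
 */
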
/**
 *	netif_napi_del - remove a napi context
 *	@napi: napi context
 *
 * netif_napi_del() removes a napi context from the network device napi list
 */
static inline void netif_napi_del(struct napi_struct *napi)
{
#ifdef CONFIG_NETPOLL
	list_del(&napi->dev_list);
#endif
}

struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	      */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};

#include <linux/interrupt.h>
#include <linux/notifier.h>

extern rwlock_t		dev_base_lock;	/* Device list lock */


#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)	\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)

static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct list_head *lh;
	struct net *net;

	net = dev_net(dev);
	lh = dev->dev_list.next;
	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
}

static inline struct net_device *first_net_device(struct net *net)
{
	return list_empty(&net->dev_base_head) ? NULL :
		net_device_entry(net->dev_base_head.next);
}
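
/*
 * Example (editor's sketch): walking every device in a namespace; the
 * list must be guarded by dev_base_lock (for readers) or the RTNL:
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		printk(KERN_DEBUG "%s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */
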
extern int		netdev_boot_setup_check(struct net_device *dev);
extern unsigned long	netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void		dev_add_pack(struct packet_type *pt);
extern void		dev_remove_pack(struct packet_type *pt);
extern void		__dev_remove_pack(struct packet_type *pt);

extern struct net_device *dev_get_by_flags(struct net *net, unsigned short flags,
					   unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int		dev_alloc_name(struct net_device *dev, const char *name);
extern int		dev_open(struct net_device *dev);
extern int		dev_close(struct net_device *dev);
extern void		dev_disable_lro(struct net_device *dev);
extern int		dev_queue_xmit(struct sk_buff *skb);
extern int		register_netdevice(struct net_device *dev);
extern void		unregister_netdevice(struct net_device *dev);
extern void		free_netdev(struct net_device *dev);
extern void		synchronize_net(void);
extern int		register_netdevice_notifier(struct notifier_block *nb);
extern int		unregister_netdevice_notifier(struct notifier_block *nb);
extern int		call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern int		dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int		netpoll_trap(void);
#endif

static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
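
/*
 * Example (editor's sketch): a caller that has reserved
 * LL_RESERVED_SPACE(dev) of headroom can prepend the link-layer header
 * much like ARP output does:
 *
 *	if (dev_hard_header(skb, dev, ETH_P_IP, daddr, dev->dev_addr,
 *			    skb->len) < 0)
 *		goto drop;
 */
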
typedef int gifconf_func_t(struct net_device *dev, char __user *bufptr, int len);
extern int		register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}

/*
 * Incoming packets are placed on per-cpu queues so that
 * no locking is needed.
 */
struct softnet_data
{
	struct Qdisc		*output_queue;
	struct sk_buff_head	input_pkt_queue;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;

	struct napi_struct	backlog;
#ifdef CONFIG_NET_DMA
	struct dma_chan		*net_dma;
#endif
};

DECLARE_PER_CPU(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

extern void __netif_schedule(struct Qdisc *q);

static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_start_queue(txq);
	}
}

static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 * Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}

static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmit
 *	@dev: network device
 *
 * Stop upper layers calling the device hard_start_xmit routine.
 * Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flow-blocked
 *	@dev: network device
 *
 * Test if the transmit queue on the device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
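
/*
 * Example (editor's sketch; "my_tx_ring_full" and "my_tx_ring_has_room"
 * are hypothetical): classic single-queue flow control; hard_start_xmit
 * stops the queue when the ring fills, and the tx-completion interrupt
 * wakes it again:
 *
 *	if (my_tx_ring_full(priv))
 *		netif_stop_queue(dev);
 *
 *	if (netif_queue_stopped(dev) && my_tx_ring_has_room(priv))
 *		netif_wake_queue(dev);
 */
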
static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 * Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}

/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level, plus a test for whether the
 * device is multiqueue.
 */

/**
 *	netif_start_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	clear_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	set_bit(__QUEUE_STATE_XOFF, &txq->state);
}

/**
 *	netif_subqueue_stopped - test status of subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
	return test_bit(__QUEUE_STATE_XOFF, &txq->state);
}

static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 * Check if the device has multiple transmit queues.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return (dev->num_tx_queues > 1);
}

/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

#define HAVE_NETIF_RX 1
extern int		netif_rx(struct sk_buff *skb);
extern int		netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int		netif_receive_skb(struct sk_buff *skb);
extern void		netif_nit_deliver(struct sk_buff *skb);
extern int		dev_valid_name(const char *name);
extern int		dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int		dev_ethtool(struct net *net, struct ifreq *);
extern unsigned		dev_get_flags(const struct net_device *);
extern int		dev_change_flags(struct net_device *, unsigned);
extern int		dev_change_name(struct net_device *, char *);
extern int		dev_change_net_namespace(struct net_device *,
						 struct net *, const char *);
extern int		dev_set_mtu(struct net_device *, int);
extern int		dev_set_mac_address(struct net_device *,
					    struct sockaddr *);
extern int		dev_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq);

extern int		netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);

/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 * Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}

/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);

/**
 *	netif_carrier_ok - test if carrier present
 *	@dev: network device
 *
 * Check if carrier is present on device
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}

extern void __netdev_watchdog_up(struct net_device *dev);

extern void netif_carrier_on(struct net_device *dev);

extern void netif_carrier_off(struct net_device *dev);

/**
 *	netif_dormant_on - mark device as dormant.
 *	@dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-demand"
 * interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant_off - set device as not dormant.
 *	@dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 *	netif_dormant - test if device is dormant
 *	@dev: network device
 *
 * Check if the device is in the RFC2863 dormant state.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}


/**
 *	netif_oper_up - test if device is operational
 *	@dev: network device
 *
 * Check if the RFC2863 operational state is up.
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
bea3348e SH |
1345 | /** |
1346 | * netif_device_present - is device available or removed | |
1347 | * @dev: network device | |
1348 | * | |
1349 | * Check if device has not been removed from system. | |
1350 | */ | |
1da177e4 LT |
1351 | static inline int netif_device_present(struct net_device *dev) |
1352 | { | |
1353 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | |
1354 | } | |
1355 | ||
56079431 | 1356 | extern void netif_device_detach(struct net_device *dev); |
1da177e4 | 1357 | |
56079431 | 1358 | extern void netif_device_attach(struct net_device *dev); |
1da177e4 LT |
1359 | |
1360 | /* | |
1361 | * Network interface message level settings | |
1362 | */ | |
1363 | #define HAVE_NETIF_MSG 1 | |
1364 | ||
1365 | enum { | |
1366 | NETIF_MSG_DRV = 0x0001, | |
1367 | NETIF_MSG_PROBE = 0x0002, | |
1368 | NETIF_MSG_LINK = 0x0004, | |
1369 | NETIF_MSG_TIMER = 0x0008, | |
1370 | NETIF_MSG_IFDOWN = 0x0010, | |
1371 | NETIF_MSG_IFUP = 0x0020, | |
1372 | NETIF_MSG_RX_ERR = 0x0040, | |
1373 | NETIF_MSG_TX_ERR = 0x0080, | |
1374 | NETIF_MSG_TX_QUEUED = 0x0100, | |
1375 | NETIF_MSG_INTR = 0x0200, | |
1376 | NETIF_MSG_TX_DONE = 0x0400, | |
1377 | NETIF_MSG_RX_STATUS = 0x0800, | |
1378 | NETIF_MSG_PKTDATA = 0x1000, | |
1379 | NETIF_MSG_HW = 0x2000, | |
1380 | NETIF_MSG_WOL = 0x4000, | |
1381 | }; | |
1382 | ||
1383 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) | |
1384 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) | |
1385 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) | |
1386 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) | |
1387 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) | |
1388 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) | |
1389 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) | |
1390 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) | |
1391 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) | |
1392 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) | |
1393 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) | |
1394 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) | |
1395 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) | |
1396 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) | |
1397 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) | |
1398 | ||
1399 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |
1400 | { | |
1401 | /* use default */ | |
1402 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | |
1403 | return default_msg_enable_bits; | |
1404 | if (debug_value == 0) /* no output */ | |
1405 | return 0; | |
1406 | /* set low N bits */ | |
1407 | return (1 << debug_value) - 1; | |
1408 | } | |
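/*
 * Example (sketch): a driver typically takes a "debug" module parameter and
 * seeds msg_enable with it; debug, DEFAULT_MSG and priv are hypothetical.
 * Note the semantics of netif_msg_init(): a negative or out-of-range value
 * selects the defaults, 0 silences everything, and debug=5 sets the low five
 * bits (DRV|PROBE|LINK|TIMER|IFDOWN):
 *
 *	#define DEFAULT_MSG (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
 *
 *	static int debug = -1;		// -1 means "use DEFAULT_MSG"
 *	module_param(debug, int, 0);
 *
 *	// in the probe routine:
 *	priv->msg_enable = netif_msg_init(debug, DEFAULT_MSG);
 *
 *	// later, in the link handler:
 *	if (netif_msg_link(priv))
 *		printk(KERN_INFO "%s: link up\n", dev->name);
 */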
1409 | ||
0a122576 | 1410 | /* Test if receive needs to be scheduled, but only if the device is up */ | 
bea3348e SH |
1411 | static inline int netif_rx_schedule_prep(struct net_device *dev, |
1412 | struct napi_struct *napi) | |
1da177e4 | 1413 | { |
a0a46196 | 1414 | return napi_schedule_prep(napi); |
1da177e4 LT |
1415 | } |
1416 | ||
1417 | /* Add interface to tail of rx poll list. This assumes that _prep has | |
1418 | * already been called and returned 1. | |
1419 | */ | |
bea3348e SH |
1420 | static inline void __netif_rx_schedule(struct net_device *dev, |
1421 | struct napi_struct *napi) | |
1422 | { | |
bea3348e SH |
1423 | __napi_schedule(napi); |
1424 | } | |
1da177e4 LT |
1425 | |
1426 | /* Try to reschedule poll. Called by irq handler. */ | |
1427 | ||
bea3348e SH |
1428 | static inline void netif_rx_schedule(struct net_device *dev, |
1429 | struct napi_struct *napi) | |
1da177e4 | 1430 | { |
bea3348e SH |
1431 | if (netif_rx_schedule_prep(dev, napi)) |
1432 | __netif_rx_schedule(dev, napi); | |
1da177e4 LT |
1433 | } |
1434 | ||
bea3348e SH |
1435 | /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ |
1436 | static inline int netif_rx_reschedule(struct net_device *dev, | |
1437 | struct napi_struct *napi) | |
1da177e4 | 1438 | { |
bea3348e SH |
1439 | if (napi_schedule_prep(napi)) { |
1440 | __netif_rx_schedule(dev, napi); | |
1da177e4 LT |
1441 | return 1; |
1442 | } | |
1443 | return 0; | |
1444 | } | |
1445 | ||
b0ba6667 HX |
1446 | /* Same as netif_rx_complete(), except that the caller has already | 
1447 | * disabled local interrupts (e.g. with local_irq_save()) | 
1448 | */ | |
bea3348e SH |
1449 | static inline void __netif_rx_complete(struct net_device *dev, |
1450 | struct napi_struct *napi) | |
b0ba6667 | 1451 | { |
bea3348e | 1452 | __napi_complete(napi); |
b0ba6667 HX |
1453 | } |
1454 | ||
1da177e4 LT |
1455 | /* Remove interface from poll list: it must be in the poll list |
1456 | * on the current cpu. This primitive is called by dev->poll() when | 
1457 | * it completes its work. If the device is not on the poll list at | 
1458 | * this moment, that is a BUG(). | 
1459 | */ | |
bea3348e SH |
1460 | static inline void netif_rx_complete(struct net_device *dev, |
1461 | struct napi_struct *napi) | |
1da177e4 LT |
1462 | { |
1463 | unsigned long flags; | |
1464 | ||
1465 | local_irq_save(flags); | |
bea3348e | 1466 | __netif_rx_complete(dev, napi); |
1da177e4 LT |
1467 | local_irq_restore(flags); |
1468 | } | |
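/*
 * Example (sketch): the usual interrupt/poll pairing for the helpers above;
 * struct my_priv, my_mask_rx_irq(), my_unmask_rx_irq() and my_clean_rx() are
 * hypothetical driver pieces:
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		if (netif_rx_schedule_prep(priv->dev, &priv->napi)) {
 *			my_mask_rx_irq(priv);		// quiet the source first
 *			__netif_rx_schedule(priv->dev, &priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work = my_clean_rx(priv, budget);
 *
 *		if (work < budget) {
 *			netif_rx_complete(priv->dev, napi);
 *			my_unmask_rx_irq(priv);
 *		}
 *		return work;
 *	}
 */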
1469 | ||
c773e847 | 1470 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
932ff279 | 1471 | { |
c773e847 DM |
1472 | spin_lock(&txq->_xmit_lock); |
1473 | txq->xmit_lock_owner = cpu; | |
22dd7495 JHS |
1474 | } |
1475 | ||
fd2ea0a7 DM |
1476 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq) |
1477 | { | |
1478 | spin_lock_bh(&txq->_xmit_lock); | |
1479 | txq->xmit_lock_owner = smp_processor_id(); | |
1480 | } | |
1481 | ||
c3f26a26 DM |
1482 | static inline int __netif_tx_trylock(struct netdev_queue *txq) |
1483 | { | |
1484 | int ok = spin_trylock(&txq->_xmit_lock); | |
1485 | if (likely(ok)) | |
1486 | txq->xmit_lock_owner = smp_processor_id(); | |
1487 | return ok; | |
1488 | } | |
1489 | ||
1490 | static inline void __netif_tx_unlock(struct netdev_queue *txq) | |
1491 | { | |
1492 | txq->xmit_lock_owner = -1; | |
1493 | spin_unlock(&txq->_xmit_lock); | |
1494 | } | |
1495 | ||
1496 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) | |
1497 | { | |
1498 | txq->xmit_lock_owner = -1; | |
1499 | spin_unlock_bh(&txq->_xmit_lock); | |
1500 | } | |
1501 | ||
d29f749e DJ |
1502 | /** |
1503 | * netif_tx_lock - grab network device transmit lock | |
1504 | * @dev: network device | |
1506 | * | 
1507 | * Take the global transmit lock and freeze all of the device's TX queues. | 
1508 | */ | |
22dd7495 JHS |
1509 | static inline void netif_tx_lock(struct net_device *dev) |
1510 | { | |
e8a0464c | 1511 | unsigned int i; |
c3f26a26 | 1512 | int cpu; |
c773e847 | 1513 | |
c3f26a26 DM |
1514 | spin_lock(&dev->tx_global_lock); |
1515 | cpu = smp_processor_id(); | |
e8a0464c DM |
1516 | for (i = 0; i < dev->num_tx_queues; i++) { |
1517 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
1518 | |
1519 | /* We are the only thread of execution doing a | |
1520 | * freeze, but we have to grab the _xmit_lock in | |
1521 | * order to synchronize with threads which are in | |
1522 | * the ->hard_start_xmit() handler and already | |
1523 | * checked the frozen bit. | |
1524 | */ | |
e8a0464c | 1525 | __netif_tx_lock(txq, cpu); |
c3f26a26 DM |
1526 | set_bit(__QUEUE_STATE_FROZEN, &txq->state); |
1527 | __netif_tx_unlock(txq); | |
e8a0464c | 1528 | } |
932ff279 HX |
1529 | } |
1530 | ||
1531 | static inline void netif_tx_lock_bh(struct net_device *dev) | |
1532 | { | |
e8a0464c DM |
1533 | local_bh_disable(); |
1534 | netif_tx_lock(dev); | |
932ff279 HX |
1535 | } |
1536 | ||
932ff279 HX |
1537 | static inline void netif_tx_unlock(struct net_device *dev) |
1538 | { | |
e8a0464c DM |
1539 | unsigned int i; |
1540 | ||
1541 | for (i = 0; i < dev->num_tx_queues; i++) { | |
1542 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c773e847 | 1543 | |
c3f26a26 DM |
1544 | /* No need to grab the _xmit_lock here. If the |
1545 | * queue is not stopped for another reason, we | |
1546 | * force a schedule. | |
1547 | */ | |
1548 | clear_bit(__QUEUE_STATE_FROZEN, &txq->state); | |
1549 | if (!test_bit(__QUEUE_STATE_XOFF, &txq->state)) | |
1550 | __netif_schedule(txq->qdisc); | |
1551 | } | |
1552 | spin_unlock(&dev->tx_global_lock); | |
932ff279 HX |
1553 | } |
1554 | ||
1555 | static inline void netif_tx_unlock_bh(struct net_device *dev) | |
1556 | { | |
e8a0464c DM |
1557 | netif_tx_unlock(dev); |
1558 | local_bh_enable(); | |
932ff279 HX |
1559 | } |
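/*
 * Example (sketch): freezing all transmit queues around a reconfiguration
 * that ->hard_start_xmit() also touches; my_reprogram_tx_ring() is a
 * hypothetical driver routine:
 *
 *	netif_tx_lock_bh(dev);
 *	my_reprogram_tx_ring(priv);	// queues are frozen, xmit is excluded
 *	netif_tx_unlock_bh(dev);
 */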
1560 | ||
c773e847 | 1561 | #define HARD_TX_LOCK(dev, txq, cpu) { \ |
22dd7495 | 1562 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 1563 | __netif_tx_lock(txq, cpu); \ |
22dd7495 JHS |
1564 | } \ |
1565 | } | |
1566 | ||
c773e847 | 1567 | #define HARD_TX_UNLOCK(dev, txq) { \ |
22dd7495 | 1568 | if ((dev->features & NETIF_F_LLTX) == 0) { \ |
c773e847 | 1569 | __netif_tx_unlock(txq); \ |
22dd7495 JHS |
1570 | } \ |
1571 | } | |
1572 | ||
1da177e4 LT |
1573 | static inline void netif_tx_disable(struct net_device *dev) |
1574 | { | |
fd2ea0a7 | 1575 | unsigned int i; |
c3f26a26 | 1576 | int cpu; |
fd2ea0a7 | 1577 | |
c3f26a26 DM |
1578 | local_bh_disable(); |
1579 | cpu = smp_processor_id(); | |
fd2ea0a7 DM |
1580 | for (i = 0; i < dev->num_tx_queues; i++) { |
1581 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | |
c3f26a26 DM |
1582 | |
1583 | __netif_tx_lock(txq, cpu); | |
fd2ea0a7 | 1584 | netif_tx_stop_queue(txq); |
c3f26a26 | 1585 | __netif_tx_unlock(txq); |
fd2ea0a7 | 1586 | } |
c3f26a26 | 1587 | local_bh_enable(); |
1da177e4 LT |
1588 | } |
1589 | ||
e308a5d8 DM |
1590 | static inline void netif_addr_lock(struct net_device *dev) |
1591 | { | |
1592 | spin_lock(&dev->addr_list_lock); | |
1593 | } | |
1594 | ||
1595 | static inline void netif_addr_lock_bh(struct net_device *dev) | |
1596 | { | |
1597 | spin_lock_bh(&dev->addr_list_lock); | |
1598 | } | |
1599 | ||
1600 | static inline void netif_addr_unlock(struct net_device *dev) | |
1601 | { | |
1602 | spin_unlock(&dev->addr_list_lock); | |
1603 | } | |
1604 | ||
1605 | static inline void netif_addr_unlock_bh(struct net_device *dev) | |
1606 | { | |
1607 | spin_unlock_bh(&dev->addr_list_lock); | |
1608 | } | |
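/*
 * Example (sketch): a driver walking the multicast list outside of its
 * set_multicast_list()/set_rx_mode() callback should take the address-list
 * lock; my_hash_mc_addr() is hypothetical:
 *
 *	struct dev_addr_list *mc;
 *
 *	netif_addr_lock_bh(dev);
 *	for (mc = dev->mc_list; mc; mc = mc->next)
 *		my_hash_mc_addr(priv, mc->da_addr);
 *	netif_addr_unlock_bh(dev);
 */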
1609 | ||
1da177e4 LT |
1610 | /* These functions live elsewhere (drivers/net/net_init.c), but are related */ | 
1611 | ||
1612 | extern void ether_setup(struct net_device *dev); | |
1613 | ||
1614 | /* Support for loadable net-drivers */ | |
f25f4e44 PWJ |
1615 | extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, |
1616 | void (*setup)(struct net_device *), | |
1617 | unsigned int queue_count); | |
1618 | #define alloc_netdev(sizeof_priv, name, setup) \ | |
1619 | alloc_netdev_mq(sizeof_priv, name, setup, 1) | |
1da177e4 LT |
1620 | extern int register_netdev(struct net_device *dev); |
1621 | extern void unregister_netdev(struct net_device *dev); | |
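/*
 * Example (sketch): typical bring-up and tear-down with the helpers above;
 * struct my_priv and my_setup() are hypothetical:
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "my%d", my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	err = register_netdev(dev);	// takes rtnl_lock internally
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *	...
 *	unregister_netdev(dev);		// on removal
 *	free_netdev(dev);
 */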
4417da66 PM |
1622 | /* Functions used for secondary unicast and multicast support */ |
1623 | extern void dev_set_rx_mode(struct net_device *dev); | |
1624 | extern void __dev_set_rx_mode(struct net_device *dev); | |
1625 | extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen); | |
1626 | extern int dev_unicast_add(struct net_device *dev, void *addr, int alen); | |
e83a2ea8 CL |
1627 | extern int dev_unicast_sync(struct net_device *to, struct net_device *from); |
1628 | extern void dev_unicast_unsync(struct net_device *to, struct net_device *from); | |
1da177e4 LT |
1629 | extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all); |
1630 | extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); | |
a0a400d7 PM |
1631 | extern int dev_mc_sync(struct net_device *to, struct net_device *from); |
1632 | extern void dev_mc_unsync(struct net_device *to, struct net_device *from); | |
61cbc2fc PM |
1633 | extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all); |
1634 | extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly); | |
e83a2ea8 CL |
1635 | extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); |
1636 | extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count); | |
dad9b335 WC |
1637 | extern int dev_set_promiscuity(struct net_device *dev, int inc); |
1638 | extern int dev_set_allmulti(struct net_device *dev, int inc); | |
1da177e4 | 1639 | extern void netdev_state_change(struct net_device *dev); |
c1da4ac7 | 1640 | extern void netdev_bonding_change(struct net_device *dev); |
d8a33ac4 | 1641 | extern void netdev_features_change(struct net_device *dev); |
1da177e4 | 1642 | /* Load a device via the kmod */ |
881d966b | 1643 | extern void dev_load(struct net *net, const char *name); |
1da177e4 LT |
1644 | extern void dev_mcast_init(void); |
1645 | extern int netdev_max_backlog; | |
1646 | extern int weight_p; | |
1647 | extern int netdev_set_master(struct net_device *dev, struct net_device *master); | |
84fa7933 | 1648 | extern int skb_checksum_help(struct sk_buff *skb); |
576a30eb | 1649 | extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features); |
fb286bb2 HX |
1650 | #ifdef CONFIG_BUG |
1651 | extern void netdev_rx_csum_fault(struct net_device *dev); | |
1652 | #else | |
1653 | static inline void netdev_rx_csum_fault(struct net_device *dev) | |
1654 | { | |
1655 | } | |
1656 | #endif | |
1da177e4 LT |
1657 | /* rx skb timestamps */ |
1658 | extern void net_enable_timestamp(void); | |
1659 | extern void net_disable_timestamp(void); | |
1660 | ||
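/*
 * Example (sketch) for dev_set_promiscuity()/dev_set_allmulti() declared
 * above: both take a signed increment and are reference-counted, so every
 * +1 must eventually be balanced by a -1; callers hold the RTNL:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	// e.g. when a capture socket opens
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		// drop our reference on close
 *	rtnl_unlock();
 */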
20380731 ACM |
1661 | #ifdef CONFIG_PROC_FS |
1662 | extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); | |
1663 | extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); | |
1664 | extern void dev_seq_stop(struct seq_file *seq, void *v); | |
1665 | #endif | |
1666 | ||
b8a9787e JV |
1667 | extern int netdev_class_create_file(struct class_attribute *class_attr); |
1668 | extern void netdev_class_remove_file(struct class_attribute *class_attr); | |
1669 | ||
6579e57b AV |
1670 | extern char *netdev_drivername(struct net_device *dev, char *buffer, int len); |
1671 | ||
20380731 ACM |
1672 | extern void linkwatch_run_queue(void); |
1673 | ||
7f353bf2 HX |
1674 | extern int netdev_compute_features(unsigned long all, unsigned long one); |
1675 | ||
bcd76111 | 1676 | static inline int net_gso_ok(int features, int gso_type) |
576a30eb | 1677 | { |
bcd76111 | 1678 | int feature = gso_type << NETIF_F_GSO_SHIFT; |
d6b4991a | 1679 | return (features & feature) == feature; |
576a30eb HX |
1680 | } |
1681 | ||
bcd76111 HX |
1682 | static inline int skb_gso_ok(struct sk_buff *skb, int features) |
1683 | { | |
a430a43d | 1684 | return net_gso_ok(features, skb_shinfo(skb)->gso_type); |
bcd76111 HX |
1685 | } |
1686 | ||
7967168c HX |
1687 | static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) |
1688 | { | |
a430a43d HX |
1689 | return skb_is_gso(skb) && |
1690 | (!skb_gso_ok(skb, dev->features) || | |
84fa7933 | 1691 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); |
7967168c HX |
1692 | } |
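/*
 * Example (sketch): net_gso_ok() works because the NETIF_F_GSO_* bits are
 * defined as the SKB_GSO_* values shifted by NETIF_F_GSO_SHIFT, e.g.
 * NETIF_F_TSO == SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT. A transmit path can
 * use netif_needs_gso() to fall back to software segmentation; the drop
 * label is hypothetical:
 *
 *	if (netif_needs_gso(dev, skb)) {
 *		struct sk_buff *segs = skb_gso_segment(skb, dev->features);
 *
 *		if (unlikely(IS_ERR(segs)))
 *			goto drop;
 *		// transmit the resulting segs list one skb at a time
 *	}
 */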
1693 | ||
82cc1a7a PWJ |
1694 | static inline void netif_set_gso_max_size(struct net_device *dev, |
1695 | unsigned int size) | |
1696 | { | |
1697 | dev->gso_max_size = size; | |
1698 | } | |
1699 | ||
7ea49ed7 | 1700 | /* On bonding slaves other than the currently active slave, suppress |
f5b2b966 JV |
1701 | * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and |
1702 | * ARP on active-backup slaves with arp_validate enabled. | |
7ea49ed7 DM |
1703 | */ |
1704 | static inline int skb_bond_should_drop(struct sk_buff *skb) | |
1705 | { | |
1706 | struct net_device *dev = skb->dev; | |
1707 | struct net_device *master = dev->master; | |
1708 | ||
1709 | if (master && | |
1710 | (dev->priv_flags & IFF_SLAVE_INACTIVE)) { | |
f5b2b966 JV |
1711 | if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && |
1712 | skb->protocol == __constant_htons(ETH_P_ARP)) | |
1713 | return 0; | |
1714 | ||
7ea49ed7 DM |
1715 | if (master->priv_flags & IFF_MASTER_ALB) { |
1716 | if (skb->pkt_type != PACKET_BROADCAST && | |
1717 | skb->pkt_type != PACKET_MULTICAST) | |
1718 | return 0; | |
1719 | } | |
1720 | if (master->priv_flags & IFF_MASTER_8023AD && | |
1721 | skb->protocol == __constant_htons(ETH_P_SLOW)) | |
1722 | return 0; | |
1723 | ||
1724 | return 1; | |
1725 | } | |
1726 | return 0; | |
1727 | } | |
1728 | ||
1da177e4 LT |
1729 | #endif /* __KERNEL__ */ |
1730 | ||
1731 | #endif /* _LINUX_DEV_H */ |