/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
/*
 *	Our notifier list
 */
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation: if a protocol handler that mangles packets were first
 *	on the list, it could not tell that the packet is cloned and must
 *	be copied-on-write; it would modify the clone, and subsequent
 *	readers would see a corrupted packet.
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, so it cannot guarantee that all CPUs that
 *	are in the middle of receiving packets will see the new packet type
 *	(until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
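/*
 * Example (illustrative sketch, not part of this file): a module that taps
 * all incoming IPv4 frames.  The names example_rcv/example_ptype and the
 * decision to simply consume the clone are hypothetical.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* A tap handler receives a clone; consume it when done. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_ptype __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.func = example_rcv,
};

static int __init example_init(void)
{
	dev_add_pack(&example_ptype);
	return 0;
}

static void __exit example_exit(void)
{
	/* dev_remove_pack() waits for a grace period before returning. */
	dev_remove_pack(&example_ptype);
}
#endif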
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
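/*
 * Worked example (derived from the parser above; the concrete values are
 * hypothetical): the kernel command-line option
 *
 *	netdev=5,0x240,0xd0000,0xd4000,eth0
 *
 * stores irq=5 (ints[1]), base_addr=0x240 (ints[2]), mem_start=0xd0000
 * (ints[3]) and mem_end=0xd4000 (ints[4]) under the name "eth0", which a
 * later probe picks up via netdev_boot_setup_check().
 */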
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */
struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */
struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
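/*
 * Illustrative usage sketch (not part of this file); the device name
 * "eth0" is an assumption for the example:
 */
#if 0
static void example_lookup(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "eth0");

	if (dev) {
		printk(KERN_INFO "%s has ifindex %d\n",
		       dev->name, dev->ifindex);
		dev_put(dev);	/* release the reference taken above */
	}
}
#endif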
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */
struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */
struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */
struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */
struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */
static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */
int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
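/*
 * For example, dev_alloc_name(dev, "eth%d") picks the lowest unused unit,
 * writes e.g. "eth2" into dev->name and returns 2; a negative return value
 * means no name could be assigned.
 */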
static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0, ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname, 1);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */
void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/* Is it even present? */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/* Call device private open method */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);
	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/* If it went open OK then: */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/* Set the flags */
		dev->flags |= IFF_UP;

		/* Enable NET_DMA */
		net_dmaengine_get();

		/* Initialize multicasting status */
		dev_set_rx_mode(dev);

		/* Wakeup transmit queue engine */
		dev_activate(dev);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/* Is it already up? */
	if (dev->flags & IFF_UP)
		return 0;

	/* Open device */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/* ... and announce new interface. */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 * Tell people we are going down, so that they can
	 * prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 * Call the device specific close. This cannot fail.
	 * Only if device is UP
	 *
	 * We allow it to be called even after a DETACH hot-plug
	 * event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/* Device is now down. */
	dev->flags &= ~IFF_UP;

	/* Shutdown NET_DMA */
	net_dmaengine_put();

	return 0;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/* Tell people we are down */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
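/*
 * Usage sketch (illustrative, not from this file): a path that turns a
 * device into an IP forwarder would disable LRO under RTNL, e.g.
 *
 *	rtnl_lock();
 *	dev_disable_lro(dev);
 *	rtnl_unlock();
 */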
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow it a race-free
 *	view of the network device list.
 */
int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
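/*
 * Example (illustrative sketch): a hypothetical notifier that logs
 * interfaces coming up.  In this kernel version the notifier's 'ptr'
 * argument is the struct net_device itself.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		printk(KERN_INFO "%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};

static int __init example_init(void)
{
	/* Existing REGISTER/UP events are replayed to us at this point. */
	return register_netdevice_notifier(&example_notifier);
}
#endif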
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */
int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!(dev->flags & IFF_UP) ||
		     (skb->len > (dev->mtu + dev->hard_header_len)))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
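/*
 * Sketch of a veth-like transmit path using dev_forward_skb().  The type
 * 'struct example_priv' and its 'peer' pointer are hypothetical driver
 * state, not kernel API.
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (dev_forward_skb(priv->peer, skb) == NET_RX_DROP)
		dev->stats.tx_dropped++;
	/* dev_forward_skb() consumed the skb in either case. */
	return NETDEV_TX_OK;
}
#endif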
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */
static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp_set(skb);
#else
	net_timestamp_set(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
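/*
 * A driver would typically size both directions once it knows how many
 * hardware queues it could actually allocate, e.g. (sketch; the variable
 * 'nr_hw_queues' is hypothetical):
 *
 *	err = netif_set_real_num_tx_queues(dev, nr_hw_queues);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, nr_hw_queues);
 *
 * Both calls need the rtnl lock when the device is already registered.
 */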
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
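/*
 * Typical suspend/resume pairing in a hypothetical PCI network driver
 * (illustrative sketch only; example_suspend/example_resume are not real
 * kernel functions):
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stop all TX queues if running */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_attach(dev);	/* restart queues and watchdog */
	return 0;
}
#endif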
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	int features = dev->features;

	if (vlan_tx_tag_present(skb))
		features &= dev->vlan_features;

	if (can_checksum_protocol(features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/**
 * skb_dev_set -- assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	if (type == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh;

		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return ERR_PTR(-EINVAL);

		veh = (struct vlan_ethhdr *)skb->data;
		type = veh->h_vlan_encapsulated_proto;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d\n",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
				return 1;
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}
/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
/*
 * Try to orphan skb early, right before transmission by the device.
 * We cannot orphan skb if tx timestamp is requested or the sk-reference
 * is needed on driver level for other reasons, e.g. see net/can/raw.c
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && !skb_shinfo(skb)->tx_flags) {
		/* skb_tx_hash() won't be able to get sk.
		 * We copy sk_hash into skb->rxhash
		 */
		if (!skb->rxhash)
			skb->rxhash = sk->sk_hash;
		skb_orphan(skb);
	}
}
/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      struct net_device *dev)
{
	int features = dev->features;

	if (skb->protocol == htons(ETH_P_8021Q) || vlan_tx_tag_present(skb))
		features &= dev->vlan_features;

	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && (!(features & NETIF_F_SG) ||
					       illegal_highdma(dev, skb))));
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		skb_orphan_try(skb);

		if (vlan_tx_tag_present(skb) &&
		    !(dev->features & NETIF_F_HW_VLAN_TX)) {
			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, dev) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				skb_set_transport_header(skb, skb->csum_start -
							      skb_headroom(skb));
				if (!dev_can_checksum(dev, skb) &&
				    skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		rc = ops->ndo_start_xmit(skb, dev);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}
static u32 hashrnd __read_mostly;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol ^ skb->rxhash;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
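/*
 * The final scaling step above maps a 32-bit hash uniformly onto
 * [0, real_num_tx_queues) without a modulo.  Worked example: with 4 TX
 * queues, hash = 0x40000000 gives ((u64)0x40000000 * 4) >> 32 = 1, and
 * hash = 0xffffffff gives 3.
 */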
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			pr_warning("%s selects TX queue %d, but "
				   "real number of TX queues is %d\n",
				   dev->name, queue_index,
				   dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	int queue_index;
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;
		queue_index = sk_tx_queue_get(sk);
		if (queue_index < 0) {

			queue_index = 0;
			if (dev->real_num_tx_queues > 1)
				queue_index = skb_tx_hash(dev, skb);

			if (sk) {
				struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended = qdisc_is_running(q);
	int rc;

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);
		__qdisc_update_bstats(q, skb->len);
		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = qdisc_enqueue_root(skb, q);
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 3

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible that they rely on protection
	   made by us here.

	   Check this and take the lock. It is not prone to deadlocks, as the
	   noqueue qdisc cannot recurse into itself. 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately.
			 */
recursion_alert:
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
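/*
 * Illustrative sketch of a caller (not part of this file): transmit a
 * pre-built link-layer frame on 'dev'.  The parameters 'frame' and 'len'
 * are assumptions for the example, and an Ethernet device is assumed.
 */
#if 0
static int example_send(struct net_device *dev, const void *frame, size_t len)
{
	struct sk_buff *skb = alloc_skb(len + LL_RESERVED_SPACE(dev),
					GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), frame, len);
	skb_reset_mac_header(skb);
	skb->dev = dev;
	skb->protocol = eth_hdr(skb)->h_proto;	/* assumes an Ethernet frame */

	/* dev_queue_xmit() consumes the skb, even on error. */
	return dev_queue_xmit(skb);
}
#endif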
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;		/* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Returns a non-zero hash number on success
 * and 0 on failure.
 */
__u32 __skb_get_rxhash(struct sk_buff *skb)
{
	int nhoff, hash = 0, poff;
	struct ipv6hdr *ip6;
	struct iphdr *ip;
	u8 ip_proto;
	u32 addr1, addr2, ihl;
	union {
		u32 v32;
		u16 v16[2];
	} ports;

	nhoff = skb_network_offset(skb);

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
			goto done;
		ip = (struct iphdr *) (skb->data + nhoff);
		if (ip->frag_off & htons(IP_MF | IP_OFFSET))
			ip_proto = 0;
		else
			ip_proto = ip->protocol;
		addr1 = (__force u32) ip->saddr;
		addr2 = (__force u32) ip->daddr;
		ihl = ip->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
			goto done;
		ip6 = (struct ipv6hdr *) (skb->data + nhoff);
		ip_proto = ip6->nexthdr;
		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		goto done;
	}

	ports.v32 = 0;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		nhoff += ihl * 4 + poff;
		if (pskb_may_pull(skb, nhoff + 4)) {
			ports.v32 = * (__force u32 *) (skb->data + nhoff);
			if (ports.v16[1] < ports.v16[0])
				swap(ports.v16[0], ports.v16[1]);
		}
	}

	/* get a consistent hash (same value on both flow directions) */
	if (addr2 < addr1)
		swap(addr1, addr2);

	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
	if (!hash)
		hash = 1;

done:
	return hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
2410 /* One global table that all flow-based protocols share. */
2411 struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2412 EXPORT_SYMBOL(rps_sock_flow_table);
2415 * get_rps_cpu is called from netif_receive_skb and returns the target
2416 * CPU from the RPS map of the receiving queue for a given skb.
2417 * rcu_read_lock must be held on entry.
2419 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2420 struct rps_dev_flow **rflowp)
2422 struct netdev_rx_queue *rxqueue;
2423 struct rps_map *map = NULL;
2424 struct rps_dev_flow_table *flow_table;
2425 struct rps_sock_flow_table *sock_flow_table;
2429 if (skb_rx_queue_recorded(skb)) {
2430 u16 index = skb_get_rx_queue(skb);
2431 if (unlikely(index >= dev->real_num_rx_queues)) {
2432 WARN_ONCE(dev->real_num_rx_queues > 1,
2433 "%s received packet on queue %u, but number "
2434 "of RX queues is %u\n",
2435 dev->name, index, dev->real_num_rx_queues);
2438 rxqueue = dev->_rx + index;
2442 if (rxqueue->rps_map) {
2443 map = rcu_dereference(rxqueue->rps_map);
2444 if (map && map->len == 1) {
2445 tcpu = map->cpus[0];
2446 if (cpu_online(tcpu))
2450 } else if (!rxqueue->rps_flow_table) {
2454 skb_reset_network_header(skb);
2455 if (!skb_get_rxhash(skb))
2458 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2459 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2460 if (flow_table && sock_flow_table) {
2462 struct rps_dev_flow *rflow;
2464 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2467 next_cpu = sock_flow_table->ents[skb->rxhash &
2468 sock_flow_table->mask];
2471 * If the desired CPU (where last recvmsg was done) is
2472 * different from current CPU (one in the rx-queue flow
2473 * table entry), switch if one of the following holds:
2474 * - Current CPU is unset (equal to RPS_NO_CPU).
2475 * - Current CPU is offline.
2476 * - The current CPU's queue tail has advanced beyond the
2477 * last packet that was enqueued using this table entry.
2478 * This guarantees that all previous packets for the flow
2479 * have been dequeued, thus preserving in-order delivery.
2481 if (unlikely(tcpu != next_cpu) &&
2482 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2483 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2484 rflow->last_qtail)) >= 0)) {
2485 tcpu = rflow->cpu = next_cpu;
2486 if (tcpu != RPS_NO_CPU)
2487 rflow->last_qtail = per_cpu(softnet_data,
2488 tcpu).input_queue_head;
2490 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2498 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2500 if (cpu_online(tcpu)) {
2510 /* Called from hardirq (IPI) context */
2511 static void rps_trigger_softirq(void *data)
2513 struct softnet_data *sd = data;
2515 ____napi_schedule(sd, &sd->backlog);
2519 #endif /* CONFIG_RPS */
2522 * Check whether this softnet_data structure belongs to another CPU.
2523 * If so, queue it on our IPI list and return 1.
2526 static int rps_ipi_queued(struct softnet_data *sd)
2529 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2532 sd->rps_ipi_next = mysd->rps_ipi_list;
2533 mysd->rps_ipi_list = sd;
2535 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2538 #endif /* CONFIG_RPS */
2543 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2544 * queue (may be a remote CPU queue).
2546 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2547 unsigned int *qtail)
2549 struct softnet_data *sd;
2550 unsigned long flags;
2552 sd = &per_cpu(softnet_data, cpu);
2554 local_irq_save(flags);
2557 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2558 if (skb_queue_len(&sd->input_pkt_queue)) {
2560 __skb_queue_tail(&sd->input_pkt_queue, skb);
2561 input_queue_tail_incr_save(sd, qtail);
2563 local_irq_restore(flags);
2564 return NET_RX_SUCCESS;
2567 /* Schedule NAPI for backlog device.
2568 * We can use a non-atomic operation since we own the queue lock.
2570 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2571 if (!rps_ipi_queued(sd))
2572 ____napi_schedule(sd, &sd->backlog);
2580 local_irq_restore(flags);
2582 atomic_long_inc(&skb->dev->rx_dropped);
2588 * netif_rx - post buffer to the network code
2589 * @skb: buffer to post
2591 * This function receives a packet from a device driver and queues it for
2592 * the upper (protocol) levels to process. It always succeeds. The buffer
2593 * may be dropped during processing for congestion control or by the protocol layers.
2597 * NET_RX_SUCCESS (no congestion)
2598 * NET_RX_DROP (packet was dropped)
2602 int netif_rx(struct sk_buff *skb)
2606 /* if netpoll wants it, pretend we never saw it */
2607 if (netpoll_rx(skb))
2610 if (netdev_tstamp_prequeue)
2611 net_timestamp_check(skb);
2615 struct rps_dev_flow voidflow, *rflow = &voidflow;
2621 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2623 cpu = smp_processor_id();
2625 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2633 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2639 EXPORT_SYMBOL(netif_rx);
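/* Illustrative sketch (not from this file): the usual way a non-NAPI driver
 * hands a freshly received frame to netif_rx(). The copy out of the device
 * and the function name are hypothetical.
 */
static void example_driver_rx(struct net_device *dev, const void *data, int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	memcpy(skb_put(skb, len), data, len);		/* copy frame out of the device */
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev and pkt_type */
	netif_rx(skb);					/* queue for the upper layers */
}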
2641 int netif_rx_ni(struct sk_buff *skb)
2646 err = netif_rx(skb);
2647 if (local_softirq_pending())
2653 EXPORT_SYMBOL(netif_rx_ni);
2655 static void net_tx_action(struct softirq_action *h)
2657 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2659 if (sd->completion_queue) {
2660 struct sk_buff *clist;
2662 local_irq_disable();
2663 clist = sd->completion_queue;
2664 sd->completion_queue = NULL;
2668 struct sk_buff *skb = clist;
2669 clist = clist->next;
2671 WARN_ON(atomic_read(&skb->users));
2676 if (sd->output_queue) {
2679 local_irq_disable();
2680 head = sd->output_queue;
2681 sd->output_queue = NULL;
2682 sd->output_queue_tailp = &sd->output_queue;
2686 struct Qdisc *q = head;
2687 spinlock_t *root_lock;
2689 head = head->next_sched;
2691 root_lock = qdisc_lock(q);
2692 if (spin_trylock(root_lock)) {
2693 smp_mb__before_clear_bit();
2694 clear_bit(__QDISC_STATE_SCHED,
2697 spin_unlock(root_lock);
2699 if (!test_bit(__QDISC_STATE_DEACTIVATED,
2701 __netif_reschedule(q);
2703 smp_mb__before_clear_bit();
2704 clear_bit(__QDISC_STATE_SCHED,
2712 static inline int deliver_skb(struct sk_buff *skb,
2713 struct packet_type *pt_prev,
2714 struct net_device *orig_dev)
2716 atomic_inc(&skb->users);
2717 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2720 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
2721 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
2722 /* This hook is defined here for ATM LANE */
2723 int (*br_fdb_test_addr_hook)(struct net_device *dev,
2724 unsigned char *addr) __read_mostly;
2725 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2728 #ifdef CONFIG_NET_CLS_ACT
2729 /* TODO: Maybe we should just force sch_ingress to be compiled in
2730 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2731 * instructions (a compare and two extra stores) right now if we
2732 * don't have it on but do have CONFIG_NET_CLS_ACT.
2733 * NOTE: This doesn't remove any functionality; if you don't have
2734 * the ingress scheduler, you just can't add policies on ingress.
2737 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
2739 struct net_device *dev = skb->dev;
2740 u32 ttl = G_TC_RTTL(skb->tc_verd);
2741 int result = TC_ACT_OK;
2744 if (unlikely(MAX_RED_LOOP < ttl++)) {
2745 if (net_ratelimit())
2746 pr_warning("Redir loop detected, dropping packet (%d->%d)\n",
2747 skb->skb_iif, dev->ifindex);
2751 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2752 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2755 if (q != &noop_qdisc) {
2756 spin_lock(qdisc_lock(q));
2757 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2758 result = qdisc_enqueue_root(skb, q);
2759 spin_unlock(qdisc_lock(q));
2765 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2766 struct packet_type **pt_prev,
2767 int *ret, struct net_device *orig_dev)
2769 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
2771 if (!rxq || rxq->qdisc == &noop_qdisc)
2775 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2779 switch (ing_filter(skb, rxq)) {
2793 * netdev_rx_handler_register - register receive handler
2794 * @dev: device to register a handler for
2795 * @rx_handler: receive handler to register
2796 * @rx_handler_data: data pointer that is used by rx handler
2798 * Register a receive handler for a device. This handler will then be
2799 * called from __netif_receive_skb. A negative errno code is returned on a failure.
2802 * The caller must hold the rtnl_mutex.
2804 int netdev_rx_handler_register(struct net_device *dev,
2805 rx_handler_func_t *rx_handler,
2806 void *rx_handler_data)
2810 if (dev->rx_handler)
2813 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
2814 rcu_assign_pointer(dev->rx_handler, rx_handler);
2818 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
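/* Illustrative sketch: attaching a handler the way a bridge or macvlan port
 * does. my_handler, my_port and my_port_consume are hypothetical names.
 * Returning the skb lets normal delivery continue; returning NULL tells
 * __netif_receive_skb the packet was consumed.
 */
static struct sk_buff *my_handler(struct sk_buff *skb)
{
	struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);

	if (my_port_consume(port, skb))	/* hypothetical: we took the packet */
		return NULL;
	return skb;			/* fall through to normal delivery */
}

static int my_port_attach(struct net_device *dev, struct my_port *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_handler, port);
	rtnl_unlock();
	return err;
}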
2821 * netdev_rx_handler_unregister - unregister receive handler
2822 * @dev: device to unregister a handler from
2824 * Unregister a receive handler from a device.
2826 * The caller must hold the rtnl_mutex.
2828 void netdev_rx_handler_unregister(struct net_device *dev)
2832 rcu_assign_pointer(dev->rx_handler, NULL);
2833 rcu_assign_pointer(dev->rx_handler_data, NULL);
2835 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
2837 static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2838 struct net_device *master)
2840 if (skb->pkt_type == PACKET_HOST) {
2841 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2843 memcpy(dest, master->dev_addr, ETH_ALEN);
2847 /* On bonding slaves other than the currently active slave, suppress
2848 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2849 * ARP on active-backup slaves with arp_validate enabled.
2851 int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2853 struct net_device *dev = skb->dev;
2855 if (master->priv_flags & IFF_MASTER_ARPMON)
2856 dev->last_rx = jiffies;
2858 if ((master->priv_flags & IFF_MASTER_ALB) &&
2859 (master->priv_flags & IFF_BRIDGE_PORT)) {
2860 /* Do address unmangling. The local destination address
2861 * will always be the one the master has. This provides the right
2862 * functionality in a bridge.
2864 skb_bond_set_mac_by_master(skb, master);
2867 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2868 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2869 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2872 if (master->priv_flags & IFF_MASTER_ALB) {
2873 if (skb->pkt_type != PACKET_BROADCAST &&
2874 skb->pkt_type != PACKET_MULTICAST)
2877 if (master->priv_flags & IFF_MASTER_8023AD &&
2878 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2885 EXPORT_SYMBOL(__skb_bond_should_drop);
2887 static int __netif_receive_skb(struct sk_buff *skb)
2889 struct packet_type *ptype, *pt_prev;
2890 rx_handler_func_t *rx_handler;
2891 struct net_device *orig_dev;
2892 struct net_device *master;
2893 struct net_device *null_or_orig;
2894 struct net_device *orig_or_bond;
2895 int ret = NET_RX_DROP;
2898 if (!netdev_tstamp_prequeue)
2899 net_timestamp_check(skb);
2901 /* if we've gotten here through NAPI, check netpoll */
2902 if (netpoll_receive_skb(skb))
2906 skb->skb_iif = skb->dev->ifindex;
2909 * bonding note: skbs received on inactive slaves should only
2910 * be delivered to pkt handlers that are exact matches. Also
2911 * the deliver_no_wcard flag will be set. If packet handlers
2912 * are sensitive to duplicate packets, these skbs will need to
2913 * be dropped at the handler.
2915 null_or_orig = NULL;
2916 orig_dev = skb->dev;
2917 master = ACCESS_ONCE(orig_dev->master);
2918 if (skb->deliver_no_wcard)
2919 null_or_orig = orig_dev;
2921 if (skb_bond_should_drop(skb, master)) {
2922 skb->deliver_no_wcard = 1;
2923 null_or_orig = orig_dev; /* deliver only exact match */
2928 __this_cpu_inc(softnet_data.processed);
2929 skb_reset_network_header(skb);
2930 skb_reset_transport_header(skb);
2931 skb->mac_len = skb->network_header - skb->mac_header;
2937 #ifdef CONFIG_NET_CLS_ACT
2938 if (skb->tc_verd & TC_NCLS) {
2939 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2944 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2945 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2946 ptype->dev == orig_dev) {
2948 ret = deliver_skb(skb, pt_prev, orig_dev);
2953 #ifdef CONFIG_NET_CLS_ACT
2954 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2960 /* Handle special case of bridge or macvlan */
2961 rx_handler = rcu_dereference(skb->dev->rx_handler);
2964 ret = deliver_skb(skb, pt_prev, orig_dev);
2967 skb = rx_handler(skb);
2972 if (vlan_tx_tag_present(skb)) {
2974 ret = deliver_skb(skb, pt_prev, orig_dev);
2977 if (vlan_hwaccel_do_receive(&skb)) {
2978 ret = __netif_receive_skb(skb);
2980 } else if (unlikely(!skb))
2985 * Make sure frames received on VLAN interfaces stacked on
2986 * bonding interfaces still make their way to any base bonding
2987 * device that may have registered for a specific ptype. The
2988 * handler may have to adjust skb->dev and orig_dev.
2990 orig_or_bond = orig_dev;
2991 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2992 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2993 orig_or_bond = vlan_dev_real_dev(skb->dev);
2996 type = skb->protocol;
2997 list_for_each_entry_rcu(ptype,
2998 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2999 if (ptype->type == type && (ptype->dev == null_or_orig ||
3000 ptype->dev == skb->dev || ptype->dev == orig_dev ||
3001 ptype->dev == orig_or_bond)) {
3003 ret = deliver_skb(skb, pt_prev, orig_dev);
3009 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3011 atomic_long_inc(&skb->dev->rx_dropped);
3013 /* Jamal, now you will not be able to escape explaining
3014 * to me how you were going to use this. :-)
3025 * netif_receive_skb - process receive buffer from network
3026 * @skb: buffer to process
3028 * netif_receive_skb() is the main receive data processing function.
3029 * It always succeeds. The buffer may be dropped during processing
3030 * for congestion control or by the protocol layers.
3032 * This function may only be called from softirq context and interrupts
3033 * should be enabled.
3035 * Return values (usually ignored):
3036 * NET_RX_SUCCESS: no congestion
3037 * NET_RX_DROP: packet was dropped
3039 int netif_receive_skb(struct sk_buff *skb)
3041 if (netdev_tstamp_prequeue)
3042 net_timestamp_check(skb);
3044 if (skb_defer_rx_timestamp(skb))
3045 return NET_RX_SUCCESS;
3049 struct rps_dev_flow voidflow, *rflow = &voidflow;
3054 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3057 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3061 ret = __netif_receive_skb(skb);
3067 return __netif_receive_skb(skb);
3070 EXPORT_SYMBOL(netif_receive_skb);
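/* Illustrative sketch: a NAPI driver delivers frames from its ->poll()
 * callback with netif_receive_skb() (softirq context, interrupts enabled).
 * example_fetch_rx() is a hypothetical stand-in for ring-buffer handling.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work = 0;
	struct sk_buff *skb;

	while (work < budget && (skb = example_fetch_rx(napi)) != NULL) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget)
		napi_complete(napi);	/* done for now; the driver would
					 * re-enable device interrupts here */
	return work;
}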
3072 /* Network device is going away; flush any packets still pending.
3073 * Called with irqs disabled.
3075 static void flush_backlog(void *arg)
3077 struct net_device *dev = arg;
3078 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3079 struct sk_buff *skb, *tmp;
3082 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3083 if (skb->dev == dev) {
3084 __skb_unlink(skb, &sd->input_pkt_queue);
3086 input_queue_head_incr(sd);
3091 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3092 if (skb->dev == dev) {
3093 __skb_unlink(skb, &sd->process_queue);
3095 input_queue_head_incr(sd);
3100 static int napi_gro_complete(struct sk_buff *skb)
3102 struct packet_type *ptype;
3103 __be16 type = skb->protocol;
3104 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3107 if (NAPI_GRO_CB(skb)->count == 1) {
3108 skb_shinfo(skb)->gso_size = 0;
3113 list_for_each_entry_rcu(ptype, head, list) {
3114 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3117 err = ptype->gro_complete(skb);
3123 WARN_ON(&ptype->list == head);
3125 return NET_RX_SUCCESS;
3129 return netif_receive_skb(skb);
3132 inline void napi_gro_flush(struct napi_struct *napi)
3134 struct sk_buff *skb, *next;
3136 for (skb = napi->gro_list; skb; skb = next) {
3139 napi_gro_complete(skb);
3142 napi->gro_count = 0;
3143 napi->gro_list = NULL;
3145 EXPORT_SYMBOL(napi_gro_flush);
3147 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3149 struct sk_buff **pp = NULL;
3150 struct packet_type *ptype;
3151 __be16 type = skb->protocol;
3152 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3155 enum gro_result ret;
3157 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3160 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3164 list_for_each_entry_rcu(ptype, head, list) {
3165 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3168 skb_set_network_header(skb, skb_gro_offset(skb));
3169 mac_len = skb->network_header - skb->mac_header;
3170 skb->mac_len = mac_len;
3171 NAPI_GRO_CB(skb)->same_flow = 0;
3172 NAPI_GRO_CB(skb)->flush = 0;
3173 NAPI_GRO_CB(skb)->free = 0;
3175 pp = ptype->gro_receive(&napi->gro_list, skb);
3180 if (&ptype->list == head)
3183 same_flow = NAPI_GRO_CB(skb)->same_flow;
3184 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3187 struct sk_buff *nskb = *pp;
3191 napi_gro_complete(nskb);
3198 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3202 NAPI_GRO_CB(skb)->count = 1;
3203 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3204 skb->next = napi->gro_list;
3205 napi->gro_list = skb;
3209 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3210 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3212 BUG_ON(skb->end - skb->tail < grow);
3214 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3217 skb->data_len -= grow;
3219 skb_shinfo(skb)->frags[0].page_offset += grow;
3220 skb_shinfo(skb)->frags[0].size -= grow;
3222 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3223 put_page(skb_shinfo(skb)->frags[0].page);
3224 memmove(skb_shinfo(skb)->frags,
3225 skb_shinfo(skb)->frags + 1,
3226 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3237 EXPORT_SYMBOL(dev_gro_receive);
3239 static inline gro_result_t
3240 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3244 for (p = napi->gro_list; p; p = p->next) {
3245 unsigned long diffs;
3247 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3248 diffs |= p->vlan_tci ^ skb->vlan_tci;
3249 diffs |= compare_ether_header(skb_mac_header(p),
3250 skb_gro_mac_header(skb));
3251 NAPI_GRO_CB(p)->same_flow = !diffs;
3252 NAPI_GRO_CB(p)->flush = 0;
3255 return dev_gro_receive(napi, skb);
3258 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3262 if (netif_receive_skb(skb))
3267 case GRO_MERGED_FREE:
3278 EXPORT_SYMBOL(napi_skb_finish);
3280 void skb_gro_reset_offset(struct sk_buff *skb)
3282 NAPI_GRO_CB(skb)->data_offset = 0;
3283 NAPI_GRO_CB(skb)->frag0 = NULL;
3284 NAPI_GRO_CB(skb)->frag0_len = 0;
3286 if (skb->mac_header == skb->tail &&
3287 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
3288 NAPI_GRO_CB(skb)->frag0 =
3289 page_address(skb_shinfo(skb)->frags[0].page) +
3290 skb_shinfo(skb)->frags[0].page_offset;
3291 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3294 EXPORT_SYMBOL(skb_gro_reset_offset);
3296 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3298 skb_gro_reset_offset(skb);
3300 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3302 EXPORT_SYMBOL(napi_gro_receive);
3304 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3306 __skb_pull(skb, skb_headlen(skb));
3307 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3312 EXPORT_SYMBOL(napi_reuse_skb);
3314 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3316 struct sk_buff *skb = napi->skb;
3319 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3325 EXPORT_SYMBOL(napi_get_frags);
3327 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3333 skb->protocol = eth_type_trans(skb, skb->dev);
3335 if (ret == GRO_HELD)
3336 skb_gro_pull(skb, -ETH_HLEN);
3337 else if (netif_receive_skb(skb))
3342 case GRO_MERGED_FREE:
3343 napi_reuse_skb(napi, skb);
3352 EXPORT_SYMBOL(napi_frags_finish);
3354 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3356 struct sk_buff *skb = napi->skb;
3363 skb_reset_mac_header(skb);
3364 skb_gro_reset_offset(skb);
3366 off = skb_gro_offset(skb);
3367 hlen = off + sizeof(*eth);
3368 eth = skb_gro_header_fast(skb, off);
3369 if (skb_gro_header_hard(skb, hlen)) {
3370 eth = skb_gro_header_slow(skb, hlen, off);
3371 if (unlikely(!eth)) {
3372 napi_reuse_skb(napi, skb);
3378 skb_gro_pull(skb, sizeof(*eth));
3381 * This works because the only protocols we care about don't require
3382 * special handling. We'll fix it up properly at the end.
3384 skb->protocol = eth->h_proto;
3389 EXPORT_SYMBOL(napi_frags_skb);
3391 gro_result_t napi_gro_frags(struct napi_struct *napi)
3393 struct sk_buff *skb = napi_frags_skb(napi);
3398 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3400 EXPORT_SYMBOL(napi_gro_frags);
3403 * net_rps_action sends any pending IPIs for RPS.
3404 * Note: called with local irq disabled, but exits with local irq enabled.
3406 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3409 struct softnet_data *remsd = sd->rps_ipi_list;
3412 sd->rps_ipi_list = NULL;
3416 /* Send pending IPI's to kick RPS processing on remote cpus. */
3418 struct softnet_data *next = remsd->rps_ipi_next;
3420 if (cpu_online(remsd->cpu))
3421 __smp_call_function_single(remsd->cpu,
3430 static int process_backlog(struct napi_struct *napi, int quota)
3433 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3436 /* Check if we have pending IPIs; it is better to send them now
3437 * than to wait until net_rx_action() ends.
3439 if (sd->rps_ipi_list) {
3440 local_irq_disable();
3441 net_rps_action_and_irq_enable(sd);
3444 napi->weight = weight_p;
3445 local_irq_disable();
3446 while (work < quota) {
3447 struct sk_buff *skb;
3450 while ((skb = __skb_dequeue(&sd->process_queue))) {
3452 __netif_receive_skb(skb);
3453 local_irq_disable();
3454 input_queue_head_incr(sd);
3455 if (++work >= quota) {
3462 qlen = skb_queue_len(&sd->input_pkt_queue);
3464 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3465 &sd->process_queue);
3467 if (qlen < quota - work) {
3469 * Inline a custom version of __napi_complete().
3470 * Only the current cpu owns and manipulates this napi,
3471 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3472 * so we can use a plain write instead of clear_bit(),
3473 * and we don't need an smp_mb() memory barrier.
3475 list_del(&napi->poll_list);
3478 quota = work + qlen;
3488 * __napi_schedule - schedule for receive
3489 * @n: entry to schedule
3491 * The entry's receive function will be scheduled to run
3493 void __napi_schedule(struct napi_struct *n)
3495 unsigned long flags;
3497 local_irq_save(flags);
3498 ____napi_schedule(&__get_cpu_var(softnet_data), n);
3499 local_irq_restore(flags);
3501 EXPORT_SYMBOL(__napi_schedule);
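/* Illustrative sketch: the canonical interrupt handler that feeds NAPI.
 * The private structure and IRQ-masking helper are hypothetical;
 * napi_schedule() ends up in __napi_schedule() once it wins the
 * NAPI_STATE_SCHED bit.
 */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	example_mask_rx_irq(priv);	/* hypothetical: silence the device */
	napi_schedule(&priv->napi);	/* ->poll() will run from NET_RX_SOFTIRQ */
	return IRQ_HANDLED;
}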
3503 void __napi_complete(struct napi_struct *n)
3505 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3506 BUG_ON(n->gro_list);
3508 list_del(&n->poll_list);
3509 smp_mb__before_clear_bit();
3510 clear_bit(NAPI_STATE_SCHED, &n->state);
3512 EXPORT_SYMBOL(__napi_complete);
3514 void napi_complete(struct napi_struct *n)
3516 unsigned long flags;
3519 * don't let napi dequeue from the cpu poll list
3520 * just in case it's running on a different cpu
3522 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3526 local_irq_save(flags);
3528 local_irq_restore(flags);
3530 EXPORT_SYMBOL(napi_complete);
3532 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3533 int (*poll)(struct napi_struct *, int), int weight)
3535 INIT_LIST_HEAD(&napi->poll_list);
3536 napi->gro_count = 0;
3537 napi->gro_list = NULL;
3540 napi->weight = weight;
3541 list_add(&napi->dev_list, &dev->napi_list);
3543 #ifdef CONFIG_NETPOLL
3544 spin_lock_init(&napi->poll_lock);
3545 napi->poll_owner = -1;
3547 set_bit(NAPI_STATE_SCHED, &napi->state);
3549 EXPORT_SYMBOL(netif_napi_add);
3551 void netif_napi_del(struct napi_struct *napi)
3553 struct sk_buff *skb, *next;
3555 list_del_init(&napi->dev_list);
3556 napi_free_frags(napi);
3558 for (skb = napi->gro_list; skb; skb = next) {
3564 napi->gro_list = NULL;
3565 napi->gro_count = 0;
3567 EXPORT_SYMBOL(netif_napi_del);
3569 static void net_rx_action(struct softirq_action *h)
3571 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3572 unsigned long time_limit = jiffies + 2;
3573 int budget = netdev_budget;
3576 local_irq_disable();
3578 while (!list_empty(&sd->poll_list)) {
3579 struct napi_struct *n;
3582 /* If the softirq window is exhausted then punt.
3583 * Allow this to run for 2 jiffies, which allows
3584 * an average latency of 1.5/HZ.
3586 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3591 /* Even though interrupts have been re-enabled, this
3592 * access is safe because interrupts can only add new
3593 * entries to the tail of this list, and only ->poll()
3594 * calls can remove this head entry from the list.
3596 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3598 have = netpoll_poll_lock(n);
3602 /* This NAPI_STATE_SCHED test is for avoiding a race
3603 * with netpoll's poll_napi(). Only the entity which
3604 * obtains the lock and sees NAPI_STATE_SCHED set will
3605 * actually make the ->poll() call. Therefore we avoid
3606 * accidentally calling ->poll() when NAPI is not scheduled.
3609 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3610 work = n->poll(n, weight);
3614 WARN_ON_ONCE(work > weight);
3618 local_irq_disable();
3620 /* Drivers must not modify the NAPI state if they
3621 * consume the entire weight. In such cases this code
3622 * still "owns" the NAPI instance and therefore can
3623 * move the instance around on the list at will.
3625 if (unlikely(work == weight)) {
3626 if (unlikely(napi_disable_pending(n))) {
3629 local_irq_disable();
3631 list_move_tail(&n->poll_list, &sd->poll_list);
3634 netpoll_poll_unlock(have);
3637 net_rps_action_and_irq_enable(sd);
3639 #ifdef CONFIG_NET_DMA
3641 * There may not be any more sk_buffs coming right now, so push
3642 * any pending DMA copies to hardware
3644 dma_issue_pending_all();
3651 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3655 static gifconf_func_t *gifconf_list[NPROTO];
3658 * register_gifconf - register a SIOCGIF handler
3659 * @family: Address family
3660 * @gifconf: Function handler
3662 * Register protocol dependent address dumping routines. The handler
3663 * that is passed must not be freed or reused until it has been replaced
3664 * by another handler.
3666 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3668 if (family >= NPROTO)
3670 gifconf_list[family] = gifconf;
3673 EXPORT_SYMBOL(register_gifconf);
3677 * Map an interface index to its name (SIOCGIFNAME)
3681 * We need this ioctl for efficient implementation of the
3682 * if_indextoname() function required by the IPv6 API. Without
3683 * it, we would have to search all the interfaces to find a match.
3687 static int dev_ifname(struct net *net, struct ifreq __user *arg)
3689 struct net_device *dev;
3693 * Fetch the caller's info block.
3696 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3700 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3706 strcpy(ifr.ifr_name, dev->name);
3709 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3715 * Perform a SIOCGIFCONF call. This structure will change
3716 * size eventually, and there is nothing I can do about it.
3717 * Thus we will need a 'compatibility mode'.
3720 static int dev_ifconf(struct net *net, char __user *arg)
3723 struct net_device *dev;
3730 * Fetch the caller's info block.
3733 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3740 * Loop over the interfaces, and write an info block for each.
3744 for_each_netdev(net, dev) {
3745 for (i = 0; i < NPROTO; i++) {
3746 if (gifconf_list[i]) {
3749 done = gifconf_list[i](dev, NULL, 0);
3751 done = gifconf_list[i](dev, pos + total,
3761 * All done. Write the updated control block back to the caller.
3763 ifc.ifc_len = total;
3766 * Both BSD and Solaris return 0 here, so we do too.
3768 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3771 #ifdef CONFIG_PROC_FS
3773 * This is invoked by the /proc filesystem handler to display a device in detail.
3776 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3779 struct net *net = seq_file_net(seq);
3781 struct net_device *dev;
3785 return SEQ_START_TOKEN;
3788 for_each_netdev_rcu(net, dev)
3795 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3797 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3798 first_net_device(seq_file_net(seq)) :
3799 next_net_device((struct net_device *)v);
3802 return rcu_dereference(dev);
3805 void dev_seq_stop(struct seq_file *seq, void *v)
3811 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3813 struct rtnl_link_stats64 temp;
3814 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
3816 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
3817 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
3818 dev->name, stats->rx_bytes, stats->rx_packets,
3820 stats->rx_dropped + stats->rx_missed_errors,
3821 stats->rx_fifo_errors,
3822 stats->rx_length_errors + stats->rx_over_errors +
3823 stats->rx_crc_errors + stats->rx_frame_errors,
3824 stats->rx_compressed, stats->multicast,
3825 stats->tx_bytes, stats->tx_packets,
3826 stats->tx_errors, stats->tx_dropped,
3827 stats->tx_fifo_errors, stats->collisions,
3828 stats->tx_carrier_errors +
3829 stats->tx_aborted_errors +
3830 stats->tx_window_errors +
3831 stats->tx_heartbeat_errors,
3832 stats->tx_compressed);
3836 * Called from the PROCfs module. This now uses the new arbitrary-sized
3837 * /proc/net interface to create /proc/net/dev
3839 static int dev_seq_show(struct seq_file *seq, void *v)
3841 if (v == SEQ_START_TOKEN)
3842 seq_puts(seq, "Inter-| Receive "
3844 " face |bytes packets errs drop fifo frame "
3845 "compressed multicast|bytes packets errs "
3846 "drop fifo colls carrier compressed\n");
3848 dev_seq_printf_stats(seq, v);
3852 static struct softnet_data *softnet_get_online(loff_t *pos)
3854 struct softnet_data *sd = NULL;
3856 while (*pos < nr_cpu_ids)
3857 if (cpu_online(*pos)) {
3858 sd = &per_cpu(softnet_data, *pos);
3865 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3867 return softnet_get_online(pos);
3870 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3873 return softnet_get_online(pos);
3876 static void softnet_seq_stop(struct seq_file *seq, void *v)
3880 static int softnet_seq_show(struct seq_file *seq, void *v)
3882 struct softnet_data *sd = v;
3884 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3885 sd->processed, sd->dropped, sd->time_squeeze, 0,
3886 0, 0, 0, 0, /* was fastroute */
3887 sd->cpu_collision, sd->received_rps);
3891 static const struct seq_operations dev_seq_ops = {
3892 .start = dev_seq_start,
3893 .next = dev_seq_next,
3894 .stop = dev_seq_stop,
3895 .show = dev_seq_show,
3898 static int dev_seq_open(struct inode *inode, struct file *file)
3900 return seq_open_net(inode, file, &dev_seq_ops,
3901 sizeof(struct seq_net_private));
3904 static const struct file_operations dev_seq_fops = {
3905 .owner = THIS_MODULE,
3906 .open = dev_seq_open,
3908 .llseek = seq_lseek,
3909 .release = seq_release_net,
3912 static const struct seq_operations softnet_seq_ops = {
3913 .start = softnet_seq_start,
3914 .next = softnet_seq_next,
3915 .stop = softnet_seq_stop,
3916 .show = softnet_seq_show,
3919 static int softnet_seq_open(struct inode *inode, struct file *file)
3921 return seq_open(file, &softnet_seq_ops);
3924 static const struct file_operations softnet_seq_fops = {
3925 .owner = THIS_MODULE,
3926 .open = softnet_seq_open,
3928 .llseek = seq_lseek,
3929 .release = seq_release,
3932 static void *ptype_get_idx(loff_t pos)
3934 struct packet_type *pt = NULL;
3938 list_for_each_entry_rcu(pt, &ptype_all, list) {
3944 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3945 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3954 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3958 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3961 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3963 struct packet_type *pt;
3964 struct list_head *nxt;
3968 if (v == SEQ_START_TOKEN)
3969 return ptype_get_idx(0);
3972 nxt = pt->list.next;
3973 if (pt->type == htons(ETH_P_ALL)) {
3974 if (nxt != &ptype_all)
3977 nxt = ptype_base[0].next;
3979 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3981 while (nxt == &ptype_base[hash]) {
3982 if (++hash >= PTYPE_HASH_SIZE)
3984 nxt = ptype_base[hash].next;
3987 return list_entry(nxt, struct packet_type, list);
3990 static void ptype_seq_stop(struct seq_file *seq, void *v)
3996 static int ptype_seq_show(struct seq_file *seq, void *v)
3998 struct packet_type *pt = v;
4000 if (v == SEQ_START_TOKEN)
4001 seq_puts(seq, "Type Device Function\n");
4002 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4003 if (pt->type == htons(ETH_P_ALL))
4004 seq_puts(seq, "ALL ");
4006 seq_printf(seq, "%04x", ntohs(pt->type));
4008 seq_printf(seq, " %-8s %pF\n",
4009 pt->dev ? pt->dev->name : "", pt->func);
4015 static const struct seq_operations ptype_seq_ops = {
4016 .start = ptype_seq_start,
4017 .next = ptype_seq_next,
4018 .stop = ptype_seq_stop,
4019 .show = ptype_seq_show,
4022 static int ptype_seq_open(struct inode *inode, struct file *file)
4024 return seq_open_net(inode, file, &ptype_seq_ops,
4025 sizeof(struct seq_net_private));
4028 static const struct file_operations ptype_seq_fops = {
4029 .owner = THIS_MODULE,
4030 .open = ptype_seq_open,
4032 .llseek = seq_lseek,
4033 .release = seq_release_net,
4037 static int __net_init dev_proc_net_init(struct net *net)
4041 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4043 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4045 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4048 if (wext_proc_init(net))
4054 proc_net_remove(net, "ptype");
4056 proc_net_remove(net, "softnet_stat");
4058 proc_net_remove(net, "dev");
4062 static void __net_exit dev_proc_net_exit(struct net *net)
4064 wext_proc_exit(net);
4066 proc_net_remove(net, "ptype");
4067 proc_net_remove(net, "softnet_stat");
4068 proc_net_remove(net, "dev");
4071 static struct pernet_operations __net_initdata dev_proc_ops = {
4072 .init = dev_proc_net_init,
4073 .exit = dev_proc_net_exit,
4076 static int __init dev_proc_init(void)
4078 return register_pernet_subsys(&dev_proc_ops);
4081 #define dev_proc_init() 0
4082 #endif /* CONFIG_PROC_FS */
4086 * netdev_set_master - set up master/slave pair
4087 * @slave: slave device
4088 * @master: new master device
4090 * Changes the master device of the slave. Pass %NULL to break the
4091 * bonding. The caller must hold the RTNL semaphore. On a failure
4092 * a negative errno code is returned. On success the reference counts
4093 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
4094 * function returns zero.
4096 int netdev_set_master(struct net_device *slave, struct net_device *master)
4098 struct net_device *old = slave->master;
4108 slave->master = master;
4115 slave->flags |= IFF_SLAVE;
4117 slave->flags &= ~IFF_SLAVE;
4119 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4122 EXPORT_SYMBOL(netdev_set_master);
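/* Illustrative sketch (not from this file): how a bonding-style driver
 * enslaves a device and later breaks the bond. Names are hypothetical;
 * the caller must hold RTNL.
 */
static int example_enslave(struct net_device *bond_dev,
			   struct net_device *slave_dev)
{
	ASSERT_RTNL();
	return netdev_set_master(slave_dev, bond_dev);	/* takes a ref on bond_dev */
}

/* ... and on release, still under RTNL: netdev_set_master(slave_dev, NULL); */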
4124 static void dev_change_rx_flags(struct net_device *dev, int flags)
4126 const struct net_device_ops *ops = dev->netdev_ops;
4128 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4129 ops->ndo_change_rx_flags(dev, flags);
4132 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4134 unsigned short old_flags = dev->flags;
4140 dev->flags |= IFF_PROMISC;
4141 dev->promiscuity += inc;
4142 if (dev->promiscuity == 0) {
4145 * If inc causes an overflow, leave promisc untouched and return an error.
4148 dev->flags &= ~IFF_PROMISC;
4150 dev->promiscuity -= inc;
4151 printk(KERN_WARNING "%s: promiscuity counter overflowed; "
4152 "could not set promiscuous mode. The promiscuity feature "
4153 "of the device may be broken.\n", dev->name);
4157 if (dev->flags != old_flags) {
4158 printk(KERN_INFO "device %s %s promiscuous mode\n",
4159 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4161 if (audit_enabled) {
4162 current_uid_gid(&uid, &gid);
4163 audit_log(current->audit_context, GFP_ATOMIC,
4164 AUDIT_ANOM_PROMISCUOUS,
4165 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4166 dev->name, (dev->flags & IFF_PROMISC),
4167 (old_flags & IFF_PROMISC),
4168 audit_get_loginuid(current),
4170 audit_get_sessionid(current));
4173 dev_change_rx_flags(dev, IFF_PROMISC);
4179 * dev_set_promiscuity - update promiscuity count on a device
4183 * Add or remove promiscuity from a device. While the count in the device
4184 * remains above zero the interface remains promiscuous. Once it hits zero
4185 * the device reverts to normal filtering operation. A negative inc
4186 * value is used to drop promiscuity on the device.
4187 * Return 0 if successful or a negative errno code on error.
4189 int dev_set_promiscuity(struct net_device *dev, int inc)
4191 unsigned short old_flags = dev->flags;
4194 err = __dev_set_promiscuity(dev, inc);
4197 if (dev->flags != old_flags)
4198 dev_set_rx_mode(dev);
4201 EXPORT_SYMBOL(dev_set_promiscuity);
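/* Illustrative sketch: a capture-style user takes one promiscuity reference
 * while active and drops it when done. Counting references, rather than
 * flipping IFF_PROMISC directly, lets several such users coexist. Names
 * are hypothetical and RTNL is assumed held by the callers.
 */
static int example_capture_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* enter promiscuous mode */
}

static void example_capture_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);		/* drop our reference */
}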
4204 * dev_set_allmulti - update allmulti count on a device
4208 * Add or remove reception of all multicast frames to a device. While the
4209 * count in the device remains above zero the interface remains listening
4210 * to all multicast frames. Once it hits zero the device reverts to normal
4211 * filtering operation. A negative @inc value is used to drop the counter
4212 * when releasing a resource needing all multicasts.
4213 * Return 0 if successful or a negative errno code on error.
4216 int dev_set_allmulti(struct net_device *dev, int inc)
4218 unsigned short old_flags = dev->flags;
4222 dev->flags |= IFF_ALLMULTI;
4223 dev->allmulti += inc;
4224 if (dev->allmulti == 0) {
4227 * If inc causes an overflow, leave allmulti untouched and return an error.
4230 dev->flags &= ~IFF_ALLMULTI;
4232 dev->allmulti -= inc;
4233 printk(KERN_WARNING "%s: allmulti counter overflowed; "
4234 "could not set allmulti mode. The allmulti feature of the "
4235 "device may be broken.\n", dev->name);
4239 if (dev->flags ^ old_flags) {
4240 dev_change_rx_flags(dev, IFF_ALLMULTI);
4241 dev_set_rx_mode(dev);
4245 EXPORT_SYMBOL(dev_set_allmulti);
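/* Illustrative sketch: a component that must see every multicast frame
 * (a routing daemon helper, say) holds an allmulti reference the same
 * way; names are hypothetical and callers hold RTNL.
 */
static int example_mc_listen_start(struct net_device *dev)
{
	return dev_set_allmulti(dev, 1);	/* receive all multicast */
}

static void example_mc_listen_stop(struct net_device *dev)
{
	dev_set_allmulti(dev, -1);		/* release the reference */
}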
4248 * Upload unicast and multicast address lists to device and
4249 * configure RX filtering. When the device doesn't support unicast
4250 * filtering it is put in promiscuous mode while unicast addresses are present.
4253 void __dev_set_rx_mode(struct net_device *dev)
4255 const struct net_device_ops *ops = dev->netdev_ops;
4257 /* dev_open will call this function so the list will stay sane. */
4258 if (!(dev->flags&IFF_UP))
4261 if (!netif_device_present(dev))
4264 if (ops->ndo_set_rx_mode)
4265 ops->ndo_set_rx_mode(dev);
4267 /* Unicast address changes may only happen under the rtnl,
4268 * therefore calling __dev_set_promiscuity here is safe.
4270 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4271 __dev_set_promiscuity(dev, 1);
4272 dev->uc_promisc = 1;
4273 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4274 __dev_set_promiscuity(dev, -1);
4275 dev->uc_promisc = 0;
4278 if (ops->ndo_set_multicast_list)
4279 ops->ndo_set_multicast_list(dev);
4283 void dev_set_rx_mode(struct net_device *dev)
4285 netif_addr_lock_bh(dev);
4286 __dev_set_rx_mode(dev);
4287 netif_addr_unlock_bh(dev);
4291 * dev_get_flags - get flags reported to userspace
4294 * Get the combination of flag bits exported through APIs to userspace.
4296 unsigned dev_get_flags(const struct net_device *dev)
4300 flags = (dev->flags & ~(IFF_PROMISC |
4305 (dev->gflags & (IFF_PROMISC |
4308 if (netif_running(dev)) {
4309 if (netif_oper_up(dev))
4310 flags |= IFF_RUNNING;
4311 if (netif_carrier_ok(dev))
4312 flags |= IFF_LOWER_UP;
4313 if (netif_dormant(dev))
4314 flags |= IFF_DORMANT;
4319 EXPORT_SYMBOL(dev_get_flags);
4321 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4323 int old_flags = dev->flags;
4329 * Set the flags on our device.
4332 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4333 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4335 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4339 * Load in the correct multicast list now the flags have changed.
4342 if ((old_flags ^ flags) & IFF_MULTICAST)
4343 dev_change_rx_flags(dev, IFF_MULTICAST);
4345 dev_set_rx_mode(dev);
4348 * Have we downed the interface? We handle IFF_UP ourselves
4349 * according to user attempts to set it, rather than blindly setting it.
4354 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4355 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4358 dev_set_rx_mode(dev);
4361 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4362 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4364 dev->gflags ^= IFF_PROMISC;
4365 dev_set_promiscuity(dev, inc);
4368 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4369 is important. Some (broken) drivers set IFF_PROMISC when
4370 IFF_ALLMULTI is requested, without asking us and without reporting it.
4372 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4373 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4375 dev->gflags ^= IFF_ALLMULTI;
4376 dev_set_allmulti(dev, inc);
4382 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4384 unsigned int changes = dev->flags ^ old_flags;
4386 if (changes & IFF_UP) {
4387 if (dev->flags & IFF_UP)
4388 call_netdevice_notifiers(NETDEV_UP, dev);
4390 call_netdevice_notifiers(NETDEV_DOWN, dev);
4393 if (dev->flags & IFF_UP &&
4394 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4395 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4399 * dev_change_flags - change device settings
4401 * @flags: device state flags
4403 * Change settings on a device based on state flags. The flags are
4404 * in the userspace-exported format.
4406 int dev_change_flags(struct net_device *dev, unsigned flags)
4409 int old_flags = dev->flags;
4411 ret = __dev_change_flags(dev, flags);
4415 changes = old_flags ^ dev->flags;
4417 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4419 __dev_notify_flags(dev, old_flags);
4422 EXPORT_SYMBOL(dev_change_flags);
4425 * dev_set_mtu - Change maximum transfer unit
4427 * @new_mtu: new transfer unit
4429 * Change the maximum transfer size of the network device.
4431 int dev_set_mtu(struct net_device *dev, int new_mtu)
4433 const struct net_device_ops *ops = dev->netdev_ops;
4436 if (new_mtu == dev->mtu)
4439 /* MTU must be positive. */
4443 if (!netif_device_present(dev))
4447 if (ops->ndo_change_mtu)
4448 err = ops->ndo_change_mtu(dev, new_mtu);
4452 if (!err && dev->flags & IFF_UP)
4453 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4456 EXPORT_SYMBOL(dev_set_mtu);
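/* Illustrative sketch: shrinking an MTU from kernel code to make room for
 * encapsulation overhead. 'overhead' and the function name are
 * hypothetical; the caller is assumed to hold RTNL.
 */
static int example_shrink_mtu(struct net_device *dev, int overhead)
{
	int err = dev_set_mtu(dev, ETH_DATA_LEN - overhead);

	if (err)
		printk(KERN_WARNING "%s: could not set MTU: %d\n",
		       dev->name, err);
	return err;
}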
4459 * dev_set_mac_address - Change Media Access Control Address
4463 * Change the hardware (MAC) address of the device
4465 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4467 const struct net_device_ops *ops = dev->netdev_ops;
4470 if (!ops->ndo_set_mac_address)
4472 if (sa->sa_family != dev->type)
4474 if (!netif_device_present(dev))
4476 err = ops->ndo_set_mac_address(dev, sa);
4478 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4481 EXPORT_SYMBOL(dev_set_mac_address);
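/* Illustrative sketch: programming a new hardware address from kernel
 * code. 'new_mac' is hypothetical; the caller is assumed to hold RTNL.
 */
static int example_set_mac(struct net_device *dev, const u8 *new_mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;		/* must match, e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, new_mac, ETH_ALEN);
	return dev_set_mac_address(dev, &sa);	/* fires NETDEV_CHANGEADDR on success */
}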
4484 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4486 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4489 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4495 case SIOCGIFFLAGS: /* Get interface flags */
4496 ifr->ifr_flags = (short) dev_get_flags(dev);
4499 case SIOCGIFMETRIC: /* Get the metric on the interface
4500 (currently unused) */
4501 ifr->ifr_metric = 0;
4504 case SIOCGIFMTU: /* Get the MTU of a device */
4505 ifr->ifr_mtu = dev->mtu;
4510 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4512 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4513 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4514 ifr->ifr_hwaddr.sa_family = dev->type;
4522 ifr->ifr_map.mem_start = dev->mem_start;
4523 ifr->ifr_map.mem_end = dev->mem_end;
4524 ifr->ifr_map.base_addr = dev->base_addr;
4525 ifr->ifr_map.irq = dev->irq;
4526 ifr->ifr_map.dma = dev->dma;
4527 ifr->ifr_map.port = dev->if_port;
4531 ifr->ifr_ifindex = dev->ifindex;
4535 ifr->ifr_qlen = dev->tx_queue_len;
4539 /* dev_ioctl() should ensure this case
4551 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4553 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4556 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4557 const struct net_device_ops *ops;
4562 ops = dev->netdev_ops;
4565 case SIOCSIFFLAGS: /* Set interface flags */
4566 return dev_change_flags(dev, ifr->ifr_flags);
4568 case SIOCSIFMETRIC: /* Set the metric on the interface
4569 (currently unused) */
4572 case SIOCSIFMTU: /* Set the MTU of a device */
4573 return dev_set_mtu(dev, ifr->ifr_mtu);
4576 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4578 case SIOCSIFHWBROADCAST:
4579 if (ifr->ifr_hwaddr.sa_family != dev->type)
4581 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4582 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4583 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4587 if (ops->ndo_set_config) {
4588 if (!netif_device_present(dev))
4590 return ops->ndo_set_config(dev, &ifr->ifr_map);
4595 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4596 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4598 if (!netif_device_present(dev))
4600 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4603 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4604 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4606 if (!netif_device_present(dev))
4608 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4611 if (ifr->ifr_qlen < 0)
4613 dev->tx_queue_len = ifr->ifr_qlen;
4617 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4618 return dev_change_name(dev, ifr->ifr_newname);
4621 * Unknown or private ioctl
4624 if ((cmd >= SIOCDEVPRIVATE &&
4625 cmd <= SIOCDEVPRIVATE + 15) ||
4626 cmd == SIOCBONDENSLAVE ||
4627 cmd == SIOCBONDRELEASE ||
4628 cmd == SIOCBONDSETHWADDR ||
4629 cmd == SIOCBONDSLAVEINFOQUERY ||
4630 cmd == SIOCBONDINFOQUERY ||
4631 cmd == SIOCBONDCHANGEACTIVE ||
4632 cmd == SIOCGMIIPHY ||
4633 cmd == SIOCGMIIREG ||
4634 cmd == SIOCSMIIREG ||
4635 cmd == SIOCBRADDIF ||
4636 cmd == SIOCBRDELIF ||
4637 cmd == SIOCSHWTSTAMP ||
4638 cmd == SIOCWANDEV) {
4640 if (ops->ndo_do_ioctl) {
4641 if (netif_device_present(dev))
4642 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4654 * This function handles all "interface"-type I/O control requests. The actual
4655 * 'doing' part of this is dev_ifsioc above.
4659 * dev_ioctl - network device ioctl
4660 * @net: the applicable net namespace
4661 * @cmd: command to issue
4662 * @arg: pointer to a struct ifreq in user space
4664 * Issue ioctl functions to devices. This is normally called by the
4665 * user space syscall interfaces but can sometimes be useful for
4666 * other purposes. The return value is the return from the syscall if
4667 * positive or a negative errno code on error.
4670 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4676 /* One special case: SIOCGIFCONF takes ifconf argument
4677 and requires a shared lock, because it sleeps writing to user space.
4681 if (cmd == SIOCGIFCONF) {
4683 ret = dev_ifconf(net, (char __user *) arg);
4687 if (cmd == SIOCGIFNAME)
4688 return dev_ifname(net, (struct ifreq __user *)arg);
4690 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4693 ifr.ifr_name[IFNAMSIZ-1] = 0;
4695 colon = strchr(ifr.ifr_name, ':');
4700 * See which interface the caller is talking about.
4705 * These ioctl calls:
4706 * - can be done by all.
4707 * - atomic and do not require locking.
4718 dev_load(net, ifr.ifr_name);
4720 ret = dev_ifsioc_locked(net, &ifr, cmd);
4725 if (copy_to_user(arg, &ifr,
4726 sizeof(struct ifreq)))
4732 dev_load(net, ifr.ifr_name);
4734 ret = dev_ethtool(net, &ifr);
4739 if (copy_to_user(arg, &ifr,
4740 sizeof(struct ifreq)))
4746 * These ioctl calls:
4747 * - require superuser power.
4748 * - require strict serialization.
4754 if (!capable(CAP_NET_ADMIN))
4756 dev_load(net, ifr.ifr_name);
4758 ret = dev_ifsioc(net, &ifr, cmd);
4763 if (copy_to_user(arg, &ifr,
4764 sizeof(struct ifreq)))
4770 * These ioctl calls:
4771 * - require superuser power.
4772 * - require strict serialization.
4773 * - do not return a value
4783 case SIOCSIFHWBROADCAST:
4786 case SIOCBONDENSLAVE:
4787 case SIOCBONDRELEASE:
4788 case SIOCBONDSETHWADDR:
4789 case SIOCBONDCHANGEACTIVE:
4793 if (!capable(CAP_NET_ADMIN))
4796 case SIOCBONDSLAVEINFOQUERY:
4797 case SIOCBONDINFOQUERY:
4798 dev_load(net, ifr.ifr_name);
4800 ret = dev_ifsioc(net, &ifr, cmd);
4805 /* Get the per-device memory space. We can add this but
4806 * currently do not support it */
4808 /* Set the per-device memory buffer space.
4809 * Not applicable in our case */
4814 * Unknown or private ioctl.
4817 if (cmd == SIOCWANDEV ||
4818 (cmd >= SIOCDEVPRIVATE &&
4819 cmd <= SIOCDEVPRIVATE + 15)) {
4820 dev_load(net, ifr.ifr_name);
4822 ret = dev_ifsioc(net, &ifr, cmd);
4824 if (!ret && copy_to_user(arg, &ifr,
4825 sizeof(struct ifreq)))
4829 /* Take care of Wireless Extensions */
4830 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4831 return wext_handle_ioctl(net, &ifr, cmd, arg);
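/* Illustrative user-space sketch: the syscall path that lands in this
 * function. Reading an interface MTU is served by dev_ifsioc_locked()
 * above; "eth0" is just an example name.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket will do */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (fd >= 0 && ioctl(fd, SIOCGIFMTU, &ifr) == 0)
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
	close(fd);
	return 0;
}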
4838 * dev_new_index - allocate an ifindex
4839 * @net: the applicable net namespace
4841 * Returns a suitable unique value for a new device interface
4842 * number. The caller must hold the rtnl semaphore or the
4843 * dev_base_lock to be sure it remains unique.
4845 static int dev_new_index(struct net *net)
4851 if (!__dev_get_by_index(net, ifindex))
4856 /* Delayed registration/unregisteration */
4857 static LIST_HEAD(net_todo_list);
4859 static void net_set_todo(struct net_device *dev)
4861 list_add_tail(&dev->todo_list, &net_todo_list);
4864 static void rollback_registered_many(struct list_head *head)
4866 struct net_device *dev, *tmp;
4868 BUG_ON(dev_boot_phase);
4871 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
4872 /* Some devices call without registering
4873 * for initialization unwind. Remove those
4874 * devices and proceed with the remaining ones.
4876 if (dev->reg_state == NETREG_UNINITIALIZED) {
4877 pr_debug("unregister_netdevice: device %s/%p never "
4878 "was registered\n", dev->name, dev);
4881 list_del(&dev->unreg_list);
4885 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4887 /* If device is running, close it first. */
4890 /* And unlink it from device chain. */
4891 unlist_netdevice(dev);
4893 dev->reg_state = NETREG_UNREGISTERING;
4898 list_for_each_entry(dev, head, unreg_list) {
4899 /* Shutdown queueing discipline. */
4903 /* Notify protocols that we are about to destroy
4904 this device. They should clean up all of their state.
4906 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4908 if (!dev->rtnl_link_ops ||
4909 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4910 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4913 * Flush the unicast and multicast chains
4918 if (dev->netdev_ops->ndo_uninit)
4919 dev->netdev_ops->ndo_uninit(dev);
4921 /* Notifier chain MUST detach us from master device. */
4922 WARN_ON(dev->master);
4924 /* Remove entries from kobject tree */
4925 netdev_unregister_kobject(dev);
4928 /* Process any work delayed until the end of the batch */
4929 dev = list_first_entry(head, struct net_device, unreg_list);
4930 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4934 list_for_each_entry(dev, head, unreg_list)
4938 static void rollback_registered(struct net_device *dev)
4942 list_add(&dev->unreg_list, &single);
4943 rollback_registered_many(&single);
4946 unsigned long netdev_fix_features(unsigned long features, const char *name)
4948 /* Fix illegal SG+CSUM combinations. */
4949 if ((features & NETIF_F_SG) &&
4950 !(features & NETIF_F_ALL_CSUM)) {
4952 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4953 "checksum feature.\n", name);
4954 features &= ~NETIF_F_SG;
4957 /* TSO requires that SG is present as well. */
4958 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4960 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4961 "SG feature.\n", name);
4962 features &= ~NETIF_F_TSO;
4965 if (features & NETIF_F_UFO) {
4966 if (!(features & NETIF_F_GEN_CSUM)) {
4968 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4969 "since no NETIF_F_HW_CSUM feature.\n",
4971 features &= ~NETIF_F_UFO;
4974 if (!(features & NETIF_F_SG)) {
4976 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4977 "since no NETIF_F_SG feature.\n", name);
4978 features &= ~NETIF_F_UFO;
4984 EXPORT_SYMBOL(netdev_fix_features);
4987 * netif_stacked_transfer_operstate - transfer operstate
4988 * @rootdev: the root or lower level device to transfer state from
4989 * @dev: the device to transfer operstate to
4991 * Transfer operational state from root to device. This is normally
4992 * called when a stacking relationship exists between the root
4993 * device and the device (a leaf device).
4995 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4996 struct net_device *dev)
4998 if (rootdev->operstate == IF_OPER_DORMANT)
4999 netif_dormant_on(dev);
5001 netif_dormant_off(dev);
5003 if (netif_carrier_ok(rootdev)) {
5004 if (!netif_carrier_ok(dev))
5005 netif_carrier_on(dev);
5007 if (netif_carrier_ok(dev))
5008 netif_carrier_off(dev);
5011 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5013 static int netif_alloc_rx_queues(struct net_device *dev)
5016 unsigned int i, count = dev->num_rx_queues;
5017 struct netdev_rx_queue *rx;
5021 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5023 pr_err("netdev: Unable to allocate %u rx queues.\n", count);
5029 * Set a pointer to the first element in the array, which holds the reference count.
5032 for (i = 0; i < count; i++)
5038 static int netif_alloc_netdev_queues(struct net_device *dev)
5040 unsigned int count = dev->num_tx_queues;
5041 struct netdev_queue *tx;
5045 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5047 pr_err("netdev: Unable to allocate %u tx queues.\n",
5055 static void netdev_init_one_queue(struct net_device *dev,
5056 struct netdev_queue *queue,
5061 /* Initialize queue lock */
5062 spin_lock_init(&queue->_xmit_lock);
5063 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5064 queue->xmit_lock_owner = -1;
5067 static void netdev_init_queues(struct net_device *dev)
5069 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5070 spin_lock_init(&dev->tx_global_lock);
5074 * register_netdevice - register a network device
5075 * @dev: device to register
5077 * Take a completed network device structure and add it to the kernel
5078 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5079 * chain. 0 is returned on success. A negative errno code is returned
5080 * on a failure to set up the device, or if the name is a duplicate.
5082 * Callers must hold the rtnl semaphore. You may want
5083 * register_netdev() instead of this.
5086 * The locking appears insufficient to guarantee two parallel registers
5087 * will not get the same name.
5090 int register_netdevice(struct net_device *dev)
5093 struct net *net = dev_net(dev);
5095 BUG_ON(dev_boot_phase);
5100 /* When net_devices are persistent, this will be fatal. */
5101 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5104 spin_lock_init(&dev->addr_list_lock);
5105 netdev_set_addr_lockdep_class(dev);
5109 ret = netif_alloc_rx_queues(dev);
5113 ret = netif_alloc_netdev_queues(dev);
5117 netdev_init_queues(dev);
5119 /* Init, if this function is available */
5120 if (dev->netdev_ops->ndo_init) {
5121 ret = dev->netdev_ops->ndo_init(dev);
5129 ret = dev_get_valid_name(dev, dev->name, 0);
5133 dev->ifindex = dev_new_index(net);
5134 if (dev->iflink == -1)
5135 dev->iflink = dev->ifindex;
5137 /* Fix illegal checksum combinations */
5138 if ((dev->features & NETIF_F_HW_CSUM) &&
5139 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5140 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5142 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5145 if ((dev->features & NETIF_F_NO_CSUM) &&
5146 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5147 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5149 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5152 dev->features = netdev_fix_features(dev->features, dev->name);
5154 /* Enable software GSO if SG is supported. */
5155 if (dev->features & NETIF_F_SG)
5156 dev->features |= NETIF_F_GSO;
5158 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5159 * vlan_dev_init() will do the dev->features check, so these features
5160 * are enabled only if supported by underlying device.
5162 dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
5164 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5165 ret = notifier_to_errno(ret);
5169 ret = netdev_register_kobject(dev);
5172 dev->reg_state = NETREG_REGISTERED;
5175 * Default initial state at registry is that the
5176 * device is present.
5179 set_bit(__LINK_STATE_PRESENT, &dev->state);
5181 dev_init_scheduler(dev);
5183 list_netdevice(dev);
5185 /* Notify protocols, that a new device appeared. */
5186 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5187 ret = notifier_to_errno(ret);
5189 rollback_registered(dev);
5190 dev->reg_state = NETREG_UNREGISTERED;
5193 * Prevent userspace races by waiting until the network
5194 * device is fully setup before sending notifications.
5196 if (!dev->rtnl_link_ops ||
5197 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5198 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5204 if (dev->netdev_ops->ndo_uninit)
5205 dev->netdev_ops->ndo_uninit(dev);
5208 EXPORT_SYMBOL(register_netdevice);
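/*
 * Usage sketch: register_netdevice() requires the caller to already
 * hold the RTNL lock, e.g. when a device is created from another
 * operation that runs under RTNL:
 *
 *        rtnl_lock();
 *        err = register_netdevice(dev);
 *        rtnl_unlock();
 */
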
/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);

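/*
 * Illustrative sketch ("priv", my_poll() and the weight of 64 are
 * hypothetical): a driver with one interrupt source but several ports
 * can hang its NAPI context off a dummy netdev instead of a registered
 * interface.
 *
 *        init_dummy_netdev(&priv->dummy_dev);
 *        netif_napi_add(&priv->dummy_dev, &priv->napi, my_poll, 64);
 *        napi_enable(&priv->napi);
 */
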
/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl
 * semaphore and expands the device name if you passed a format string
 * to alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to do a
	 * name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);

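/*
 * Illustrative probe-path sketch ("struct my_priv" and my_setup() are
 * hypothetical): register_netdev() pairs with unregister_netdev(), and
 * free_netdev() handles both the error path and final removal.
 *
 *        dev = alloc_netdev(sizeof(struct my_priv), "eth%d", my_setup);
 *        if (!dev)
 *                return -ENOMEM;
 *        err = register_netdev(dev);
 *        if (err) {
 *                free_netdev(dev);
 *                return err;
 *        }
 */
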
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

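/*
 * Note: the per-cpu counters are only ever changed via dev_hold() and
 * dev_put(), so the sum computed above is exact once writers are
 * excluded; unregister uses it to wait for the count to reach zero.
 *
 *        dev_hold(dev);        - pin the device
 *        ...
 *        dev_put(dev);         - release; unregister can then complete
 */
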
/**
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
			 * should have already handled it the first time */

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *        rtnl_lock();
 *        ...
 *        register_netdevice(x1);
 *        register_netdevice(x2);
 *        ...
 *        unregister_netdevice(y1);
 *        unregister_netdevice(y2);
 *        ...
 *        rtnl_unlock();
 *        free_netdev(y1);
 *        free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_dereference_raw(dev->ip_ptr));
		WARN_ON(dev->ip6_ptr);
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/**
 * dev_txq_stats_fold - fold tx_queues stats
 * @dev: device to get statistics from
 * @stats: struct rtnl_link_stats64 to hold results
 */
void dev_txq_stats_fold(const struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
	unsigned int i;
	struct netdev_queue *txq;

	for (i = 0; i < dev->num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		spin_lock_bh(&txq->_xmit_lock);
		tx_bytes += txq->tx_bytes;
		tx_packets += txq->tx_packets;
		tx_dropped += txq->tx_dropped;
		spin_unlock_bh(&txq->_xmit_lock);
	}
	if (tx_bytes || tx_packets || tx_dropped) {
		stats->tx_bytes = tx_bytes;
		stats->tx_packets = tx_packets;
		stats->tx_dropped = tx_dropped;
	}
}
EXPORT_SYMBOL(dev_txq_stats_fold);

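/*
 * Illustrative sketch (an assumption, not a fixed driver contract):
 * a driver without hardware tx counters can fold the per-queue software
 * counters into its own stats callback before filling in the rx side.
 * my_get_stats64() is hypothetical.
 *
 *        static struct rtnl_link_stats64 *my_get_stats64(struct net_device *dev,
 *                                        struct rtnl_link_stats64 *stats)
 *        {
 *                dev_txq_stats_fold(dev, stats);
 *                stats->rx_packets = ...;        (device-specific)
 *                return stats;
 *        }
 */
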
/* Convert net_device_stats to rtnl_link_stats64.  They have the same
 * fields in the same order, with only the type differing.
 */
static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
				    const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
		dev_txq_stats_fold(dev, storage);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);

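/*
 * Typical caller pattern (as used by rtnetlink and net-sysfs): @storage
 * is caller-provided scratch space, so no extra locking is needed here.
 *
 *        struct rtnl_link_stats64 temp;
 *        const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
 */
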
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

/**
 * alloc_netdev_mq - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @setup: callback to initialize device
 * @queue_count: the number of subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization.  Also allocates subqueue structs
 * for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (queue_count < 1) {
		pr_err("alloc_netdev: Unable to allocate device "
		       "with zero queues.\n");
		return NULL;
	}

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

#ifdef CONFIG_RPS
	dev->num_rx_queues = queue_count;
	dev->real_num_rx_queues = queue_count;
#endif

	dev->gso_max_size = GSO_MAX_SIZE;

	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
	dev->ethtool_ntuple_list.count = 0;
	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);

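/*
 * Illustrative sketch ("struct my_priv" is hypothetical): a multiqueue
 * driver allocates one object and reaches its private area through
 * netdev_priv().
 *
 *        dev = alloc_netdev_mq(sizeof(struct my_priv), "eth%d",
 *                              ether_setup, 8);
 *        if (!dev)
 *                return -ENOMEM;
 *        priv = netdev_priv(dev);
 */
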
/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	kfree(rcu_dereference_raw(dev->ingress_queue));

	/* Flush device addresses */
	dev_addr_flush(dev);

	/* Clear ethtool n-tuple list */
	ethtool_ntuple_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net -  Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If head not NULL, device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore.  You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

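/*
 * Illustrative sketch ("devs" and "n" are hypothetical): queueing
 * several devices and unregistering them in one batch amortizes the
 * RTNL hold and the synchronize_net() calls across all of them.
 *
 *        LIST_HEAD(kill_list);
 *
 *        rtnl_lock();
 *        for (i = 0; i < n; i++)
 *                unregister_netdevice_queue(devs[i], &kill_list);
 *        unregister_netdevice_many(&kill_list);
 *        rtnl_unlock();
 */
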
/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore.  In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

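/*
 * Typical removal-path sketch: unregister_netdev() tears the interface
 * down, but the structure itself must still be released with
 * free_netdev() from the driver's remove routine.
 *
 *        unregister_netdev(dev);
 *        free_netdev(dev);
 */
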
/**
 * dev_change_net_namespace - move device to different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat, 1))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice() and
	 * unregister_netdevice().
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

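/*
 * Illustrative sketch (an assumption, modelled on how bonding computes
 * its master feature set): fold each slave's features into the
 * aggregate.  "slaves" and "n_slaves" are hypothetical.
 *
 *        unsigned long features = 0;
 *
 *        for (i = 0; i < n_slaves; i++)
 *                features = netdev_increment_features(features,
 *                                                     slaves[i]->features,
 *                                                     NETIF_F_ONE_FOR_ALL);
 */
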
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 * @buffer: buffer for resulting name
 * @len: size of buffer
 *
 * Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;
	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}

static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	int r;								\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	r = __netdev_printk(level, dev, &vaf);				\
	va_end(args);							\
									\
	return r;							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

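/*
 * Usage sketch: these helpers prefix the message with the driver and
 * device name, and are preferred over bare printk() in device context.
 *
 *        netdev_err(dev, "tx timeout on queue %d\n", i);
 */
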
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present.  Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device
	 * that appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);