/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer	:	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
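
/*
 * Illustrative sketch (not part of the original file): the two reader
 * patterns permitted by the rules above. The function name is
 * hypothetical; everything it calls exists in this file or in
 * <linux/netdevice.h>.
 */
#if 0
static int example_count_devices(struct net *net)
{
	struct net_device *dev;
	int count = 0;

	/* Pure reader, variant 1: hold dev_base_lock for reading. */
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev)
		count++;
	read_unlock(&dev_base_lock);

	/* Pure reader, variant 2: an RCU read-side critical section. */
	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		;	/* may run concurrently with writers, never sees a torn list */
	rcu_read_unlock();

	return count;
}
#endif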
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *queue)
{
#ifdef CONFIG_RPS
	spin_lock(&queue->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *queue)
{
#ifdef CONFIG_RPS
	spin_unlock(&queue->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation: if a protocol handler that mangles packets were first
 *	on the list, it could not sense that the packet is cloned and must
 *	be copied-on-write; it would change the clone in place and
 *	subsequent readers would get a broken packet.
 *							--ANK (980803)
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
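
/*
 * Illustrative sketch (not part of the original file): how a protocol
 * module typically uses dev_add_pack(). The handler and variable names
 * are hypothetical; the &packet_type layout and the receive prototype
 * are the real ones. Leaving .dev NULL taps every device.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* ... examine skb ... */
	kfree_skb(skb);		/* the handler owns the (possibly cloned) skb */
	return NET_RX_SUCCESS;
}

static struct packet_type example_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),	/* hashed into ptype_base above */
	.func = example_rcv,
};

static int __init example_init(void)
{
	dev_add_pack(&example_packet_type);
	return 0;
}
#endif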
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
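
/*
 * Illustrative example (the values are hypothetical): given the parser
 * above, a command line of
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 *
 * records irq 9, I/O base 0x300 and the 0xd0000-0xd4000 shared-memory
 * window under the name "eth0"; netdev_boot_setup_check() applies them
 * when a device of that name probes.
 */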
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
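
/*
 * Illustrative sketch (not part of the original file): the reference
 * counting contract of dev_get_by_name(). The function name and the
 * interface name are hypothetical.
 */
#if 0
static int example_use_device(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "eth0");

	if (!dev)
		return -ENODEV;
	/* ... dev cannot be unregistered and freed while we hold the ref ... */
	dev_put(dev);	/* balances the dev_hold() taken above */
	return 0;
}
#endif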
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns %NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns %NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
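
/*
 * Illustrative sketch (not part of the original file): a driver asking
 * for the next free "eth%d" slot before registering. The function name
 * is hypothetical; register_netdevice() must be called under rtnl.
 */
#if 0
static int example_register(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "eth%d");

	if (unit < 0)
		return unit;	/* -EINVAL, -ENFILE, ... */
	/* dev->name now holds e.g. "eth2", and unit == 2 */
	return register_netdevice(dev);
}
#endif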
static int dev_get_valid_name(struct net *net, const char *name, char *buf,
			      bool fmt)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return __dev_alloc_name(net, name, buf);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (buf != name)
		strlcpy(buf, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; format strings such as "eth%d" may
 *	be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, newname, dev->name, 1);
	if (err < 0)
		return err;

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net_eq(net, &init_net)) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */
void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 *	Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
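
/*
 * Illustrative sketch (not part of the original file): a minimal notifier
 * client. Names are hypothetical. On this chain the third argument is the
 * struct net_device itself, and registration replays NETDEV_REGISTER and
 * NETDEV_UP for already-present devices as described above.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		/* ... device came up ... */
		break;
	case NETDEV_UNREGISTER:
		/* ... drop any per-device state ... */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};

/* in module init: register_netdevice_notifier(&example_notifier); */
#endif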
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);

	if (!(dev->flags & IFF_UP))
		return NET_RX_DROP;

	if (skb->len > (dev->mtu + dev->hard_header_len))
		return NET_RX_DROP;

	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
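
/*
 * Illustrative sketch (not part of the original file): the typical caller
 * is a paired virtual device (veth-style) whose start_xmit hands frames
 * to its peer's receive path. "struct example_priv" and the function
 * names are hypothetical.
 */
#if 0
struct example_priv {
	struct net_device *peer;
};

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* resets namespace-sensitive state, then netif_rx() on the peer */
	dev_forward_skb(priv->peer, skb);
	return NETDEV_TX_OK;
}
#endif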
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
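
/*
 * Illustrative sketch (not part of the original file): a PCI driver pairs
 * detach/attach in its power-management hooks. Names are hypothetical and
 * the hardware handling is elided.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* mark absent, stop all TX queues */
	/* ... quiesce and power down the hardware ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... power up and reprogram the hardware ... */
	netif_device_attach(dev);	/* mark present, wake queues, rearm watchdog */
	return 0;
}
#endif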
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/**
 * skb_dev_set -- assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d\n",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
				return 1;
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}
/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		rc = ops->ndo_start_xmit(skb, dev);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		/*
		 * TODO: if skb_orphan() was called by
		 * dev->hard_start_xmit() (for example, the unmodified
		 * igb driver does that; bnx2 doesn't), then
		 * skb_tx_software_timestamp() will be unable to send
		 * back the time stamp.
		 *
		 * How can this be prevented? Always create another
		 * reference to the socket before calling
		 * dev->hard_start_xmit()? Prevent that skb_orphan()
		 * does anything in dev->hard_start_xmit() by clearing
		 * the skb destructor before the call and restoring it
		 * afterwards, then doing the skb_orphan() ourselves?
		 */
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
	return rc;
}
static u32 hashrnd __read_mostly;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = skb->protocol;

	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
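
/*
 * Note on the final line above: ((u64) hash * n) >> 32 maps a 32-bit hash
 * uniformly onto [0, n) without a modulo. For example, with
 * real_num_tx_queues == 4 and hash == 0x80000000, the result is
 * (0x80000000 * 4) >> 32 == 2: the hash space is split into equal
 * contiguous ranges, one per queue.
 */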
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			pr_warning("%s selects TX queue %d, but "
				   "real number of TX queues is %d\n",
				   dev->name, queue_index,
				   dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}
static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	u16 queue_index;
	struct sock *sk = skb->sk;

	if (sk_tx_queue_recorded(sk)) {
		queue_index = sk_tx_queue_get(sk);
	} else {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue) {
			queue_index = ops->ndo_select_queue(dev, skb);
			queue_index = dev_cap_txqueue(dev, queue_index);
		} else {
			queue_index = 0;
			if (dev->real_num_tx_queues > 1)
				queue_index = skb_tx_hash(dev, skb);

			if (sk && rcu_dereference_check(sk->sk_dst_cache, 1))
				sk_tx_queue_set(sk, queue_index);
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int rc;

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		__qdisc_update_bstats(q, skb->len);
		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
			__qdisc_run(q);
		else
			clear_bit(__QDISC_STATE_RUNNING, &q->state);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}
	spin_unlock(root_lock);

	return rc;
}
/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      struct net_device *dev)
{
	return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
	       (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
					      illegal_highdma(dev, skb)));
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	/* Convert a paged skb to linear, if required */
	if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = dev_hard_start_xmit(skb, dev, txq);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
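
/*
 * Illustrative sketch (not part of the original file): minimal use of
 * dev_queue_xmit() by code that has fully built a frame. The function
 * name is hypothetical; per the comment above, the skb is consumed
 * whatever the return value is.
 */
#if 0
static int example_send(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;				/* caller sets the device ... */
	skb->priority = TC_PRIO_CONTROL;	/* ... and the priority */

	return dev_queue_xmit(skb);		/* may return >0 qdisc codes */
}
#endif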
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct ipv6hdr *ip6;
	struct iphdr *ip;
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u8 ip_proto;
	u16 tcpu;
	u32 addr1, addr2, ports, ihl;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->num_rx_queues)) {
			if (net_ratelimit()) {
				pr_warning("%s received packet on queue "
					"%u, but number of RX queues is %u\n",
					dev->name, index, dev->num_rx_queues);
			}
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
		goto done;

	if (skb->rxhash)
		goto got_hash; /* Skip hash computation on packet header */

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		if (!pskb_may_pull(skb, sizeof(*ip)))
			goto done;

		ip = (struct iphdr *) skb->data;
		ip_proto = ip->protocol;
		addr1 = ip->saddr;
		addr2 = ip->daddr;
		ihl = ip->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		if (!pskb_may_pull(skb, sizeof(*ip6)))
			goto done;

		ip6 = (struct ipv6hdr *) skb->data;
		ip_proto = ip6->nexthdr;
		addr1 = ip6->saddr.s6_addr32[3];
		addr2 = ip6->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		goto done;
	}
	ports = 0;
	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		if (pskb_may_pull(skb, (ihl * 4) + 4))
			ports = *((u32 *) (skb->data + (ihl * 4)));
		break;

	default:
		break;
	}

	skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
	if (!skb->rxhash)
		skb->rxhash = 1;

got_hash:
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = rflow->cpu = next_cpu;
			if (tcpu != RPS_NO_CPU)
				rflow->last_qtail = per_cpu(softnet_data,
				    tcpu).input_queue_head;
		}
		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
2350 * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled
2351 * to be sent to kick remote softirq processing. There are two masks since
2352 * the sending of IPIs must be done with interrupts enabled. The select field
2353 * indicates the current mask that enqueue_to_backlog uses to schedule IPIs.
2354 * select is flipped before net_rps_action is called, while still under lock;
2355 * net_rps_action then uses the non-selected mask to send the IPIs and clears
2356 * it without conflicting with enqueue_to_backlog.
2358 struct rps_remote_softirq_cpus {
2362 static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus);
2364 /* Called from hardirq (IPI) context */
2365 static void trigger_softirq(void *data)
2367 struct softnet_data *queue = data;
2368 __napi_schedule(&queue->backlog);
2369 __get_cpu_var(netdev_rx_stat).received_rps++;
2371 #endif /* CONFIG_RPS */
2374 * enqueue_to_backlog is called to queue an skb on a per-CPU backlog
2375 * queue (which may be a remote CPU's queue).
2377 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2378 unsigned int *qtail)
2380 struct softnet_data *queue;
2381 unsigned long flags;
2383 queue = &per_cpu(softnet_data, cpu);
2385 local_irq_save(flags);
2386 __get_cpu_var(netdev_rx_stat).total++;
2389 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2390 if (queue->input_pkt_queue.qlen) {
2392 __skb_queue_tail(&queue->input_pkt_queue, skb);
/* record the queue tail so RFS can verify in-order delivery */
2394 *qtail = queue->input_queue_head +
2395 queue->input_pkt_queue.qlen;
2398 local_irq_restore(flags);
2399 return NET_RX_SUCCESS;
2402 /* Schedule NAPI for backlog device */
2403 if (napi_schedule_prep(&queue->backlog)) {
2405 if (cpu != smp_processor_id()) {
2406 struct rps_remote_softirq_cpus *rcpus =
2407 &__get_cpu_var(rps_remote_softirq_cpus);
2409 cpu_set(cpu, rcpus->mask[rcpus->select]);
2410 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2414 __napi_schedule(&queue->backlog);
2421 __get_cpu_var(netdev_rx_stat).dropped++;
2422 local_irq_restore(flags);
2429 * netif_rx - post buffer to the network code
2430 * @skb: buffer to post
2432 * This function receives a packet from a device driver and queues it for
2433 * the upper (protocol) levels to process. It always succeeds. The buffer
2434 * may be dropped during processing for congestion control or by the
2435 * protocol layers.
2436 *
2437 * Return values:
2438 * NET_RX_SUCCESS (no congestion)
2439 * NET_RX_DROP (packet was dropped)
2443 int netif_rx(struct sk_buff *skb)
2447 /* if netpoll wants it, pretend we never saw it */
2448 if (netpoll_rx(skb))
2451 if (!skb->tstamp.tv64)
2456 struct rps_dev_flow voidflow, *rflow = &voidflow;
2461 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2463 cpu = smp_processor_id();
2465 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2472 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2478 EXPORT_SYMBOL(netif_rx);
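/*
 * Example (illustrative sketch; my_irq_handler and my_fetch_rx_skb are
 * hypothetical): a non-NAPI driver hands a freshly received frame to
 * netif_rx() straight from its interrupt handler.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = my_fetch_rx_skb(dev);
 *
 *		if (!skb)
 *			return IRQ_NONE;
 *
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}
 */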
2480 int netif_rx_ni(struct sk_buff *skb)
2485 err = netif_rx(skb);
2486 if (local_softirq_pending())
2492 EXPORT_SYMBOL(netif_rx_ni);
2494 static void net_tx_action(struct softirq_action *h)
2496 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2498 if (sd->completion_queue) {
2499 struct sk_buff *clist;
2501 local_irq_disable();
2502 clist = sd->completion_queue;
2503 sd->completion_queue = NULL;
2507 struct sk_buff *skb = clist;
2508 clist = clist->next;
2510 WARN_ON(atomic_read(&skb->users));
2515 if (sd->output_queue) {
2518 local_irq_disable();
2519 head = sd->output_queue;
2520 sd->output_queue = NULL;
2524 struct Qdisc *q = head;
2525 spinlock_t *root_lock;
2527 head = head->next_sched;
2529 root_lock = qdisc_lock(q);
2530 if (spin_trylock(root_lock)) {
2531 smp_mb__before_clear_bit();
2532 clear_bit(__QDISC_STATE_SCHED,
2535 spin_unlock(root_lock);
2537 if (!test_bit(__QDISC_STATE_DEACTIVATED,
2539 __netif_reschedule(q);
2541 smp_mb__before_clear_bit();
2542 clear_bit(__QDISC_STATE_SCHED,
2550 static inline int deliver_skb(struct sk_buff *skb,
2551 struct packet_type *pt_prev,
2552 struct net_device *orig_dev)
2554 atomic_inc(&skb->users);
2555 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
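/*
 * Example (illustrative sketch; my_* names are hypothetical): deliver_skb()
 * invokes handlers that protocols register via dev_add_pack(). A minimal
 * tap that matches every protocol and frees each skb it is handed:
 *
 *	static int my_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype = {
 *		.type = __constant_htons(ETH_P_ALL),
 *		.func = my_pkt_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);
 */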
2558 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
2560 #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2561 /* This hook is defined here for ATM LANE */
2562 int (*br_fdb_test_addr_hook)(struct net_device *dev,
2563 unsigned char *addr) __read_mostly;
2564 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2568 * If the bridge module is loaded call the bridging hook.
2569 * Returns NULL if the packet was consumed.
2571 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2572 struct sk_buff *skb) __read_mostly;
2573 EXPORT_SYMBOL_GPL(br_handle_frame_hook);
2575 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2576 struct packet_type **pt_prev, int *ret,
2577 struct net_device *orig_dev)
2579 struct net_bridge_port *port;
2581 if (skb->pkt_type == PACKET_LOOPBACK ||
2582 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2586 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2590 return br_handle_frame_hook(port, skb);
2593 #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2596 #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2597 struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2598 EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2600 static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2601 struct packet_type **pt_prev,
2603 struct net_device *orig_dev)
2605 if (skb->dev->macvlan_port == NULL)
2609 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2612 return macvlan_handle_frame_hook(skb);
2615 #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2618 #ifdef CONFIG_NET_CLS_ACT
2619 /* TODO: Maybe we should just force sch_ingress to be compiled in
2620 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2621 * instructions (a compare and two stores) when ingress is off
2622 * but CONFIG_NET_CLS_ACT is on.
2623 * NOTE: This doesn't remove any functionality; if you don't have
2624 * the ingress scheduler, you just can't add policies on ingress.
2627 static int ing_filter(struct sk_buff *skb)
2629 struct net_device *dev = skb->dev;
2630 u32 ttl = G_TC_RTTL(skb->tc_verd);
2631 struct netdev_queue *rxq;
2632 int result = TC_ACT_OK;
2635 if (MAX_RED_LOOP < ttl++) {
2637 "Redir loop detected Dropping packet (%d->%d)\n",
2638 skb->skb_iif, dev->ifindex);
2642 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2643 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2645 rxq = &dev->rx_queue;
2648 if (q != &noop_qdisc) {
2649 spin_lock(qdisc_lock(q));
2650 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2651 result = qdisc_enqueue_root(skb, q);
2652 spin_unlock(qdisc_lock(q));
2658 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2659 struct packet_type **pt_prev,
2660 int *ret, struct net_device *orig_dev)
2662 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
2666 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2669 /* Huh? Why does turning on AF_PACKET affect this? */
2670 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2673 switch (ing_filter(skb)) {
2687 * netif_nit_deliver - deliver received packets to network taps
2690 * This function is used to deliver incoming packets to network
2691 * taps. It should be used when the normal netif_receive_skb path
2692 * is bypassed, for example because of VLAN acceleration.
2694 void netif_nit_deliver(struct sk_buff *skb)
2696 struct packet_type *ptype;
2698 if (list_empty(&ptype_all))
2701 skb_reset_network_header(skb);
2702 skb_reset_transport_header(skb);
2703 skb->mac_len = skb->network_header - skb->mac_header;
2706 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2707 if (!ptype->dev || ptype->dev == skb->dev)
2708 deliver_skb(skb, ptype, skb->dev);
2713 static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2714 struct net_device *master)
2716 if (skb->pkt_type == PACKET_HOST) {
2717 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2719 memcpy(dest, master->dev_addr, ETH_ALEN);
2723 /* On bonding slaves other than the currently active slave, suppress
2724 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2725 * ARP on active-backup slaves with arp_validate enabled.
2727 int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2729 struct net_device *dev = skb->dev;
2731 if (master->priv_flags & IFF_MASTER_ARPMON)
2732 dev->last_rx = jiffies;
2734 if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
2735 /* Do address unmangle. The local destination address
2736 * will always be the one the master has. Provides the right
2737 * functionality in a bridge.
2739 skb_bond_set_mac_by_master(skb, master);
2742 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2743 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2744 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2747 if (master->priv_flags & IFF_MASTER_ALB) {
2748 if (skb->pkt_type != PACKET_BROADCAST &&
2749 skb->pkt_type != PACKET_MULTICAST)
2752 if (master->priv_flags & IFF_MASTER_8023AD &&
2753 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2760 EXPORT_SYMBOL(__skb_bond_should_drop);
2762 static int __netif_receive_skb(struct sk_buff *skb)
2764 struct packet_type *ptype, *pt_prev;
2765 struct net_device *orig_dev;
2766 struct net_device *master;
2767 struct net_device *null_or_orig;
2768 struct net_device *null_or_bond;
2769 int ret = NET_RX_DROP;
2772 if (!skb->tstamp.tv64)
2775 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
2776 return NET_RX_SUCCESS;
2778 /* if we've gotten here through NAPI, check netpoll */
2779 if (netpoll_receive_skb(skb))
2783 skb->skb_iif = skb->dev->ifindex;
2785 null_or_orig = NULL;
2786 orig_dev = skb->dev;
2787 master = ACCESS_ONCE(orig_dev->master);
2789 if (skb_bond_should_drop(skb, master))
2790 null_or_orig = orig_dev; /* deliver only exact match */
2795 __get_cpu_var(netdev_rx_stat).total++;
2797 skb_reset_network_header(skb);
2798 skb_reset_transport_header(skb);
2799 skb->mac_len = skb->network_header - skb->mac_header;
2805 #ifdef CONFIG_NET_CLS_ACT
2806 if (skb->tc_verd & TC_NCLS) {
2807 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2812 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2813 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2814 ptype->dev == orig_dev) {
2816 ret = deliver_skb(skb, pt_prev, orig_dev);
2821 #ifdef CONFIG_NET_CLS_ACT
2822 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2828 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2831 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2836 * Make sure frames received on VLAN interfaces stacked on
2837 * bonding interfaces still make their way to any base bonding
2838 * device that may have registered for a specific ptype. The
2839 * handler may have to adjust skb->dev and orig_dev.
2841 null_or_bond = NULL;
2842 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2843 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2844 null_or_bond = vlan_dev_real_dev(skb->dev);
2847 type = skb->protocol;
2848 list_for_each_entry_rcu(ptype,
2849 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2850 if (ptype->type == type && (ptype->dev == null_or_orig ||
2851 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2852 ptype->dev == null_or_bond)) {
2854 ret = deliver_skb(skb, pt_prev, orig_dev);
2860 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2863 /* Jamal, now you will not be able to escape explaining
2864 * to me how you were going to use this. :-)
2875 * netif_receive_skb - process receive buffer from network
2876 * @skb: buffer to process
2878 * netif_receive_skb() is the main receive data processing function.
2879 * It always succeeds. The buffer may be dropped during processing
2880 * for congestion control or by the protocol layers.
2882 * This function may only be called from softirq context and interrupts
2883 * should be enabled.
2885 * Return values (usually ignored):
2886 * NET_RX_SUCCESS: no congestion
2887 * NET_RX_DROP: packet was dropped
2889 int netif_receive_skb(struct sk_buff *skb)
2892 struct rps_dev_flow voidflow, *rflow = &voidflow;
2897 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2900 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2904 ret = __netif_receive_skb(skb);
2909 return __netif_receive_skb(skb);
2912 EXPORT_SYMBOL(netif_receive_skb);
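/*
 * Example (illustrative sketch; my_poll and my_next_rx_skb are
 * hypothetical): the usual caller is a NAPI poll routine, which feeds up
 * to @budget frames into netif_receive_skb(). Completion handling is
 * shown separately near napi_complete() below.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *		struct sk_buff *skb;
 *
 *		while (work < budget && (skb = my_next_rx_skb(napi))) {
 *			skb->protocol = eth_type_trans(skb, napi->dev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		return work;
 *	}
 */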
2914 /* Network device is going away, flush any packets still pending */
2915 static void flush_backlog(void *arg)
2917 struct net_device *dev = arg;
2918 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2919 struct sk_buff *skb, *tmp;
2922 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2923 if (skb->dev == dev) {
2924 __skb_unlink(skb, &queue->input_pkt_queue);
2926 incr_input_queue_head(queue);
2931 static int napi_gro_complete(struct sk_buff *skb)
2933 struct packet_type *ptype;
2934 __be16 type = skb->protocol;
2935 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2938 if (NAPI_GRO_CB(skb)->count == 1) {
2939 skb_shinfo(skb)->gso_size = 0;
2944 list_for_each_entry_rcu(ptype, head, list) {
2945 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2948 err = ptype->gro_complete(skb);
2954 WARN_ON(&ptype->list == head);
2956 return NET_RX_SUCCESS;
2960 return netif_receive_skb(skb);
2963 static void napi_gro_flush(struct napi_struct *napi)
2965 struct sk_buff *skb, *next;
2967 for (skb = napi->gro_list; skb; skb = next) {
2970 napi_gro_complete(skb);
2973 napi->gro_count = 0;
2974 napi->gro_list = NULL;
2977 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2979 struct sk_buff **pp = NULL;
2980 struct packet_type *ptype;
2981 __be16 type = skb->protocol;
2982 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2985 enum gro_result ret;
2987 if (!(skb->dev->features & NETIF_F_GRO))
2990 if (skb_is_gso(skb) || skb_has_frags(skb))
2994 list_for_each_entry_rcu(ptype, head, list) {
2995 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2998 skb_set_network_header(skb, skb_gro_offset(skb));
2999 mac_len = skb->network_header - skb->mac_header;
3000 skb->mac_len = mac_len;
3001 NAPI_GRO_CB(skb)->same_flow = 0;
3002 NAPI_GRO_CB(skb)->flush = 0;
3003 NAPI_GRO_CB(skb)->free = 0;
3005 pp = ptype->gro_receive(&napi->gro_list, skb);
3010 if (&ptype->list == head)
3013 same_flow = NAPI_GRO_CB(skb)->same_flow;
3014 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3017 struct sk_buff *nskb = *pp;
3021 napi_gro_complete(nskb);
3028 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3032 NAPI_GRO_CB(skb)->count = 1;
3033 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3034 skb->next = napi->gro_list;
3035 napi->gro_list = skb;
3039 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3040 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3042 BUG_ON(skb->end - skb->tail < grow);
3044 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3047 skb->data_len -= grow;
3049 skb_shinfo(skb)->frags[0].page_offset += grow;
3050 skb_shinfo(skb)->frags[0].size -= grow;
3052 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3053 put_page(skb_shinfo(skb)->frags[0].page);
3054 memmove(skb_shinfo(skb)->frags,
3055 skb_shinfo(skb)->frags + 1,
3056 --skb_shinfo(skb)->nr_frags);
3067 EXPORT_SYMBOL(dev_gro_receive);
3070 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3074 if (netpoll_rx_on(skb))
3077 for (p = napi->gro_list; p; p = p->next) {
3078 NAPI_GRO_CB(p)->same_flow =
3079 (p->dev == skb->dev) &&
3080 !compare_ether_header(skb_mac_header(p),
3081 skb_gro_mac_header(skb));
3082 NAPI_GRO_CB(p)->flush = 0;
3085 return dev_gro_receive(napi, skb);
3088 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3092 if (netif_receive_skb(skb))
3097 case GRO_MERGED_FREE:
3108 EXPORT_SYMBOL(napi_skb_finish);
3110 void skb_gro_reset_offset(struct sk_buff *skb)
3112 NAPI_GRO_CB(skb)->data_offset = 0;
3113 NAPI_GRO_CB(skb)->frag0 = NULL;
3114 NAPI_GRO_CB(skb)->frag0_len = 0;
3116 if (skb->mac_header == skb->tail &&
3117 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
3118 NAPI_GRO_CB(skb)->frag0 =
3119 page_address(skb_shinfo(skb)->frags[0].page) +
3120 skb_shinfo(skb)->frags[0].page_offset;
3121 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3124 EXPORT_SYMBOL(skb_gro_reset_offset);
3126 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3128 skb_gro_reset_offset(skb);
3130 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3132 EXPORT_SYMBOL(napi_gro_receive);
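/*
 * Example (illustrative sketch): a GRO-aware driver substitutes
 * napi_gro_receive() for netif_receive_skb() in its poll loop and lets
 * the stack merge same-flow segments:
 *
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 */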
3134 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3136 __skb_pull(skb, skb_headlen(skb));
3137 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3141 EXPORT_SYMBOL(napi_reuse_skb);
3143 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3145 struct sk_buff *skb = napi->skb;
3148 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3154 EXPORT_SYMBOL(napi_get_frags);
3156 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3162 skb->protocol = eth_type_trans(skb, skb->dev);
3164 if (ret == GRO_HELD)
3165 skb_gro_pull(skb, -ETH_HLEN);
3166 else if (netif_receive_skb(skb))
3171 case GRO_MERGED_FREE:
3172 napi_reuse_skb(napi, skb);
3181 EXPORT_SYMBOL(napi_frags_finish);
3183 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3185 struct sk_buff *skb = napi->skb;
3192 skb_reset_mac_header(skb);
3193 skb_gro_reset_offset(skb);
3195 off = skb_gro_offset(skb);
3196 hlen = off + sizeof(*eth);
3197 eth = skb_gro_header_fast(skb, off);
3198 if (skb_gro_header_hard(skb, hlen)) {
3199 eth = skb_gro_header_slow(skb, hlen, off);
3200 if (unlikely(!eth)) {
3201 napi_reuse_skb(napi, skb);
3207 skb_gro_pull(skb, sizeof(*eth));
3210 * This works because the only protocols we care about don't require
3211 * special handling. We'll fix it up properly at the end.
3213 skb->protocol = eth->h_proto;
3218 EXPORT_SYMBOL(napi_frags_skb);
3220 gro_result_t napi_gro_frags(struct napi_struct *napi)
3222 struct sk_buff *skb = napi_frags_skb(napi);
3227 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3229 EXPORT_SYMBOL(napi_gro_frags);
3231 static int process_backlog(struct napi_struct *napi, int quota)
3234 struct softnet_data *queue = &__get_cpu_var(softnet_data);
3236 napi->weight = weight_p;
3238 struct sk_buff *skb;
3240 local_irq_disable();
3242 skb = __skb_dequeue(&queue->input_pkt_queue);
3244 __napi_complete(napi);
3249 incr_input_queue_head(queue);
3253 __netif_receive_skb(skb);
3254 } while (++work < quota);
3260 * __napi_schedule - schedule for receive
3261 * @n: entry to schedule
3263 * The entry's receive function will be scheduled to run
3265 void __napi_schedule(struct napi_struct *n)
3267 unsigned long flags;
3269 local_irq_save(flags);
3270 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
3271 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3272 local_irq_restore(flags);
3274 EXPORT_SYMBOL(__napi_schedule);
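/*
 * Example (illustrative sketch; my_* names are hypothetical): drivers
 * normally use the napi_schedule() wrapper, which pairs
 * napi_schedule_prep() with __napi_schedule(). A typical RX interrupt
 * handler masks its interrupt and defers the work:
 *
 *	static irqreturn_t my_rx_irq(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *		return IRQ_HANDLED;
 *	}
 */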
3276 void __napi_complete(struct napi_struct *n)
3278 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3279 BUG_ON(n->gro_list);
3281 list_del(&n->poll_list);
3282 smp_mb__before_clear_bit();
3283 clear_bit(NAPI_STATE_SCHED, &n->state);
3285 EXPORT_SYMBOL(__napi_complete);
3287 void napi_complete(struct napi_struct *n)
3289 unsigned long flags;
3292 * don't let NAPI dequeue from the CPU poll list
3293 * just in case it's running on a different CPU
3295 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3299 local_irq_save(flags);
3301 local_irq_restore(flags);
3303 EXPORT_SYMBOL(napi_complete);
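/*
 * Example (illustrative sketch; my_enable_rx_irq is hypothetical): the
 * completion half of a poll routine. Only when less than the full
 * budget was consumed may the driver call napi_complete() and re-enable
 * its interrupt:
 *
 *	(inside my_poll(), after processing 'work' of 'budget' packets)
 *
 *	if (work < budget) {
 *		napi_complete(napi);
 *		my_enable_rx_irq(priv);
 *	}
 *	return work;
 */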
3305 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3306 int (*poll)(struct napi_struct *, int), int weight)
3308 INIT_LIST_HEAD(&napi->poll_list);
3309 napi->gro_count = 0;
3310 napi->gro_list = NULL;
3313 napi->weight = weight;
3314 list_add(&napi->dev_list, &dev->napi_list);
3316 #ifdef CONFIG_NETPOLL
3317 spin_lock_init(&napi->poll_lock);
3318 napi->poll_owner = -1;
3320 set_bit(NAPI_STATE_SCHED, &napi->state);
3322 EXPORT_SYMBOL(netif_napi_add);
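/*
 * Example (illustrative sketch; my_poll and struct my_priv are
 * hypothetical): probe-time registration with the conventional weight
 * of 64:
 *
 *	struct my_priv *priv = netdev_priv(dev);
 *
 *	netif_napi_add(dev, &priv->napi, my_poll, 64);
 */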
3324 void netif_napi_del(struct napi_struct *napi)
3326 struct sk_buff *skb, *next;
3328 list_del_init(&napi->dev_list);
3329 napi_free_frags(napi);
3331 for (skb = napi->gro_list; skb; skb = next) {
3337 napi->gro_list = NULL;
3338 napi->gro_count = 0;
3340 EXPORT_SYMBOL(netif_napi_del);
3344 * net_rps_action sends any pending IPIs for RPS. This is only called from
3345 * softirq context, and interrupts must be enabled.
3347 static void net_rps_action(cpumask_t *mask)
3351 /* Send pending IPIs to kick RPS processing on remote CPUs. */
3352 for_each_cpu_mask_nr(cpu, *mask) {
3353 struct softnet_data *queue = &per_cpu(softnet_data, cpu);
3354 if (cpu_online(cpu))
3355 __smp_call_function_single(cpu, &queue->csd, 0);
3361 static void net_rx_action(struct softirq_action *h)
3363 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
3364 unsigned long time_limit = jiffies + 2;
3365 int budget = netdev_budget;
3369 struct rps_remote_softirq_cpus *rcpus;
3372 local_irq_disable();
3374 while (!list_empty(list)) {
3375 struct napi_struct *n;
3378 /* If the softirq window is exhausted then punt.
3379 * Allow this to run for 2 jiffies, which allows
3380 * an average latency of 1.5/HZ.
3382 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3387 /* Even though interrupts have been re-enabled, this
3388 * access is safe because interrupts can only add new
3389 * entries to the tail of this list, and only ->poll()
3390 * calls can remove this head entry from the list.
3392 n = list_first_entry(list, struct napi_struct, poll_list);
3394 have = netpoll_poll_lock(n);
3398 /* This NAPI_STATE_SCHED test is for avoiding a race
3399 * with netpoll's poll_napi(). Only the entity which
3400 * obtains the lock and sees NAPI_STATE_SCHED set will
3401 * actually make the ->poll() call. Therefore we avoid
3402 * accidentally calling ->poll() when NAPI is not scheduled.
3405 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3406 work = n->poll(n, weight);
3410 WARN_ON_ONCE(work > weight);
3414 local_irq_disable();
3416 /* Drivers must not modify the NAPI state if they
3417 * consume the entire weight. In such cases this code
3418 * still "owns" the NAPI instance and therefore can
3419 * move the instance around on the list at-will.
3421 if (unlikely(work == weight)) {
3422 if (unlikely(napi_disable_pending(n))) {
3425 local_irq_disable();
3427 list_move_tail(&n->poll_list, list);
3430 netpoll_poll_unlock(have);
3434 rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
3435 select = rcpus->select;
3440 net_rps_action(&rcpus->mask[select]);
3445 #ifdef CONFIG_NET_DMA
3447 * There may not be any more sk_buffs coming right now, so push
3448 * any pending DMA copies to hardware
3450 dma_issue_pending_all();
3456 __get_cpu_var(netdev_rx_stat).time_squeeze++;
3457 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3461 static gifconf_func_t *gifconf_list[NPROTO];
3464 * register_gifconf - register a SIOCGIF handler
3465 * @family: Address family
3466 * @gifconf: Function handler
3468 * Register protocol dependent address dumping routines. The handler
3469 * that is passed must not be freed or reused until it has been replaced
3470 * by another handler.
3472 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3474 if (family >= NPROTO)
3476 gifconf_list[family] = gifconf;
3479 EXPORT_SYMBOL(register_gifconf);
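/*
 * Example (illustrative sketch; my_gifconf and my_dump_addrs are
 * hypothetical, modelled on the IPv4 registration in devinet.c): a
 * handler reports the space it needs when called with a NULL buffer, as
 * dev_ifconf() below does for sizing, and otherwise writes one ifreq
 * per address:
 *
 *	static int my_gifconf(struct net_device *dev, char __user *buf,
 *			      int len)
 *	{
 *		if (!buf)
 *			return sizeof(struct ifreq);
 *		return my_dump_addrs(dev, buf, len);
 *	}
 *
 *	register_gifconf(PF_INET, my_gifconf);
 */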
3483 * Map an interface index to its name (SIOCGIFNAME)
3487 * We need this ioctl for efficient implementation of the
3488 * if_indextoname() function required by the IPv6 API. Without
3489 * it, we would have to search all the interfaces to find a
3490 * match. --pb
3493 static int dev_ifname(struct net *net, struct ifreq __user *arg)
3495 struct net_device *dev;
3499 * Fetch the caller's info block.
3502 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3506 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3512 strcpy(ifr.ifr_name, dev->name);
3515 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3521 * Perform a SIOCGIFCONF call. This structure will change
3522 * size eventually, and there is nothing I can do about it.
3523 * Thus we will need a 'compatibility mode'.
3526 static int dev_ifconf(struct net *net, char __user *arg)
3529 struct net_device *dev;
3536 * Fetch the caller's info block.
3539 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3546 * Loop over the interfaces, and write an info block for each.
3550 for_each_netdev(net, dev) {
3551 for (i = 0; i < NPROTO; i++) {
3552 if (gifconf_list[i]) {
3555 done = gifconf_list[i](dev, NULL, 0);
3557 done = gifconf_list[i](dev, pos + total,
3567 * All done. Write the updated control block back to the caller.
3569 ifc.ifc_len = total;
3572 * Both BSD and Solaris return 0 here, so we do too.
3574 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3577 #ifdef CONFIG_PROC_FS
3579 * This is invoked by the /proc filesystem handler to display a device
3580 * in detail.
3582 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3585 struct net *net = seq_file_net(seq);
3587 struct net_device *dev;
3591 return SEQ_START_TOKEN;
3594 for_each_netdev_rcu(net, dev)
3601 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3603 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3604 first_net_device(seq_file_net(seq)) :
3605 next_net_device((struct net_device *)v);
3608 return rcu_dereference(dev);
3611 void dev_seq_stop(struct seq_file *seq, void *v)
3617 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3619 const struct net_device_stats *stats = dev_get_stats(dev);
3621 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3622 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3623 dev->name, stats->rx_bytes, stats->rx_packets,
3625 stats->rx_dropped + stats->rx_missed_errors,
3626 stats->rx_fifo_errors,
3627 stats->rx_length_errors + stats->rx_over_errors +
3628 stats->rx_crc_errors + stats->rx_frame_errors,
3629 stats->rx_compressed, stats->multicast,
3630 stats->tx_bytes, stats->tx_packets,
3631 stats->tx_errors, stats->tx_dropped,
3632 stats->tx_fifo_errors, stats->collisions,
3633 stats->tx_carrier_errors +
3634 stats->tx_aborted_errors +
3635 stats->tx_window_errors +
3636 stats->tx_heartbeat_errors,
3637 stats->tx_compressed);
3641 * Called from the PROCfs module. This now uses the new arbitrary sized
3642 * /proc/net interface to create /proc/net/dev
3644 static int dev_seq_show(struct seq_file *seq, void *v)
3646 if (v == SEQ_START_TOKEN)
3647 seq_puts(seq, "Inter-| Receive "
3649 " face |bytes packets errs drop fifo frame "
3650 "compressed multicast|bytes packets errs "
3651 "drop fifo colls carrier compressed\n");
3653 dev_seq_printf_stats(seq, v);
3657 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3659 struct netif_rx_stats *rc = NULL;
3661 while (*pos < nr_cpu_ids)
3662 if (cpu_online(*pos)) {
3663 rc = &per_cpu(netdev_rx_stat, *pos);
3670 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3672 return softnet_get_online(pos);
3675 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3678 return softnet_get_online(pos);
3681 static void softnet_seq_stop(struct seq_file *seq, void *v)
3685 static int softnet_seq_show(struct seq_file *seq, void *v)
3687 struct netif_rx_stats *s = v;
3689 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3690 s->total, s->dropped, s->time_squeeze, 0,
3691 0, 0, 0, 0, /* was fastroute */
3692 s->cpu_collision, s->received_rps);
3696 static const struct seq_operations dev_seq_ops = {
3697 .start = dev_seq_start,
3698 .next = dev_seq_next,
3699 .stop = dev_seq_stop,
3700 .show = dev_seq_show,
3703 static int dev_seq_open(struct inode *inode, struct file *file)
3705 return seq_open_net(inode, file, &dev_seq_ops,
3706 sizeof(struct seq_net_private));
3709 static const struct file_operations dev_seq_fops = {
3710 .owner = THIS_MODULE,
3711 .open = dev_seq_open,
3713 .llseek = seq_lseek,
3714 .release = seq_release_net,
3717 static const struct seq_operations softnet_seq_ops = {
3718 .start = softnet_seq_start,
3719 .next = softnet_seq_next,
3720 .stop = softnet_seq_stop,
3721 .show = softnet_seq_show,
3724 static int softnet_seq_open(struct inode *inode, struct file *file)
3726 return seq_open(file, &softnet_seq_ops);
3729 static const struct file_operations softnet_seq_fops = {
3730 .owner = THIS_MODULE,
3731 .open = softnet_seq_open,
3733 .llseek = seq_lseek,
3734 .release = seq_release,
3737 static void *ptype_get_idx(loff_t pos)
3739 struct packet_type *pt = NULL;
3743 list_for_each_entry_rcu(pt, &ptype_all, list) {
3749 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3750 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3759 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3763 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3766 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3768 struct packet_type *pt;
3769 struct list_head *nxt;
3773 if (v == SEQ_START_TOKEN)
3774 return ptype_get_idx(0);
3777 nxt = pt->list.next;
3778 if (pt->type == htons(ETH_P_ALL)) {
3779 if (nxt != &ptype_all)
3782 nxt = ptype_base[0].next;
3784 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3786 while (nxt == &ptype_base[hash]) {
3787 if (++hash >= PTYPE_HASH_SIZE)
3789 nxt = ptype_base[hash].next;
3792 return list_entry(nxt, struct packet_type, list);
3795 static void ptype_seq_stop(struct seq_file *seq, void *v)
3801 static int ptype_seq_show(struct seq_file *seq, void *v)
3803 struct packet_type *pt = v;
3805 if (v == SEQ_START_TOKEN)
3806 seq_puts(seq, "Type Device Function\n");
3807 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3808 if (pt->type == htons(ETH_P_ALL))
3809 seq_puts(seq, "ALL ");
3811 seq_printf(seq, "%04x", ntohs(pt->type));
3813 seq_printf(seq, " %-8s %pF\n",
3814 pt->dev ? pt->dev->name : "", pt->func);
3820 static const struct seq_operations ptype_seq_ops = {
3821 .start = ptype_seq_start,
3822 .next = ptype_seq_next,
3823 .stop = ptype_seq_stop,
3824 .show = ptype_seq_show,
3827 static int ptype_seq_open(struct inode *inode, struct file *file)
3829 return seq_open_net(inode, file, &ptype_seq_ops,
3830 sizeof(struct seq_net_private));
3833 static const struct file_operations ptype_seq_fops = {
3834 .owner = THIS_MODULE,
3835 .open = ptype_seq_open,
3837 .llseek = seq_lseek,
3838 .release = seq_release_net,
3842 static int __net_init dev_proc_net_init(struct net *net)
3846 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3848 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3850 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3853 if (wext_proc_init(net))
3859 proc_net_remove(net, "ptype");
3861 proc_net_remove(net, "softnet_stat");
3863 proc_net_remove(net, "dev");
3867 static void __net_exit dev_proc_net_exit(struct net *net)
3869 wext_proc_exit(net);
3871 proc_net_remove(net, "ptype");
3872 proc_net_remove(net, "softnet_stat");
3873 proc_net_remove(net, "dev");
3876 static struct pernet_operations __net_initdata dev_proc_ops = {
3877 .init = dev_proc_net_init,
3878 .exit = dev_proc_net_exit,
3881 static int __init dev_proc_init(void)
3883 return register_pernet_subsys(&dev_proc_ops);
3886 #define dev_proc_init() 0
3887 #endif /* CONFIG_PROC_FS */
3891 * netdev_set_master - set up master/slave pair
3892 * @slave: slave device
3893 * @master: new master device
3895 * Changes the master device of the slave. Pass %NULL to break the
3896 * bonding. The caller must hold the RTNL semaphore. On a failure
3897 * a negative errno code is returned. On success the reference counts
3898 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3899 * function returns zero.
3901 int netdev_set_master(struct net_device *slave, struct net_device *master)
3903 struct net_device *old = slave->master;
3913 slave->master = master;
3920 slave->flags |= IFF_SLAVE;
3922 slave->flags &= ~IFF_SLAVE;
3924 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3927 EXPORT_SYMBOL(netdev_set_master);
3929 static void dev_change_rx_flags(struct net_device *dev, int flags)
3931 const struct net_device_ops *ops = dev->netdev_ops;
3933 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3934 ops->ndo_change_rx_flags(dev, flags);
3937 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3939 unsigned short old_flags = dev->flags;
3945 dev->flags |= IFF_PROMISC;
3946 dev->promiscuity += inc;
3947 if (dev->promiscuity == 0) {
3950 * If inc causes overflow, leave promiscuity untouched and return an error.
3953 dev->flags &= ~IFF_PROMISC;
3955 dev->promiscuity -= inc;
3956 printk(KERN_WARNING "%s: promiscuity touches roof, "
3957 "set promiscuity failed, promiscuity feature "
3958 "of device might be broken.\n", dev->name);
3962 if (dev->flags != old_flags) {
3963 printk(KERN_INFO "device %s %s promiscuous mode\n",
3964 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3966 if (audit_enabled) {
3967 current_uid_gid(&uid, &gid);
3968 audit_log(current->audit_context, GFP_ATOMIC,
3969 AUDIT_ANOM_PROMISCUOUS,
3970 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3971 dev->name, (dev->flags & IFF_PROMISC),
3972 (old_flags & IFF_PROMISC),
3973 audit_get_loginuid(current),
3975 audit_get_sessionid(current));
3978 dev_change_rx_flags(dev, IFF_PROMISC);
3984 * dev_set_promiscuity - update promiscuity count on a device
3988 * Add or remove promiscuity from a device. While the count in the device
3989 * remains above zero the interface remains promiscuous. Once it hits zero
3990 * the device reverts back to normal filtering operation. A negative inc
3991 * value is used to drop promiscuity on the device.
3992 * Return 0 if successful or a negative errno code on error.
3994 int dev_set_promiscuity(struct net_device *dev, int inc)
3996 unsigned short old_flags = dev->flags;
3999 err = __dev_set_promiscuity(dev, inc);
4002 if (dev->flags != old_flags)
4003 dev_set_rx_mode(dev);
4006 EXPORT_SYMBOL(dev_set_promiscuity);
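/*
 * Example (illustrative sketch): in-kernel users such as packet taps
 * take and drop promiscuity under RTNL; the counting makes nested users
 * safe:
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *
 *	and later, to undo:
 *
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */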
4009 * dev_set_allmulti - update allmulti count on a device
4013 * Add or remove reception of all multicast frames to a device. While the
4014 * count in the device remains above zero the interface remains listening
4015 * for all multicast frames. Once it hits zero the device reverts back to normal
4016 * filtering operation. A negative @inc value is used to drop the counter
4017 * when releasing a resource needing all multicasts.
4018 * Return 0 if successful or a negative errno code on error.
4021 int dev_set_allmulti(struct net_device *dev, int inc)
4023 unsigned short old_flags = dev->flags;
4027 dev->flags |= IFF_ALLMULTI;
4028 dev->allmulti += inc;
4029 if (dev->allmulti == 0) {
4032 * If inc causes overflow, leave allmulti untouched and return an error.
4035 dev->flags &= ~IFF_ALLMULTI;
4037 dev->allmulti -= inc;
4038 printk(KERN_WARNING "%s: allmulti touches roof, "
4039 "set allmulti failed, allmulti feature of "
4040 "device might be broken.\n", dev->name);
4044 if (dev->flags ^ old_flags) {
4045 dev_change_rx_flags(dev, IFF_ALLMULTI);
4046 dev_set_rx_mode(dev);
4050 EXPORT_SYMBOL(dev_set_allmulti);
4053 * Upload unicast and multicast address lists to device and
4054 * configure RX filtering. When the device doesn't support unicast
4055 * filtering it is put in promiscuous mode while unicast addresses
4056 * are present.
4058 void __dev_set_rx_mode(struct net_device *dev)
4060 const struct net_device_ops *ops = dev->netdev_ops;
4062 /* dev_open will call this function so the list will stay sane. */
4063 if (!(dev->flags&IFF_UP))
4066 if (!netif_device_present(dev))
4069 if (ops->ndo_set_rx_mode)
4070 ops->ndo_set_rx_mode(dev);
4072 /* Unicast address changes may only happen under the rtnl,
4073 * therefore calling __dev_set_promiscuity here is safe.
4075 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4076 __dev_set_promiscuity(dev, 1);
4077 dev->uc_promisc = 1;
4078 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4079 __dev_set_promiscuity(dev, -1);
4080 dev->uc_promisc = 0;
4083 if (ops->ndo_set_multicast_list)
4084 ops->ndo_set_multicast_list(dev);
4088 void dev_set_rx_mode(struct net_device *dev)
4090 netif_addr_lock_bh(dev);
4091 __dev_set_rx_mode(dev);
4092 netif_addr_unlock_bh(dev);
4096 * dev_get_flags - get flags reported to userspace
4099 * Get the combination of flag bits exported through APIs to userspace.
4101 unsigned dev_get_flags(const struct net_device *dev)
4105 flags = (dev->flags & ~(IFF_PROMISC |
4110 (dev->gflags & (IFF_PROMISC |
4113 if (netif_running(dev)) {
4114 if (netif_oper_up(dev))
4115 flags |= IFF_RUNNING;
4116 if (netif_carrier_ok(dev))
4117 flags |= IFF_LOWER_UP;
4118 if (netif_dormant(dev))
4119 flags |= IFF_DORMANT;
4124 EXPORT_SYMBOL(dev_get_flags);
4126 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4128 int old_flags = dev->flags;
4134 * Set the flags on our device.
4137 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4138 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4140 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4144 * Load in the correct multicast list now that the flags have changed.
4147 if ((old_flags ^ flags) & IFF_MULTICAST)
4148 dev_change_rx_flags(dev, IFF_MULTICAST);
4150 dev_set_rx_mode(dev);
4153 * Have we downed the interface? We handle IFF_UP ourselves
4154 * according to user attempts to set it, rather than blindly
4155 * setting it.
4159 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4160 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4163 dev_set_rx_mode(dev);
4166 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4167 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4169 dev->gflags ^= IFF_PROMISC;
4170 dev_set_promiscuity(dev, inc);
4173 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4174 is important. Some (broken) drivers set IFF_PROMISC when
4175 IFF_ALLMULTI is requested, without asking us and without reporting it.
4177 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4178 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4180 dev->gflags ^= IFF_ALLMULTI;
4181 dev_set_allmulti(dev, inc);
4187 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4189 unsigned int changes = dev->flags ^ old_flags;
4191 if (changes & IFF_UP) {
4192 if (dev->flags & IFF_UP)
4193 call_netdevice_notifiers(NETDEV_UP, dev);
4195 call_netdevice_notifiers(NETDEV_DOWN, dev);
4198 if (dev->flags & IFF_UP &&
4199 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4200 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4204 * dev_change_flags - change device settings
4206 * @flags: device state flags
4208 * Change settings on a device based on state flags. The flags are
4209 * in the format exported to userspace.
4211 int dev_change_flags(struct net_device *dev, unsigned flags)
4214 int old_flags = dev->flags;
4216 ret = __dev_change_flags(dev, flags);
4220 changes = old_flags ^ dev->flags;
4222 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4224 __dev_notify_flags(dev, old_flags);
4227 EXPORT_SYMBOL(dev_change_flags);
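/*
 * Example (illustrative sketch): bringing an interface up from inside
 * the kernel reduces to a flag change under RTNL:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 */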
4230 * dev_set_mtu - Change maximum transfer unit
4232 * @new_mtu: new transfer unit
4234 * Change the maximum transfer size of the network device.
4236 int dev_set_mtu(struct net_device *dev, int new_mtu)
4238 const struct net_device_ops *ops = dev->netdev_ops;
4241 if (new_mtu == dev->mtu)
4244 /* MTU must be positive. */
4248 if (!netif_device_present(dev))
4252 if (ops->ndo_change_mtu)
4253 err = ops->ndo_change_mtu(dev, new_mtu);
4257 if (!err && dev->flags & IFF_UP)
4258 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4261 EXPORT_SYMBOL(dev_set_mtu);
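/*
 * Example (illustrative sketch; lower_dev is hypothetical): like the
 * other dev_* setters this expects RTNL to be held, e.g. a stacked
 * driver capping the MTU of its lower device:
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(lower_dev, 1500);
 *	rtnl_unlock();
 */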
4264 * dev_set_mac_address - Change Media Access Control Address
4268 * Change the hardware (MAC) address of the device
4270 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4272 const struct net_device_ops *ops = dev->netdev_ops;
4275 if (!ops->ndo_set_mac_address)
4277 if (sa->sa_family != dev->type)
4279 if (!netif_device_present(dev))
4281 err = ops->ndo_set_mac_address(dev, sa);
4283 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4286 EXPORT_SYMBOL(dev_set_mac_address);
4289 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4291 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4294 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4300 case SIOCGIFFLAGS: /* Get interface flags */
4301 ifr->ifr_flags = (short) dev_get_flags(dev);
4304 case SIOCGIFMETRIC: /* Get the metric on the interface
4305 (currently unused) */
4306 ifr->ifr_metric = 0;
4309 case SIOCGIFMTU: /* Get the MTU of a device */
4310 ifr->ifr_mtu = dev->mtu;
4315 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4317 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4318 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4319 ifr->ifr_hwaddr.sa_family = dev->type;
4327 ifr->ifr_map.mem_start = dev->mem_start;
4328 ifr->ifr_map.mem_end = dev->mem_end;
4329 ifr->ifr_map.base_addr = dev->base_addr;
4330 ifr->ifr_map.irq = dev->irq;
4331 ifr->ifr_map.dma = dev->dma;
4332 ifr->ifr_map.port = dev->if_port;
4336 ifr->ifr_ifindex = dev->ifindex;
4340 ifr->ifr_qlen = dev->tx_queue_len;
4344 /* dev_ioctl() should ensure this case
4345 * is never reached */
4356 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4358 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4361 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4362 const struct net_device_ops *ops;
4367 ops = dev->netdev_ops;
4370 case SIOCSIFFLAGS: /* Set interface flags */
4371 return dev_change_flags(dev, ifr->ifr_flags);
4373 case SIOCSIFMETRIC: /* Set the metric on the interface
4374 (currently unused) */
4377 case SIOCSIFMTU: /* Set the MTU of a device */
4378 return dev_set_mtu(dev, ifr->ifr_mtu);
4381 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4383 case SIOCSIFHWBROADCAST:
4384 if (ifr->ifr_hwaddr.sa_family != dev->type)
4386 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4387 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4388 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4392 if (ops->ndo_set_config) {
4393 if (!netif_device_present(dev))
4395 return ops->ndo_set_config(dev, &ifr->ifr_map);
4400 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4401 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4403 if (!netif_device_present(dev))
4405 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4408 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4409 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4411 if (!netif_device_present(dev))
4413 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4416 if (ifr->ifr_qlen < 0)
4418 dev->tx_queue_len = ifr->ifr_qlen;
4422 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4423 return dev_change_name(dev, ifr->ifr_newname);
4426 * Unknown or private ioctl
4429 if ((cmd >= SIOCDEVPRIVATE &&
4430 cmd <= SIOCDEVPRIVATE + 15) ||
4431 cmd == SIOCBONDENSLAVE ||
4432 cmd == SIOCBONDRELEASE ||
4433 cmd == SIOCBONDSETHWADDR ||
4434 cmd == SIOCBONDSLAVEINFOQUERY ||
4435 cmd == SIOCBONDINFOQUERY ||
4436 cmd == SIOCBONDCHANGEACTIVE ||
4437 cmd == SIOCGMIIPHY ||
4438 cmd == SIOCGMIIREG ||
4439 cmd == SIOCSMIIREG ||
4440 cmd == SIOCBRADDIF ||
4441 cmd == SIOCBRDELIF ||
4442 cmd == SIOCSHWTSTAMP ||
4443 cmd == SIOCWANDEV) {
4445 if (ops->ndo_do_ioctl) {
4446 if (netif_device_present(dev))
4447 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4459 * This function handles all "interface"-type I/O control requests. The actual
4460 * 'doing' part of this is dev_ifsioc above.
4464 * dev_ioctl - network device ioctl
4465 * @net: the applicable net namespace
4466 * @cmd: command to issue
4467 * @arg: pointer to a struct ifreq in user space
4469 * Issue ioctl functions to devices. This is normally called by the
4470 * user space syscall interfaces but can sometimes be useful for
4471 * other purposes. The return value is the return from the syscall if
4472 * positive or a negative errno code on error.
4475 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4481 /* One special case: SIOCGIFCONF takes ifconf argument
4482 and requires shared lock, because it sleeps writing
4483 the answer to user space.
4486 if (cmd == SIOCGIFCONF) {
4488 ret = dev_ifconf(net, (char __user *) arg);
4492 if (cmd == SIOCGIFNAME)
4493 return dev_ifname(net, (struct ifreq __user *)arg);
4495 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4498 ifr.ifr_name[IFNAMSIZ-1] = 0;
4500 colon = strchr(ifr.ifr_name, ':');
4505 * See which interface the caller is talking about.
4510 * These ioctl calls:
4511 * - can be done by all.
4512 * - atomic and do not require locking.
4523 dev_load(net, ifr.ifr_name);
4525 ret = dev_ifsioc_locked(net, &ifr, cmd);
4530 if (copy_to_user(arg, &ifr,
4531 sizeof(struct ifreq)))
4537 dev_load(net, ifr.ifr_name);
4539 ret = dev_ethtool(net, &ifr);
4544 if (copy_to_user(arg, &ifr,
4545 sizeof(struct ifreq)))
4551 * These ioctl calls:
4552 * - require superuser power.
4553 * - require strict serialization.
4559 if (!capable(CAP_NET_ADMIN))
4561 dev_load(net, ifr.ifr_name);
4563 ret = dev_ifsioc(net, &ifr, cmd);
4568 if (copy_to_user(arg, &ifr,
4569 sizeof(struct ifreq)))
4575 * These ioctl calls:
4576 * - require superuser power.
4577 * - require strict serialization.
4578 * - do not return a value
4588 case SIOCSIFHWBROADCAST:
4591 case SIOCBONDENSLAVE:
4592 case SIOCBONDRELEASE:
4593 case SIOCBONDSETHWADDR:
4594 case SIOCBONDCHANGEACTIVE:
4598 if (!capable(CAP_NET_ADMIN))
4601 case SIOCBONDSLAVEINFOQUERY:
4602 case SIOCBONDINFOQUERY:
4603 dev_load(net, ifr.ifr_name);
4605 ret = dev_ifsioc(net, &ifr, cmd);
4610 /* Get the per device memory space. We can add this but
4611 * currently do not support it */
4613 /* Set the per device memory buffer space.
4614 * Not applicable in our case */
4619 * Unknown or private ioctl.
4622 if (cmd == SIOCWANDEV ||
4623 (cmd >= SIOCDEVPRIVATE &&
4624 cmd <= SIOCDEVPRIVATE + 15)) {
4625 dev_load(net, ifr.ifr_name);
4627 ret = dev_ifsioc(net, &ifr, cmd);
4629 if (!ret && copy_to_user(arg, &ifr,
4630 sizeof(struct ifreq)))
4634 /* Take care of Wireless Extensions */
4635 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4636 return wext_handle_ioctl(net, &ifr, cmd, arg);
4643 * dev_new_index - allocate an ifindex
4644 * @net: the applicable net namespace
4646 * Returns a suitable unique value for a new device interface
4647 * number. The caller must hold the rtnl semaphore or the
4648 * dev_base_lock to be sure it remains unique.
4650 static int dev_new_index(struct net *net)
4656 if (!__dev_get_by_index(net, ifindex))
4661 /* Delayed registration/unregistration */
4662 static LIST_HEAD(net_todo_list);
4664 static void net_set_todo(struct net_device *dev)
4666 list_add_tail(&dev->todo_list, &net_todo_list);
4669 static void rollback_registered_many(struct list_head *head)
4671 struct net_device *dev, *tmp;
4673 BUG_ON(dev_boot_phase);
4676 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
4677 /* Some devices call unregister without having registered,
4678 * for initialization unwind. Remove those
4679 * devices and proceed with the remaining.
4681 if (dev->reg_state == NETREG_UNINITIALIZED) {
4682 pr_debug("unregister_netdevice: device %s/%p never "
4683 "was registered\n", dev->name, dev);
4686 list_del(&dev->unreg_list);
4690 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4692 /* If device is running, close it first. */
4695 /* And unlink it from device chain. */
4696 unlist_netdevice(dev);
4698 dev->reg_state = NETREG_UNREGISTERING;
4703 list_for_each_entry(dev, head, unreg_list) {
4704 /* Shutdown queueing discipline. */
4708 /* Notify protocols that we are about to destroy
4709 this device. They should clean all the things.
4711 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4713 if (!dev->rtnl_link_ops ||
4714 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4715 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4718 * Flush the unicast and multicast chains
4723 if (dev->netdev_ops->ndo_uninit)
4724 dev->netdev_ops->ndo_uninit(dev);
4726 /* Notifier chain MUST detach us from master device. */
4727 WARN_ON(dev->master);
4729 /* Remove entries from kobject tree */
4730 netdev_unregister_kobject(dev);
4733 /* Process any work delayed until the end of the batch */
4734 dev = list_first_entry(head, struct net_device, unreg_list);
4735 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
4739 list_for_each_entry(dev, head, unreg_list)
4743 static void rollback_registered(struct net_device *dev)
4747 list_add(&dev->unreg_list, &single);
4748 rollback_registered_many(&single);
4751 static void __netdev_init_queue_locks_one(struct net_device *dev,
4752 struct netdev_queue *dev_queue,
4755 spin_lock_init(&dev_queue->_xmit_lock);
4756 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4757 dev_queue->xmit_lock_owner = -1;
4760 static void netdev_init_queue_locks(struct net_device *dev)
4762 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4763 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4766 unsigned long netdev_fix_features(unsigned long features, const char *name)
4768 /* Fix illegal SG+CSUM combinations. */
4769 if ((features & NETIF_F_SG) &&
4770 !(features & NETIF_F_ALL_CSUM)) {
4772 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4773 "checksum feature.\n", name);
4774 features &= ~NETIF_F_SG;
4777 /* TSO requires that SG is present as well. */
4778 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4780 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4781 "SG feature.\n", name);
4782 features &= ~NETIF_F_TSO;
4785 if (features & NETIF_F_UFO) {
4786 if (!(features & NETIF_F_GEN_CSUM)) {
4788 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4789 "since no NETIF_F_HW_CSUM feature.\n",
4791 features &= ~NETIF_F_UFO;
4794 if (!(features & NETIF_F_SG)) {
4796 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4797 "since no NETIF_F_SG feature.\n", name);
4798 features &= ~NETIF_F_UFO;
4804 EXPORT_SYMBOL(netdev_fix_features);
4807 * netif_stacked_transfer_operstate - transfer operstate
4808 * @rootdev: the root or lower level device to transfer state from
4809 * @dev: the device to transfer operstate to
4811 * Transfer operational state from root to device. This is normally
4812 * called when a stacking relationship exists between the root
4813 * device and the device (a leaf device).
4815 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4816 struct net_device *dev)
4818 if (rootdev->operstate == IF_OPER_DORMANT)
4819 netif_dormant_on(dev);
4821 netif_dormant_off(dev);
4823 if (netif_carrier_ok(rootdev)) {
4824 if (!netif_carrier_ok(dev))
4825 netif_carrier_on(dev);
4827 if (netif_carrier_ok(dev))
4828 netif_carrier_off(dev);
4831 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
4834 * register_netdevice - register a network device
4835 * @dev: device to register
4837 * Take a completed network device structure and add it to the kernel
4838 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4839 * chain. 0 is returned on success. A negative errno code is returned
4840 * on a failure to set up the device, or if the name is a duplicate.
4842 * Callers must hold the rtnl semaphore. You may want
4843 * register_netdev() instead of this.
4846 * The locking appears insufficient to guarantee two parallel registers
4847 * will not get the same name.
4850 int register_netdevice(struct net_device *dev)
4853 struct net *net = dev_net(dev);
4855 BUG_ON(dev_boot_phase);
4860 /* When net_device's are persistent, this will be fatal. */
4861 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4864 spin_lock_init(&dev->addr_list_lock);
4865 netdev_set_addr_lockdep_class(dev);
4866 netdev_init_queue_locks(dev);
4871 if (!dev->num_rx_queues) {
4873 * Allocate a single RX queue if driver never called
4874 * alloc_netdev_mq.
4877 dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
4883 dev->_rx->first = dev->_rx;
4884 atomic_set(&dev->_rx->count, 1);
4885 dev->num_rx_queues = 1;
4888 /* Init, if this function is available */
4889 if (dev->netdev_ops->ndo_init) {
4890 ret = dev->netdev_ops->ndo_init(dev);
4898 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
4902 dev->ifindex = dev_new_index(net);
4903 if (dev->iflink == -1)
4904 dev->iflink = dev->ifindex;
4906 /* Fix illegal checksum combinations */
4907 if ((dev->features & NETIF_F_HW_CSUM) &&
4908 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4909 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4911 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4914 if ((dev->features & NETIF_F_NO_CSUM) &&
4915 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4916 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4918 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4921 dev->features = netdev_fix_features(dev->features, dev->name);
4923 /* Enable software GSO if SG is supported. */
4924 if (dev->features & NETIF_F_SG)
4925 dev->features |= NETIF_F_GSO;
4927 netdev_initialize_kobject(dev);
4929 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4930 ret = notifier_to_errno(ret);
4934 ret = netdev_register_kobject(dev);
4937 dev->reg_state = NETREG_REGISTERED;
4940 * Default initial state at registry is that the
4941 * device is present.
4944 set_bit(__LINK_STATE_PRESENT, &dev->state);
4946 dev_init_scheduler(dev);
4948 list_netdevice(dev);
4950 /* Notify protocols, that a new device appeared. */
4951 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4952 ret = notifier_to_errno(ret);
4954 rollback_registered(dev);
4955 dev->reg_state = NETREG_UNREGISTERED;
4958 * Prevent userspace races by waiting until the network
4959 * device is fully setup before sending notifications.
4961 if (!dev->rtnl_link_ops ||
4962 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4963 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
4969 if (dev->netdev_ops->ndo_uninit)
4970 dev->netdev_ops->ndo_uninit(dev);
4973 EXPORT_SYMBOL(register_netdevice);
4976 * init_dummy_netdev - init a dummy network device for NAPI
4977 * @dev: device to init
4979 * This takes a network device structure and initializes the minimum
4980 * set of fields so it can be used to schedule NAPI polls without
4981 * registering a full-blown interface. This is to be used by drivers
4982 * that need to tie several hardware interfaces to a single NAPI
4983 * poll scheduler due to HW limitations.
4985 int init_dummy_netdev(struct net_device *dev)
4987 /* Clear everything. Note we don't initialize spinlocks
4988 * as they aren't supposed to be taken by any of the
4989 * NAPI code and this dummy netdev is supposed to be
4990 * only ever used for NAPI polls
4992 memset(dev, 0, sizeof(struct net_device));
4994 /* make sure we BUG if trying to hit standard
4995 * register/unregister code path
4997 dev->reg_state = NETREG_DUMMY;
4999 /* initialize the ref count */
5000 atomic_set(&dev->refcnt, 1);
5002 /* NAPI wants this */
5003 INIT_LIST_HEAD(&dev->napi_list);
5005 /* a dummy interface is started by default */
5006 set_bit(__LINK_STATE_PRESENT, &dev->state);
5007 set_bit(__LINK_STATE_START, &dev->state);
5011 EXPORT_SYMBOL_GPL(init_dummy_netdev);
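/*
 * Example (illustrative sketch; chan and my_poll are hypothetical): one
 * dummy netdev serving as the NAPI anchor for several hardware
 * channels:
 *
 *	static struct net_device dummy_dev;
 *
 *	init_dummy_netdev(&dummy_dev);
 *	for (i = 0; i < nr_channels; i++)
 *		netif_napi_add(&dummy_dev, &chan[i].napi, my_poll, 64);
 */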
5015 * register_netdev - register a network device
5016 * @dev: device to register
5018 * Take a completed network device structure and add it to the kernel
5019 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5020 * chain. 0 is returned on success. A negative errno code is returned
5021 * on a failure to set up the device, or if the name is a duplicate.
5023 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5024 * and expands the device name if you passed a format string to
5027 int register_netdev(struct net_device *dev)
5034 * If the name is a format string the caller wants us to do a
5035 * name allocation.
5037 if (strchr(dev->name, '%')) {
5038 err = dev_alloc_name(dev, dev->name);
5043 err = register_netdevice(dev);
5048 EXPORT_SYMBOL(register_netdev);
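/*
 * Example (illustrative sketch; my_netdev_ops and struct my_priv are
 * hypothetical, remaining error handling trimmed): the common
 * alloc/register pattern in a probe routine:
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_etherdev(sizeof(struct my_priv));
 *	if (!dev)
 *		return -ENOMEM;
 *	dev->netdev_ops = &my_netdev_ops;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */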
5051 * netdev_wait_allrefs - wait until all references are gone.
5053 * This is called when unregistering network devices.
5055 * Any protocol or device that holds a reference should register
5056 * for netdevice notification, and cleanup and put back the
5057 * reference if they receive an UNREGISTER event.
5058 * We can get stuck here if buggy protocols don't correctly
5059 * call dev_put.
5061 static void netdev_wait_allrefs(struct net_device *dev)
5063 unsigned long rebroadcast_time, warning_time;
5065 linkwatch_forget_dev(dev);
5067 rebroadcast_time = warning_time = jiffies;
5068 while (atomic_read(&dev->refcnt) != 0) {
5069 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5072 /* Rebroadcast unregister notification */
5073 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5074 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5075 * should have already handled it the first time */
5077 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5079 /* We must not have linkwatch events
5080 * pending on unregister. If this
5081 * happens, we simply run the queue
5082 * unscheduled, resulting in a noop
5085 linkwatch_run_queue();
5090 rebroadcast_time = jiffies;
5095 if (time_after(jiffies, warning_time + 10 * HZ)) {
5096 printk(KERN_EMERG "unregister_netdevice: "
5097 "waiting for %s to become free. Usage "
5099 dev->name, atomic_read(&dev->refcnt));
5100 warning_time = jiffies;
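/*
 * Illustrative sketch of the reference discipline described above: code
 * that caches a struct net_device pointer should hold a reference and
 * drop it from its netdevice notifier, otherwise netdev_wait_allrefs()
 * spins forever. "my_cached_dev" is hypothetical.
 *
 *	dev_hold(dev);				(when caching the pointer)
 *	...
 *	case NETDEV_UNREGISTER:			(in the notifier callback)
 *		if (my_cached_dev == dev) {
 *			my_cached_dev = NULL;
 *			dev_put(dev);		(lets refcnt reach zero)
 *		}
 *		break;
 */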
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		WARN_ON(dev->ip_ptr);
		WARN_ON(dev->ip6_ptr);
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/**
 *	dev_txq_stats_fold - fold tx_queues stats
 *	@dev: device to get statistics from
 *	@stats: struct net_device_stats to hold results
 */
void dev_txq_stats_fold(const struct net_device *dev,
			struct net_device_stats *stats)
{
	unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
	unsigned int i;
	struct netdev_queue *txq;

	for (i = 0; i < dev->num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		tx_bytes   += txq->tx_bytes;
		tx_packets += txq->tx_packets;
		tx_dropped += txq->tx_dropped;
	}
	if (tx_bytes || tx_packets || tx_dropped) {
		stats->tx_bytes   = tx_bytes;
		stats->tx_packets = tx_packets;
		stats->tx_dropped = tx_dropped;
	}
}
EXPORT_SYMBOL(dev_txq_stats_fold);
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *
 *	Get network statistics from device. The device driver may provide
 *	its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
 *	the internal statistics structure is used.
 */
const struct net_device_stats *dev_get_stats(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);

	dev_txq_stats_fold(dev, &dev->stats);
	return &dev->stats;
}
EXPORT_SYMBOL(dev_get_stats);
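/*
 * Illustrative sketch (hypothetical driver): supplying a private
 * ndo_get_stats method that folds hardware counters into dev->stats
 * before returning it. "example_get_stats" and "example_read_rx_errors"
 * are made-up names.
 *
 *	static struct net_device_stats *example_get_stats(struct net_device *dev)
 *	{
 *		dev->stats.rx_errors = example_read_rx_errors(dev);
 *		return &dev->stats;
 *	}
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_get_stats	= example_get_stats,
 *	};
 */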
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;
}

static void netdev_init_queues(struct net_device *dev)
{
	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);
}
/**
 *	alloc_netdev_mq - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@queue_count:	the number of subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization. Also allocates subqueue structs
 *	for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct netdev_rx_queue *rx;
	struct net_device *dev;
	struct net_device *p;
	size_t alloc_size;
	int i;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	/* ensure 32-byte alignment of private area */
	alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
	alloc_size += sizeof_priv;
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
		goto free_p;
	}

	rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "rx queues.\n");
		goto free_tx;
	}

	atomic_set(&rx->count, queue_count);

	/*
	 * Set a pointer to first element in the array which holds the
	 * reference count.
	 */
	for (i = 0; i < queue_count; i++)
		rx[i].first = rx;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	if (dev_addr_init(dev))
		goto free_rx;

	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

	dev->_rx = rx;
	dev->num_rx_queues = queue_count;

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
	dev->ethtool_ntuple_list.count = 0;
	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_rx:
	kfree(rx);
free_tx:
	kfree(tx);
free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);
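/*
 * Usage sketch (illustrative): a multiqueue Ethernet driver would
 * typically pair this with ether_setup(). The queue count of 8 and
 * "struct example_priv" are hypothetical.
 *
 *	dev = alloc_netdev_mq(sizeof(struct example_priv), "eth%d",
 *			      ether_setup, 8);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 */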
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	/* Clear ethtool n-tuple list */
	ethtool_ntuple_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 *	synchronize_net - Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
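/*
 * Usage sketch (illustrative): queueing several devices and unregistering
 * them in one batch, so the expensive synchronization in
 * rollback_registered_many() is paid once. Iterating "each dev to kill"
 * is left abstract here.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	(for each dev to kill)
 *		unregister_netdevice_queue(dev, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */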
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore. In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
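/*
 * Usage sketch (illustrative): the usual driver remove path. Note that
 * unregister_netdev() only detaches the interface; the memory is not
 * released until a separate free_netdev() once all references are gone.
 *
 *	unregister_netdev(dev);		(takes and releases RTNL)
 *	free_netdev(dev);		(final put of the device)
 */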
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Don't allow real devices to be moved when sysfs
	 * is enabled.
	 */
	if (dev->dev.parent)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, pat, dev->name, 1))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	 * this device. They should clean all the things.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_unicast_flush(dev);
	dev_addr_discard(dev);

	netdev_unregister_kobject(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
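/*
 * Usage sketch (illustrative): moving a device into another namespace
 * under RTNL, with "dev%d" as the fallback pattern if the current name
 * collides in the target namespace. How "net" is obtained (e.g. via
 * get_net_ns_by_pid()) is outside this sketch.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, net, "dev%d");
 *	rtnl_unlock();
 */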
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct Qdisc **list_net;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Find end of our output_queue. */
	list_net = &sd->output_queue;
	while (*list_net)
		list_net = &(*list_net)->next_sched;
	/* Append output queue from offline CPU. */
	*list_net = oldsd->output_queue;
	oldsd->output_queue = NULL;

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		incr_input_queue_head(oldsd);
	}

	return NOTIFY_OK;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
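/*
 * Illustrative sketch (assumed aggregation driver, bonding-style): a
 * master device can recompute its feature set across all slaves by
 * folding one slave at a time. "bond", "slave" and EXAMPLE_FEATURE_MASK
 * are hypothetical.
 *
 *	unsigned long features = EXAMPLE_FEATURE_MASK;
 *
 *	list_for_each_entry(slave, &bond->slave_list, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     EXAMPLE_FEATURE_MASK);
 *	bond->dev->features = features;
 */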
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;
	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;

	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->csd.func = trigger_softirq;
		queue->csd.info = queue;
		queue->csd.flags = 0;

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
		queue->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, keep this invariant by making the
	 * loopback device the first device on the list of network
	 * devices: it is the first device to appear and the last
	 * network device to disappear.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);