/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *             sure which should go first, but I bet it won't make much
 *             difference if we are running VLANs.  The good news is that
 *             this protocol won't be in the list unless compiled in, so
 *             the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *queue)
{
#ifdef CONFIG_RPS
	spin_lock(&queue->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *queue)
{
#ifdef CONFIG_RPS
	spin_unlock(&queue->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
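
/*
 * Usage sketch (not part of this file; the handler name and its body are
 * hypothetical, only dev_add_pack()/dev_remove_pack() and struct
 * packet_type are the real API):
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// we own the clone delivered to us
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_ptype __read_mostly = {
 *		.type = __constant_htons(ETH_P_IP),	// hashed into ptype_base
 *		.func = example_rcv,			// .dev == NULL: all devices
 *	};
 *
 *	dev_add_pack(&example_ptype);
 *	...
 *	dev_remove_pack(&example_ptype);	// sleeps in synchronize_net()
 */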
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/******************************************************************************

		      Device Boot-time Settings Routines

 ******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
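
/*
 * Example (illustrative; the specific values are made up): booting with
 *
 *	netdev=5,0x300,0xd0000,0xd4000,eth0
 *
 * makes get_options() parse four integers (ints[0] == 4), so map.irq = 5,
 * map.base_addr = 0x300, map.mem_start = 0xd0000, map.mem_end = 0xd4000,
 * and the remaining string "eth0" becomes the entry name passed to
 * netdev_boot_setup_add().
 */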
/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
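
/*
 * A minimal sketch of the locking contract (illustrative only):
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		...			// dev pinned by its refcount
 *		dev_put(dev);		// release when done
 *	}
 *
 * versus the RCU variant, which takes no reference:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	...				// dev valid only inside the RCU section
 *	rcu_read_unlock();
 */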
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
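
/*
 * Examples (illustrative): "eth0" and "wlan-1" are accepted; "", ".",
 * "..", "a/b", "eth 0" and any name of IFNAMSIZ (16) or more characters
 * are rejected.
 */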
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
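
/*
 * Sketch: with eth0 and eth1 already registered, dev_alloc_name(dev,
 * "eth%d") scans the in-use bitmap, picks the first free unit (2),
 * writes "eth2" into dev->name and returns 2.  A name without '%' is
 * taken literally and only checked for collision.
 */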
static int dev_get_valid_name(struct net *net, const char *name, char *buf,
			      bool fmt)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return __dev_alloc_name(net, name, buf);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (buf != name)
		strlcpy(buf, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; a format string such as "eth%d" may be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, newname, dev->name, 1);
	if (err < 0)
		return err;

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net_eq(net, &init_net)) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */
	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for it while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */
	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 *	Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the caller to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
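
/*
 * Usage sketch (the notifier body is hypothetical; the event values and
 * registration call are the real API; in this kernel the void pointer is
 * the struct net_device itself):
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_REGISTER:	// replayed for pre-existing devices
 *		case NETDEV_UP:
 *			break;		// react to dev here
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);
 */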
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);

	if (!(dev->flags & IFF_UP))
		return NET_RX_DROP;

	if (skb->len > (dev->mtu + dev->hard_header_len))
		return NET_RX_DROP;

	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
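
/*
 * Typical use (a sketch modeled on pair devices such as veth; the driver
 * function and peer lookup are hypothetical): the transmitting side hands
 * the skb straight to its peer's receive path.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = ...;	// driver-private lookup
 *
 *		dev_forward_skb(peer, skb);	// skb is consumed either way
 *		return NETDEV_TX_OK;
 *	}
 */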
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
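
/*
 * Worked note (illustrative): skb_checksum() accumulates a 32-bit
 * end-around-carry sum starting at csum_start; csum_fold() then folds it
 * to 16 bits and complements it.  E.g. a running sum of 0x0001f003 folds
 * to 0xf003 + 0x1 = 0xf004, stored as ~0xf004 = 0x0ffb at
 * csum_start + csum_offset.
 */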
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d\n",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and can map all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
				return 1;
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		rc = ops->ndo_start_xmit(skb, dev);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		/*
		 * TODO: if skb_orphan() was called by
		 * dev->hard_start_xmit() (for example, the unmodified
		 * igb driver does that; bnx2 doesn't), then
		 * skb_tx_software_timestamp() will be unable to send
		 * back the time stamp.
		 *
		 * How can this be prevented? Always create another
		 * reference to the socket before calling
		 * dev->hard_start_xmit()? Prevent that skb_orphan()
		 * does anything in dev->hard_start_xmit() by clearing
		 * the skb destructor before the call and restoring it
		 * afterwards, then doing the skb_orphan() ourselves?
		 */
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
	return rc;
}
static u32 hashrnd __read_mostly;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = skb->protocol;

	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
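
/*
 * The final line maps a 32-bit hash onto [0, real_num_tx_queues) without
 * a modulo: for example, with 4 queues and hash = 0x80000000,
 * (0x80000000ULL * 4) >> 32 == 2.  This is cheaper than '%' and spreads
 * values uniformly as long as the hash itself is uniform.
 */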
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			netdev_warn(dev, "selects TX queue %d, but "
				    "real number of TX queues is %d\n",
				    queue_index, dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	u16 queue_index;
	struct sock *sk = skb->sk;

	if (sk_tx_queue_recorded(sk)) {
		queue_index = sk_tx_queue_get(sk);
	} else {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue) {
			queue_index = ops->ndo_select_queue(dev, skb);
			queue_index = dev_cap_txqueue(dev, queue_index);
		} else {
			queue_index = 0;
			if (dev->real_num_tx_queues > 1)
				queue_index = skb_tx_hash(dev, skb);

			if (sk && sk->sk_dst_cache)
				sk_tx_queue_set(sk, queue_index);
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int rc;

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		__qdisc_update_bstats(q, skb->len);
		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
			__qdisc_run(q);
		else
			clear_bit(__QDISC_STATE_RUNNING, &q->state);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}
	spin_unlock(root_lock);

	return rc;
}
/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      struct net_device *dev)
{
	return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
	       (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
					      illegal_highdma(dev, skb)));
}
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	/* Convert a paged skb to linear, if required */
	if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = dev_hard_start_xmit(skb, dev, txq);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
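
/*
 * Caller sketch (illustrative): a protocol builds the frame, points it at
 * a device and fires it off; the skb is consumed whatever the outcome.
 *
 *	skb->dev = dev;
 *	skb->priority = sk->sk_priority;
 *	rc = dev_queue_xmit(skb);	// may return positive NET_XMIT_* codes
 *	if (rc < 0)
 *		...;			// errno-style failure, skb already freed
 */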
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
#ifdef CONFIG_RPS
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb)
{
	struct ipv6hdr *ip6;
	struct iphdr *ip;
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	int cpu = -1;
	u8 ip_proto;
	u32 addr1, addr2, ports, ihl;

	rcu_read_lock();

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->num_rx_queues)) {
			if (net_ratelimit()) {
				netdev_warn(dev, "received packet on queue "
				    "%u, but number of RX queues is %u\n",
				    index, dev->num_rx_queues);
			}
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	if (!rxqueue->rps_map)
		goto done;

	if (skb->rxhash)
		goto got_hash; /* Skip hash computation on packet header */

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		if (!pskb_may_pull(skb, sizeof(*ip)))
			goto done;

		ip = (struct iphdr *) skb->data;
		ip_proto = ip->protocol;
		addr1 = ip->saddr;
		addr2 = ip->daddr;
		ihl = ip->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		if (!pskb_may_pull(skb, sizeof(*ip6)))
			goto done;

		ip6 = (struct ipv6hdr *) skb->data;
		ip_proto = ip6->nexthdr;
		addr1 = ip6->saddr.s6_addr32[3];
		addr2 = ip6->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		goto done;
	}
	ports = 0;
	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		if (pskb_may_pull(skb, (ihl * 4) + 4))
			ports = *((u32 *) (skb->data + (ihl * 4)));
		break;

	default:
		break;
	}

	skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
	if (!skb->rxhash)
		skb->rxhash = 1;

got_hash:
	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		u16 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	rcu_read_unlock();
	return cpu;
}
/*
 * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled
 * to be sent to kick remote softirq processing.  There are two masks since
 * the sending of IPIs must be done with interrupts enabled.  The select field
 * indicates the current mask that enqueue_backlog uses to schedule IPIs.
 * select is flipped before net_rps_action is called while still under lock,
 * net_rps_action then uses the non-selected mask to send the IPIs and clears
 * it without conflicting with enqueue_backlog operation.
 */
struct rps_remote_softirq_cpus {
	cpumask_t mask[2];
	int select;
};
static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus);

/* Called from hardirq (IPI) context */
static void trigger_softirq(void *data)
{
	struct softnet_data *queue = data;
	__napi_schedule(&queue->backlog);
	__get_cpu_var(netdev_rx_stat).received_rps++;
}
#endif /* CONFIG_RPS */
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu)
{
	struct softnet_data *queue;
	unsigned long flags;

	queue = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);
	__get_cpu_var(netdev_rx_stat).total++;

	rps_lock(queue);
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			rps_unlock(queue);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device */
		if (napi_schedule_prep(&queue->backlog)) {
#ifdef CONFIG_RPS
			if (cpu != smp_processor_id()) {
				struct rps_remote_softirq_cpus *rcpus =
				    &__get_cpu_var(rps_remote_softirq_cpus);

				cpu_set(cpu, rcpus->mask[rcpus->select]);
				__raise_softirq_irqoff(NET_RX_SOFTIRQ);
			} else
				__napi_schedule(&queue->backlog);
#else
			__napi_schedule(&queue->backlog);
#endif
		}
		goto enqueue;
	}

	rps_unlock(queue);

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
2378 * netif_rx - post buffer to the network code
2379 * @skb: buffer to post
2381 * This function receives a packet from a device driver and queues it for
2382 * the upper (protocol) levels to process. It always succeeds. The buffer
2383 * may be dropped during processing for congestion control or by the
2387 * NET_RX_SUCCESS (no congestion)
2388 * NET_RX_DROP (packet was dropped)
2392 int netif_rx(struct sk_buff *skb)
2396 /* if netpoll wants it, pretend we never saw it */
2397 if (netpoll_rx(skb))
2400 if (!skb->tstamp.tv64)
2404 cpu = get_rps_cpu(skb->dev, skb);
2406 cpu = smp_processor_id();
2408 cpu = smp_processor_id();
2411 return enqueue_to_backlog(skb, cpu);
2413 EXPORT_SYMBOL(netif_rx);
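/*
 * Illustrative sketch (not part of this file): a non-NAPI driver's RX
 * interrupt path typically hands each frame to netif_rx() like this;
 * "mydrv_rx_one" and its buf/len arguments are hypothetical:
 *
 *	static void mydrv_rx_one(struct net_device *dev, void *buf, int len)
 *	{
 *		struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);
 *
 *		if (!skb)
 *			return;			/* drop on allocation failure */
 *		memcpy(skb_put(skb, len), buf, len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);			/* queue to a CPU backlog */
 *	}
 */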
2415 int netif_rx_ni(struct sk_buff *skb)
2420 err = netif_rx(skb);
2421 if (local_softirq_pending())
2427 EXPORT_SYMBOL(netif_rx_ni);
2429 static void net_tx_action(struct softirq_action *h)
2431 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2433 if (sd->completion_queue) {
2434 struct sk_buff *clist;
2436 local_irq_disable();
2437 clist = sd->completion_queue;
2438 sd->completion_queue = NULL;
2442 struct sk_buff *skb = clist;
2443 clist = clist->next;
2445 WARN_ON(atomic_read(&skb->users));
2450 if (sd->output_queue) {
2453 local_irq_disable();
2454 head = sd->output_queue;
2455 sd->output_queue = NULL;
2459 struct Qdisc *q = head;
2460 spinlock_t *root_lock;
2462 head = head->next_sched;
2464 root_lock = qdisc_lock(q);
2465 if (spin_trylock(root_lock)) {
2466 smp_mb__before_clear_bit();
2467 clear_bit(__QDISC_STATE_SCHED,
2470 spin_unlock(root_lock);
2472 if (!test_bit(__QDISC_STATE_DEACTIVATED,
2474 __netif_reschedule(q);
2476 smp_mb__before_clear_bit();
2477 clear_bit(__QDISC_STATE_SCHED,
2485 static inline int deliver_skb(struct sk_buff *skb,
2486 struct packet_type *pt_prev,
2487 struct net_device *orig_dev)
2489 atomic_inc(&skb->users);
2490 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2493 #if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
2495 #if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2496 /* This hook is defined here for ATM LANE */
2497 int (*br_fdb_test_addr_hook)(struct net_device *dev,
2498 unsigned char *addr) __read_mostly;
2499 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2503 * If bridge module is loaded call bridging hook.
2504 * Returns NULL if the packet was consumed.
2506 struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2507 struct sk_buff *skb) __read_mostly;
2508 EXPORT_SYMBOL_GPL(br_handle_frame_hook);
2510 static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2511 struct packet_type **pt_prev, int *ret,
2512 struct net_device *orig_dev)
2514 struct net_bridge_port *port;
2516 if (skb->pkt_type == PACKET_LOOPBACK ||
2517 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2521 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2525 return br_handle_frame_hook(port, skb);
2528 #define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2531 #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2532 struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2533 EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2535 static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2536 struct packet_type **pt_prev,
2538 struct net_device *orig_dev)
2540 if (skb->dev->macvlan_port == NULL)
2544 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2547 return macvlan_handle_frame_hook(skb);
2550 #define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2553 #ifdef CONFIG_NET_CLS_ACT
2554 /* TODO: Maybe we should just force sch_ingress to be compiled in
2555 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2556 * instructions (a compare and two stores) whenever ingress is
2557 * compiled out but CONFIG_NET_CLS_ACT is on.
2558 * NOTE: This doesn't remove any functionality; without the ingress
2559 * scheduler you just can't add policies on ingress.
2562 static int ing_filter(struct sk_buff *skb)
2564 struct net_device *dev = skb->dev;
2565 u32 ttl = G_TC_RTTL(skb->tc_verd);
2566 struct netdev_queue *rxq;
2567 int result = TC_ACT_OK;
2570 if (MAX_RED_LOOP < ttl++) {
2572 "Redir loop detected Dropping packet (%d->%d)\n",
2573 skb->skb_iif, dev->ifindex);
2577 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2578 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2580 rxq = &dev->rx_queue;
2583 if (q != &noop_qdisc) {
2584 spin_lock(qdisc_lock(q));
2585 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2586 result = qdisc_enqueue_root(skb, q);
2587 spin_unlock(qdisc_lock(q));
2593 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2594 struct packet_type **pt_prev,
2595 int *ret, struct net_device *orig_dev)
2597 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
2601 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2604 /* Huh? Why does turning on AF_PACKET affect this? */
2605 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2608 switch (ing_filter(skb)) {
2622 * netif_nit_deliver - deliver received packets to network taps
2625 * This function is used to deliver incoming packets to network
2626 * taps. It should be used when the normal netif_receive_skb path
2627 * is bypassed, for example because of VLAN acceleration.
2629 void netif_nit_deliver(struct sk_buff *skb)
2631 struct packet_type *ptype;
2633 if (list_empty(&ptype_all))
2636 skb_reset_network_header(skb);
2637 skb_reset_transport_header(skb);
2638 skb->mac_len = skb->network_header - skb->mac_header;
2641 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2642 if (!ptype->dev || ptype->dev == skb->dev)
2643 deliver_skb(skb, ptype, skb->dev);
2648 static int __netif_receive_skb(struct sk_buff *skb)
2650 struct packet_type *ptype, *pt_prev;
2651 struct net_device *orig_dev;
2652 struct net_device *master;
2653 struct net_device *null_or_orig;
2654 struct net_device *null_or_bond;
2655 int ret = NET_RX_DROP;
2658 if (!skb->tstamp.tv64)
2661 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
2662 return NET_RX_SUCCESS;
2664 /* if we've gotten here through NAPI, check netpoll */
2665 if (netpoll_receive_skb(skb))
2669 skb->skb_iif = skb->dev->ifindex;
2671 null_or_orig = NULL;
2672 orig_dev = skb->dev;
2673 master = ACCESS_ONCE(orig_dev->master);
2675 if (skb_bond_should_drop(skb, master))
2676 null_or_orig = orig_dev; /* deliver only exact match */
2681 __get_cpu_var(netdev_rx_stat).total++;
2683 skb_reset_network_header(skb);
2684 skb_reset_transport_header(skb);
2685 skb->mac_len = skb->network_header - skb->mac_header;
2691 #ifdef CONFIG_NET_CLS_ACT
2692 if (skb->tc_verd & TC_NCLS) {
2693 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2698 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2699 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2700 ptype->dev == orig_dev) {
2702 ret = deliver_skb(skb, pt_prev, orig_dev);
2707 #ifdef CONFIG_NET_CLS_ACT
2708 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2714 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2717 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2722 * Make sure frames received on VLAN interfaces stacked on
2723 * bonding interfaces still make their way to any base bonding
2724 * device that may have registered for a specific ptype. The
2725 * handler may have to adjust skb->dev and orig_dev.
2727 null_or_bond = NULL;
2728 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2729 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2730 null_or_bond = vlan_dev_real_dev(skb->dev);
2733 type = skb->protocol;
2734 list_for_each_entry_rcu(ptype,
2735 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2736 if (ptype->type == type && (ptype->dev == null_or_orig ||
2737 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2738 ptype->dev == null_or_bond)) {
2740 ret = deliver_skb(skb, pt_prev, orig_dev);
2746 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2749 /* Jamal, now you will not be able to escape explaining
2750 * to me how you were going to use this. :-)
2761 * netif_receive_skb - process receive buffer from network
2762 * @skb: buffer to process
2764 * netif_receive_skb() is the main receive data processing function.
2765 * It always succeeds. The buffer may be dropped during processing
2766 * for congestion control or by the protocol layers.
2768 * This function may only be called from softirq context and interrupts
2769 * should be enabled.
2771 * Return values (usually ignored):
2772 * NET_RX_SUCCESS: no congestion
2773 * NET_RX_DROP: packet was dropped
2775 int netif_receive_skb(struct sk_buff *skb)
2780 cpu = get_rps_cpu(skb->dev, skb);
2783 return __netif_receive_skb(skb);
2785 return enqueue_to_backlog(skb, cpu);
2787 return __netif_receive_skb(skb);
2790 EXPORT_SYMBOL(netif_receive_skb);
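/*
 * Illustrative sketch (hypothetical "mydrv"): a NAPI driver's poll
 * routine feeds frames straight into netif_receive_skb() from softirq
 * context, completing NAPI when the ring drains before the budget:
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv *priv = container_of(napi, struct mydrv, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = mydrv_next_rx_skb(priv);
 *
 *			if (!skb)
 *				break;
 *			skb->protocol = eth_type_trans(skb, priv->netdev);
 *			netif_receive_skb(skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			napi_complete(napi);
 *			mydrv_unmask_rx_irq(priv);	/* assumed helper */
 *		}
 *		return work;
 *	}
 */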
2792 /* Network device is going away, flush any packets still pending */
2793 static void flush_backlog(void *arg)
2795 struct net_device *dev = arg;
2796 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2797 struct sk_buff *skb, *tmp;
2800 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2801 if (skb->dev == dev) {
2802 __skb_unlink(skb, &queue->input_pkt_queue);
2808 static int napi_gro_complete(struct sk_buff *skb)
2810 struct packet_type *ptype;
2811 __be16 type = skb->protocol;
2812 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2815 if (NAPI_GRO_CB(skb)->count == 1) {
2816 skb_shinfo(skb)->gso_size = 0;
2821 list_for_each_entry_rcu(ptype, head, list) {
2822 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2825 err = ptype->gro_complete(skb);
2831 WARN_ON(&ptype->list == head);
2833 return NET_RX_SUCCESS;
2837 return netif_receive_skb(skb);
2840 static void napi_gro_flush(struct napi_struct *napi)
2842 struct sk_buff *skb, *next;
2844 for (skb = napi->gro_list; skb; skb = next) {
2847 napi_gro_complete(skb);
2850 napi->gro_count = 0;
2851 napi->gro_list = NULL;
2854 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2856 struct sk_buff **pp = NULL;
2857 struct packet_type *ptype;
2858 __be16 type = skb->protocol;
2859 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2862 enum gro_result ret;
2864 if (!(skb->dev->features & NETIF_F_GRO))
2867 if (skb_is_gso(skb) || skb_has_frags(skb))
2871 list_for_each_entry_rcu(ptype, head, list) {
2872 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2875 skb_set_network_header(skb, skb_gro_offset(skb));
2876 mac_len = skb->network_header - skb->mac_header;
2877 skb->mac_len = mac_len;
2878 NAPI_GRO_CB(skb)->same_flow = 0;
2879 NAPI_GRO_CB(skb)->flush = 0;
2880 NAPI_GRO_CB(skb)->free = 0;
2882 pp = ptype->gro_receive(&napi->gro_list, skb);
2887 if (&ptype->list == head)
2890 same_flow = NAPI_GRO_CB(skb)->same_flow;
2891 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
2894 struct sk_buff *nskb = *pp;
2898 napi_gro_complete(nskb);
2905 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
2909 NAPI_GRO_CB(skb)->count = 1;
2910 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
2911 skb->next = napi->gro_list;
2912 napi->gro_list = skb;
2916 if (skb_headlen(skb) < skb_gro_offset(skb)) {
2917 int grow = skb_gro_offset(skb) - skb_headlen(skb);
2919 BUG_ON(skb->end - skb->tail < grow);
2921 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
2924 skb->data_len -= grow;
2926 skb_shinfo(skb)->frags[0].page_offset += grow;
2927 skb_shinfo(skb)->frags[0].size -= grow;
2929 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
2930 put_page(skb_shinfo(skb)->frags[0].page);
2931 memmove(skb_shinfo(skb)->frags,
2932 skb_shinfo(skb)->frags + 1,
2933 --skb_shinfo(skb)->nr_frags);
2944 EXPORT_SYMBOL(dev_gro_receive);
2947 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2951 if (netpoll_rx_on(skb))
2954 for (p = napi->gro_list; p; p = p->next) {
2955 NAPI_GRO_CB(p)->same_flow =
2956 (p->dev == skb->dev) &&
2957 !compare_ether_header(skb_mac_header(p),
2958 skb_gro_mac_header(skb));
2959 NAPI_GRO_CB(p)->flush = 0;
2962 return dev_gro_receive(napi, skb);
2965 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
2969 if (netif_receive_skb(skb))
2974 case GRO_MERGED_FREE:
2985 EXPORT_SYMBOL(napi_skb_finish);
2987 void skb_gro_reset_offset(struct sk_buff *skb)
2989 NAPI_GRO_CB(skb)->data_offset = 0;
2990 NAPI_GRO_CB(skb)->frag0 = NULL;
2991 NAPI_GRO_CB(skb)->frag0_len = 0;
2993 if (skb->mac_header == skb->tail &&
2994 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
2995 NAPI_GRO_CB(skb)->frag0 =
2996 page_address(skb_shinfo(skb)->frags[0].page) +
2997 skb_shinfo(skb)->frags[0].page_offset;
2998 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3001 EXPORT_SYMBOL(skb_gro_reset_offset);
3003 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3005 skb_gro_reset_offset(skb);
3007 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3009 EXPORT_SYMBOL(napi_gro_receive);
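/*
 * Sketch: a GRO-aware driver substitutes napi_gro_receive() for
 * netif_receive_skb() in a poll loop like the mydrv_poll sketch above;
 * "priv" is the same hypothetical driver state:
 *
 *	skb->protocol = eth_type_trans(skb, priv->netdev);
 *	napi_gro_receive(&priv->napi, skb);
 */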
3011 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3013 __skb_pull(skb, skb_headlen(skb));
3014 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3018 EXPORT_SYMBOL(napi_reuse_skb);
3020 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3022 struct sk_buff *skb = napi->skb;
3025 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3031 EXPORT_SYMBOL(napi_get_frags);
3033 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3039 skb->protocol = eth_type_trans(skb, skb->dev);
3041 if (ret == GRO_HELD)
3042 skb_gro_pull(skb, -ETH_HLEN);
3043 else if (netif_receive_skb(skb))
3048 case GRO_MERGED_FREE:
3049 napi_reuse_skb(napi, skb);
3058 EXPORT_SYMBOL(napi_frags_finish);
3060 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3062 struct sk_buff *skb = napi->skb;
3069 skb_reset_mac_header(skb);
3070 skb_gro_reset_offset(skb);
3072 off = skb_gro_offset(skb);
3073 hlen = off + sizeof(*eth);
3074 eth = skb_gro_header_fast(skb, off);
3075 if (skb_gro_header_hard(skb, hlen)) {
3076 eth = skb_gro_header_slow(skb, hlen, off);
3077 if (unlikely(!eth)) {
3078 napi_reuse_skb(napi, skb);
3084 skb_gro_pull(skb, sizeof(*eth));
3087 * This works because the only protocols we care about don't require
3088 * special handling. We'll fix it up properly at the end.
3090 skb->protocol = eth->h_proto;
3095 EXPORT_SYMBOL(napi_frags_skb);
3097 gro_result_t napi_gro_frags(struct napi_struct *napi)
3099 struct sk_buff *skb = napi_frags_skb(napi);
3104 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3106 EXPORT_SYMBOL(napi_gro_frags);
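/*
 * Illustrative sketch: a driver that receives directly into page
 * fragments pairs napi_get_frags() with napi_gro_frags() instead of
 * building a linear skb itself; page/offset/len and "priv" are
 * hypothetical:
 *
 *	struct sk_buff *skb = napi_get_frags(&priv->napi);
 *
 *	if (!skb)
 *		return;				/* drop this frame */
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += len;
 *	napi_gro_frags(&priv->napi);	/* derives skb->protocol itself */
 */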
3108 static int process_backlog(struct napi_struct *napi, int quota)
3111 struct softnet_data *queue = &__get_cpu_var(softnet_data);
3112 unsigned long start_time = jiffies;
3114 napi->weight = weight_p;
3116 struct sk_buff *skb;
3118 local_irq_disable();
3120 skb = __skb_dequeue(&queue->input_pkt_queue);
3122 __napi_complete(napi);
3123 spin_unlock_irq(&queue->input_pkt_queue.lock);
3129 __netif_receive_skb(skb);
3130 } while (++work < quota && jiffies == start_time);
3136 * __napi_schedule - schedule for receive
3137 * @n: entry to schedule
3139 * The entry's receive function will be scheduled to run
3141 void __napi_schedule(struct napi_struct *n)
3143 unsigned long flags;
3145 local_irq_save(flags);
3146 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
3147 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3148 local_irq_restore(flags);
3150 EXPORT_SYMBOL(__napi_schedule);
3152 void __napi_complete(struct napi_struct *n)
3154 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3155 BUG_ON(n->gro_list);
3157 list_del(&n->poll_list);
3158 smp_mb__before_clear_bit();
3159 clear_bit(NAPI_STATE_SCHED, &n->state);
3161 EXPORT_SYMBOL(__napi_complete);
3163 void napi_complete(struct napi_struct *n)
3165 unsigned long flags;
3168 * don't let napi dequeue from the cpu poll list
3169 * just in case it's running on a different cpu
3171 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3175 local_irq_save(flags);
3177 local_irq_restore(flags);
3179 EXPORT_SYMBOL(napi_complete);
3181 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3182 int (*poll)(struct napi_struct *, int), int weight)
3184 INIT_LIST_HEAD(&napi->poll_list);
3185 napi->gro_count = 0;
3186 napi->gro_list = NULL;
3189 napi->weight = weight;
3190 list_add(&napi->dev_list, &dev->napi_list);
3192 #ifdef CONFIG_NETPOLL
3193 spin_lock_init(&napi->poll_lock);
3194 napi->poll_owner = -1;
3196 set_bit(NAPI_STATE_SCHED, &napi->state);
3198 EXPORT_SYMBOL(netif_napi_add);
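/*
 * Illustrative sketch of the driver side (same hypothetical "mydrv" as
 * the poll sketch above): register the context once at probe time,
 *
 *	netif_napi_add(netdev, &priv->napi, mydrv_poll, 64);
 *
 * then schedule it from the RX interrupt, with RX interrupts masked
 * until the poll completes:
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *data)
 *	{
 *		struct mydrv *priv = data;
 *
 *		if (napi_schedule_prep(&priv->napi)) {
 *			mydrv_mask_rx_irq(priv);	/* assumed helper */
 *			__napi_schedule(&priv->napi);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */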
3200 void netif_napi_del(struct napi_struct *napi)
3202 struct sk_buff *skb, *next;
3204 list_del_init(&napi->dev_list);
3205 napi_free_frags(napi);
3207 for (skb = napi->gro_list; skb; skb = next) {
3213 napi->gro_list = NULL;
3214 napi->gro_count = 0;
3216 EXPORT_SYMBOL(netif_napi_del);
3220 * net_rps_action sends any pending IPIs for rps. This is only called from
3221 * softirq and interrupts must be enabled.
3223 static void net_rps_action(cpumask_t *mask)
3227 /* Send pending IPIs to kick RPS processing on remote cpus. */
3228 for_each_cpu_mask_nr(cpu, *mask) {
3229 struct softnet_data *queue = &per_cpu(softnet_data, cpu);
3230 if (cpu_online(cpu))
3231 __smp_call_function_single(cpu, &queue->csd, 0);
3237 static void net_rx_action(struct softirq_action *h)
3239 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
3240 unsigned long time_limit = jiffies + 2;
3241 int budget = netdev_budget;
3245 struct rps_remote_softirq_cpus *rcpus;
3248 local_irq_disable();
3250 while (!list_empty(list)) {
3251 struct napi_struct *n;
3254 /* If the softirq window is exhausted then punt.
3255 * Allow this to run for 2 jiffies, which allows
3256 * an average latency of 1.5/HZ.
3258 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3263 /* Even though interrupts have been re-enabled, this
3264 * access is safe because interrupts can only add new
3265 * entries to the tail of this list, and only ->poll()
3266 * calls can remove this head entry from the list.
3268 n = list_first_entry(list, struct napi_struct, poll_list);
3270 have = netpoll_poll_lock(n);
3274 /* This NAPI_STATE_SCHED test is for avoiding a race
3275 * with netpoll's poll_napi(). Only the entity which
3276 * obtains the lock and sees NAPI_STATE_SCHED set will
3277 * actually make the ->poll() call. Therefore we avoid
3278 * accidentally calling ->poll() when NAPI is not scheduled.
3281 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3282 work = n->poll(n, weight);
3286 WARN_ON_ONCE(work > weight);
3290 local_irq_disable();
3292 /* Drivers must not modify the NAPI state if they
3293 * consume the entire weight. In such cases this code
3294 * still "owns" the NAPI instance and therefore can
3295 * move the instance around on the list at-will.
3297 if (unlikely(work == weight)) {
3298 if (unlikely(napi_disable_pending(n))) {
3301 local_irq_disable();
3303 list_move_tail(&n->poll_list, list);
3306 netpoll_poll_unlock(have);
3310 rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
3311 select = rcpus->select;
3316 net_rps_action(&rcpus->mask[select]);
3321 #ifdef CONFIG_NET_DMA
3323 * There may not be any more sk_buffs coming right now, so push
3324 * any pending DMA copies to hardware
3326 dma_issue_pending_all();
3332 __get_cpu_var(netdev_rx_stat).time_squeeze++;
3333 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3337 static gifconf_func_t *gifconf_list[NPROTO];
3340 * register_gifconf - register a SIOCGIF handler
3341 * @family: Address family
3342 * @gifconf: Function handler
3344 * Register protocol dependent address dumping routines. The handler
3345 * that is passed must not be freed or reused until it has been replaced
3346 * by another handler.
3348 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3350 if (family >= NPROTO)
3352 gifconf_list[family] = gifconf;
3355 EXPORT_SYMBOL(register_gifconf);
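/*
 * Sketch: an address family registers its handler once at init time;
 * IPv4, for instance, registers its helper this way:
 *
 *	register_gifconf(PF_INET, inet_gifconf);
 *
 * The handler fills one struct ifreq per address into the supplied
 * buffer and returns the number of bytes written, or the space needed
 * when called with a NULL buffer (see dev_ifconf() below).
 */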
3359 * Map an interface index to its name (SIOCGIFNAME)
3363 * We need this ioctl for efficient implementation of the
3364 * if_indextoname() function required by the IPv6 API. Without
3365 * it, we would have to search all the interfaces to find a
3369 static int dev_ifname(struct net *net, struct ifreq __user *arg)
3371 struct net_device *dev;
3375 * Fetch the caller's info block.
3378 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3382 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3388 strcpy(ifr.ifr_name, dev->name);
3391 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3397 * Perform a SIOCGIFCONF call. This structure will change
3398 * size eventually, and there is nothing I can do about it.
3399 * Thus we will need a 'compatibility mode'.
3402 static int dev_ifconf(struct net *net, char __user *arg)
3405 struct net_device *dev;
3412 * Fetch the caller's info block.
3415 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3422 * Loop over the interfaces, and write an info block for each.
3426 for_each_netdev(net, dev) {
3427 for (i = 0; i < NPROTO; i++) {
3428 if (gifconf_list[i]) {
3431 done = gifconf_list[i](dev, NULL, 0);
3433 done = gifconf_list[i](dev, pos + total,
3443 * All done. Write the updated control block back to the caller.
3445 ifc.ifc_len = total;
3448 * Both BSD and Solaris return 0 here, so we do too.
3450 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3453 #ifdef CONFIG_PROC_FS
3455 * This is invoked by the /proc filesystem handler to display a device
3458 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
3461 struct net *net = seq_file_net(seq);
3463 struct net_device *dev;
3467 return SEQ_START_TOKEN;
3470 for_each_netdev_rcu(net, dev)
3477 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3479 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3480 first_net_device(seq_file_net(seq)) :
3481 next_net_device((struct net_device *)v);
3484 return rcu_dereference(dev);
3487 void dev_seq_stop(struct seq_file *seq, void *v)
3493 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3495 const struct net_device_stats *stats = dev_get_stats(dev);
3497 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
3498 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3499 dev->name, stats->rx_bytes, stats->rx_packets,
3501 stats->rx_dropped + stats->rx_missed_errors,
3502 stats->rx_fifo_errors,
3503 stats->rx_length_errors + stats->rx_over_errors +
3504 stats->rx_crc_errors + stats->rx_frame_errors,
3505 stats->rx_compressed, stats->multicast,
3506 stats->tx_bytes, stats->tx_packets,
3507 stats->tx_errors, stats->tx_dropped,
3508 stats->tx_fifo_errors, stats->collisions,
3509 stats->tx_carrier_errors +
3510 stats->tx_aborted_errors +
3511 stats->tx_window_errors +
3512 stats->tx_heartbeat_errors,
3513 stats->tx_compressed);
3517 * Called from the PROCfs module. This now uses the new arbitrary sized
3518 * /proc/net interface to create /proc/net/dev
3520 static int dev_seq_show(struct seq_file *seq, void *v)
3522 if (v == SEQ_START_TOKEN)
3523 seq_puts(seq, "Inter-| Receive "
3525 " face |bytes packets errs drop fifo frame "
3526 "compressed multicast|bytes packets errs "
3527 "drop fifo colls carrier compressed\n");
3529 dev_seq_printf_stats(seq, v);
3533 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3535 struct netif_rx_stats *rc = NULL;
3537 while (*pos < nr_cpu_ids)
3538 if (cpu_online(*pos)) {
3539 rc = &per_cpu(netdev_rx_stat, *pos);
3546 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3548 return softnet_get_online(pos);
3551 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3554 return softnet_get_online(pos);
3557 static void softnet_seq_stop(struct seq_file *seq, void *v)
3561 static int softnet_seq_show(struct seq_file *seq, void *v)
3563 struct netif_rx_stats *s = v;
3565 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3566 s->total, s->dropped, s->time_squeeze, 0,
3567 0, 0, 0, 0, /* was fastroute */
3568 s->cpu_collision, s->received_rps);
3572 static const struct seq_operations dev_seq_ops = {
3573 .start = dev_seq_start,
3574 .next = dev_seq_next,
3575 .stop = dev_seq_stop,
3576 .show = dev_seq_show,
3579 static int dev_seq_open(struct inode *inode, struct file *file)
3581 return seq_open_net(inode, file, &dev_seq_ops,
3582 sizeof(struct seq_net_private));
3585 static const struct file_operations dev_seq_fops = {
3586 .owner = THIS_MODULE,
3587 .open = dev_seq_open,
3589 .llseek = seq_lseek,
3590 .release = seq_release_net,
3593 static const struct seq_operations softnet_seq_ops = {
3594 .start = softnet_seq_start,
3595 .next = softnet_seq_next,
3596 .stop = softnet_seq_stop,
3597 .show = softnet_seq_show,
3600 static int softnet_seq_open(struct inode *inode, struct file *file)
3602 return seq_open(file, &softnet_seq_ops);
3605 static const struct file_operations softnet_seq_fops = {
3606 .owner = THIS_MODULE,
3607 .open = softnet_seq_open,
3609 .llseek = seq_lseek,
3610 .release = seq_release,
3613 static void *ptype_get_idx(loff_t pos)
3615 struct packet_type *pt = NULL;
3619 list_for_each_entry_rcu(pt, &ptype_all, list) {
3625 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3626 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3635 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3639 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3642 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3644 struct packet_type *pt;
3645 struct list_head *nxt;
3649 if (v == SEQ_START_TOKEN)
3650 return ptype_get_idx(0);
3653 nxt = pt->list.next;
3654 if (pt->type == htons(ETH_P_ALL)) {
3655 if (nxt != &ptype_all)
3658 nxt = ptype_base[0].next;
3660 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3662 while (nxt == &ptype_base[hash]) {
3663 if (++hash >= PTYPE_HASH_SIZE)
3665 nxt = ptype_base[hash].next;
3668 return list_entry(nxt, struct packet_type, list);
3671 static void ptype_seq_stop(struct seq_file *seq, void *v)
3677 static int ptype_seq_show(struct seq_file *seq, void *v)
3679 struct packet_type *pt = v;
3681 if (v == SEQ_START_TOKEN)
3682 seq_puts(seq, "Type Device Function\n");
3683 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3684 if (pt->type == htons(ETH_P_ALL))
3685 seq_puts(seq, "ALL ");
3687 seq_printf(seq, "%04x", ntohs(pt->type));
3689 seq_printf(seq, " %-8s %pF\n",
3690 pt->dev ? pt->dev->name : "", pt->func);
3696 static const struct seq_operations ptype_seq_ops = {
3697 .start = ptype_seq_start,
3698 .next = ptype_seq_next,
3699 .stop = ptype_seq_stop,
3700 .show = ptype_seq_show,
3703 static int ptype_seq_open(struct inode *inode, struct file *file)
3705 return seq_open_net(inode, file, &ptype_seq_ops,
3706 sizeof(struct seq_net_private));
3709 static const struct file_operations ptype_seq_fops = {
3710 .owner = THIS_MODULE,
3711 .open = ptype_seq_open,
3713 .llseek = seq_lseek,
3714 .release = seq_release_net,
3718 static int __net_init dev_proc_net_init(struct net *net)
3722 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3724 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3726 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3729 if (wext_proc_init(net))
3735 proc_net_remove(net, "ptype");
3737 proc_net_remove(net, "softnet_stat");
3739 proc_net_remove(net, "dev");
3743 static void __net_exit dev_proc_net_exit(struct net *net)
3745 wext_proc_exit(net);
3747 proc_net_remove(net, "ptype");
3748 proc_net_remove(net, "softnet_stat");
3749 proc_net_remove(net, "dev");
3752 static struct pernet_operations __net_initdata dev_proc_ops = {
3753 .init = dev_proc_net_init,
3754 .exit = dev_proc_net_exit,
3757 static int __init dev_proc_init(void)
3759 return register_pernet_subsys(&dev_proc_ops);
3762 #define dev_proc_init() 0
3763 #endif /* CONFIG_PROC_FS */
3767 * netdev_set_master - set up master/slave pair
3768 * @slave: slave device
3769 * @master: new master device
3771 * Changes the master device of the slave. Pass %NULL to break the
3772 * bonding. The caller must hold the RTNL semaphore. On a failure
3773 * a negative errno code is returned. On success the reference counts
3774 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3775 * function returns zero.
3777 int netdev_set_master(struct net_device *slave, struct net_device *master)
3779 struct net_device *old = slave->master;
3789 slave->master = master;
3796 slave->flags |= IFF_SLAVE;
3798 slave->flags &= ~IFF_SLAVE;
3800 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3803 EXPORT_SYMBOL(netdev_set_master);
3805 static void dev_change_rx_flags(struct net_device *dev, int flags)
3807 const struct net_device_ops *ops = dev->netdev_ops;
3809 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3810 ops->ndo_change_rx_flags(dev, flags);
3813 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3815 unsigned short old_flags = dev->flags;
3821 dev->flags |= IFF_PROMISC;
3822 dev->promiscuity += inc;
3823 if (dev->promiscuity == 0) {
3826 * If inc causes an overflow, leave promisc untouched and return an error.
3829 dev->flags &= ~IFF_PROMISC;
3831 dev->promiscuity -= inc;
3832 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
3833 "setting promiscuity failed; the promiscuity feature "
3834 "of the device might be broken.\n", dev->name);
3838 if (dev->flags != old_flags) {
3839 printk(KERN_INFO "device %s %s promiscuous mode\n",
3840 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3842 if (audit_enabled) {
3843 current_uid_gid(&uid, &gid);
3844 audit_log(current->audit_context, GFP_ATOMIC,
3845 AUDIT_ANOM_PROMISCUOUS,
3846 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3847 dev->name, (dev->flags & IFF_PROMISC),
3848 (old_flags & IFF_PROMISC),
3849 audit_get_loginuid(current),
3851 audit_get_sessionid(current));
3854 dev_change_rx_flags(dev, IFF_PROMISC);
3860 * dev_set_promiscuity - update promiscuity count on a device
3864 * Add or remove promiscuity from a device. While the count in the device
3865 * remains above zero the interface remains promiscuous. Once it hits zero
3866 * the device reverts back to normal filtering operation. A negative inc
3867 * value is used to drop promiscuity on the device.
3868 * Return 0 if successful or a negative errno code on error.
3870 int dev_set_promiscuity(struct net_device *dev, int inc)
3872 unsigned short old_flags = dev->flags;
3875 err = __dev_set_promiscuity(dev, inc);
3878 if (dev->flags != old_flags)
3879 dev_set_rx_mode(dev);
3882 EXPORT_SYMBOL(dev_set_promiscuity);
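/*
 * Illustrative sketch: a capture-style user takes a promiscuity
 * reference for the lifetime of the capture and drops it when done
 * (both calls made under rtnl_lock()):
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);	/* enter promiscuous mode */
 *	rtnl_unlock();
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);		/* drop our reference */
 *	rtnl_unlock();
 */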
3885 * dev_set_allmulti - update allmulti count on a device
3889 * Add or remove reception of all multicast frames to a device. While the
3890 * count in the device remains above zero the interface keeps listening
3891 * to all multicast frames. Once it hits zero the device reverts back to normal
3892 * filtering operation. A negative @inc value is used to drop the counter
3893 * when releasing a resource needing all multicasts.
3894 * Return 0 if successful or a negative errno code on error.
3897 int dev_set_allmulti(struct net_device *dev, int inc)
3899 unsigned short old_flags = dev->flags;
3903 dev->flags |= IFF_ALLMULTI;
3904 dev->allmulti += inc;
3905 if (dev->allmulti == 0) {
3908 * If inc causes an overflow, leave allmulti untouched and return an error.
3911 dev->flags &= ~IFF_ALLMULTI;
3913 dev->allmulti -= inc;
3914 printk(KERN_WARNING "%s: allmulti counter overflowed, "
3915 "setting allmulti failed; the allmulti feature of "
3916 "the device might be broken.\n", dev->name);
3920 if (dev->flags ^ old_flags) {
3921 dev_change_rx_flags(dev, IFF_ALLMULTI);
3922 dev_set_rx_mode(dev);
3926 EXPORT_SYMBOL(dev_set_allmulti);
3929 * Upload unicast and multicast address lists to device and
3930 * configure RX filtering. When the device doesn't support unicast
3931 * filtering it is put in promiscuous mode while unicast addresses
3934 void __dev_set_rx_mode(struct net_device *dev)
3936 const struct net_device_ops *ops = dev->netdev_ops;
3938 /* dev_open will call this function so the list will stay sane. */
3939 if (!(dev->flags&IFF_UP))
3942 if (!netif_device_present(dev))
3945 if (ops->ndo_set_rx_mode)
3946 ops->ndo_set_rx_mode(dev);
3948 /* Unicast address changes may only happen under the rtnl,
3949 * therefore calling __dev_set_promiscuity here is safe.
3951 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
3952 __dev_set_promiscuity(dev, 1);
3953 dev->uc_promisc = 1;
3954 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
3955 __dev_set_promiscuity(dev, -1);
3956 dev->uc_promisc = 0;
3959 if (ops->ndo_set_multicast_list)
3960 ops->ndo_set_multicast_list(dev);
3964 void dev_set_rx_mode(struct net_device *dev)
3966 netif_addr_lock_bh(dev);
3967 __dev_set_rx_mode(dev);
3968 netif_addr_unlock_bh(dev);
3971 /* hw addresses list handling functions */
3973 static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3974 int addr_len, unsigned char addr_type)
3976 struct netdev_hw_addr *ha;
3979 if (addr_len > MAX_ADDR_LEN)
3982 list_for_each_entry(ha, &list->list, list) {
3983 if (!memcmp(ha->addr, addr, addr_len) &&
3984 ha->type == addr_type) {
3991 alloc_size = sizeof(*ha);
3992 if (alloc_size < L1_CACHE_BYTES)
3993 alloc_size = L1_CACHE_BYTES;
3994 ha = kmalloc(alloc_size, GFP_ATOMIC);
3997 memcpy(ha->addr, addr, addr_len);
3998 ha->type = addr_type;
4001 list_add_tail_rcu(&ha->list, &list->list);
4006 static void ha_rcu_free(struct rcu_head *head)
4008 struct netdev_hw_addr *ha;
4010 ha = container_of(head, struct netdev_hw_addr, rcu_head);
4014 static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
4015 int addr_len, unsigned char addr_type)
4017 struct netdev_hw_addr *ha;
4019 list_for_each_entry(ha, &list->list, list) {
4020 if (!memcmp(ha->addr, addr, addr_len) &&
4021 (ha->type == addr_type || !addr_type)) {
4024 list_del_rcu(&ha->list);
4025 call_rcu(&ha->rcu_head, ha_rcu_free);
4033 static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
4034 struct netdev_hw_addr_list *from_list,
4036 unsigned char addr_type)
4039 struct netdev_hw_addr *ha, *ha2;
4042 list_for_each_entry(ha, &from_list->list, list) {
4043 type = addr_type ? addr_type : ha->type;
4044 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
4051 list_for_each_entry(ha2, &from_list->list, list) {
4054 type = addr_type ? addr_type : ha2->type;
4055 __hw_addr_del(to_list, ha2->addr, addr_len, type);
4060 static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
4061 struct netdev_hw_addr_list *from_list,
4063 unsigned char addr_type)
4065 struct netdev_hw_addr *ha;
4068 list_for_each_entry(ha, &from_list->list, list) {
4069 type = addr_type ? addr_type : ha->type;
4070 __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
4074 static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
4075 struct netdev_hw_addr_list *from_list,
4079 struct netdev_hw_addr *ha, *tmp;
4081 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
4083 err = __hw_addr_add(to_list, ha->addr,
4084 addr_len, ha->type);
4089 } else if (ha->refcount == 1) {
4090 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
4091 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
4097 static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
4098 struct netdev_hw_addr_list *from_list,
4101 struct netdev_hw_addr *ha, *tmp;
4103 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
4105 __hw_addr_del(to_list, ha->addr,
4106 addr_len, ha->type);
4108 __hw_addr_del(from_list, ha->addr,
4109 addr_len, ha->type);
4114 static void __hw_addr_flush(struct netdev_hw_addr_list *list)
4116 struct netdev_hw_addr *ha, *tmp;
4118 list_for_each_entry_safe(ha, tmp, &list->list, list) {
4119 list_del_rcu(&ha->list);
4120 call_rcu(&ha->rcu_head, ha_rcu_free);
4125 static void __hw_addr_init(struct netdev_hw_addr_list *list)
4127 INIT_LIST_HEAD(&list->list);
4131 /* Device addresses handling functions */
4133 static void dev_addr_flush(struct net_device *dev)
4135 /* rtnl_mutex must be held here */
4137 __hw_addr_flush(&dev->dev_addrs);
4138 dev->dev_addr = NULL;
4141 static int dev_addr_init(struct net_device *dev)
4143 unsigned char addr[MAX_ADDR_LEN];
4144 struct netdev_hw_addr *ha;
4147 /* rtnl_mutex must be held here */
4149 __hw_addr_init(&dev->dev_addrs);
4150 memset(addr, 0, sizeof(addr));
4151 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
4152 NETDEV_HW_ADDR_T_LAN);
4155 * Get the first (previously created) address from the list
4156 * and set dev_addr pointer to this location.
4158 ha = list_first_entry(&dev->dev_addrs.list,
4159 struct netdev_hw_addr, list);
4160 dev->dev_addr = ha->addr;
4166 * dev_addr_add - Add a device address
4168 * @addr: address to add
4169 * @addr_type: address type
4171 * Add a device address to the device or increase the reference count if
4172 * it already exists.
4174 * The caller must hold the rtnl_mutex.
4176 int dev_addr_add(struct net_device *dev, unsigned char *addr,
4177 unsigned char addr_type)
4183 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
4185 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4188 EXPORT_SYMBOL(dev_addr_add);
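/*
 * Illustrative sketch (the address bytes are made up): add a secondary
 * hardware address under the rtnl and release it again later:
 *
 *	static unsigned char sec_addr[ETH_ALEN] = {
 *		0x02, 0x00, 0x00, 0x00, 0x00, 0x01
 *	};
 *
 *	rtnl_lock();
 *	err = dev_addr_add(dev, sec_addr, NETDEV_HW_ADDR_T_LAN);
 *	...
 *	err = dev_addr_del(dev, sec_addr, NETDEV_HW_ADDR_T_LAN);
 *	rtnl_unlock();
 */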
4191 * dev_addr_del - Release a device address.
4193 * @addr: address to delete
4194 * @addr_type: address type
4196 * Release reference to a device address and remove it from the device
4197 * if the reference count drops to zero.
4199 * The caller must hold the rtnl_mutex.
4201 int dev_addr_del(struct net_device *dev, unsigned char *addr,
4202 unsigned char addr_type)
4205 struct netdev_hw_addr *ha;
4210 * We cannot remove the first address from the list because
4211 * dev->dev_addr points to it.
4213 ha = list_first_entry(&dev->dev_addrs.list,
4214 struct netdev_hw_addr, list);
4215 if (ha->addr == dev->dev_addr && ha->refcount == 1)
4218 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
4221 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4224 EXPORT_SYMBOL(dev_addr_del);
4227 * dev_addr_add_multiple - Add device addresses from another device
4228 * @to_dev: device to which addresses will be added
4229 * @from_dev: device from which addresses will be added
4230 * @addr_type: address type - 0 means the type will be taken from from_dev
4232 * Add the device addresses of one device to another.
4234 * The caller must hold the rtnl_mutex.
4236 int dev_addr_add_multiple(struct net_device *to_dev,
4237 struct net_device *from_dev,
4238 unsigned char addr_type)
4244 if (from_dev->addr_len != to_dev->addr_len)
4246 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
4247 to_dev->addr_len, addr_type);
4249 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4252 EXPORT_SYMBOL(dev_addr_add_multiple);
4255 * dev_addr_del_multiple - Delete device addresses by another device
4256 * @to_dev: device from which the addresses will be deleted
4257 * @from_dev: device supplying the list of addresses to delete
4258 * @addr_type: address type - 0 means the type will be taken from from_dev
4260 * Delete from the destination device the addresses listed in the source device.
4262 * The caller must hold the rtnl_mutex.
4264 int dev_addr_del_multiple(struct net_device *to_dev,
4265 struct net_device *from_dev,
4266 unsigned char addr_type)
4270 if (from_dev->addr_len != to_dev->addr_len)
4272 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
4273 to_dev->addr_len, addr_type);
4274 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4277 EXPORT_SYMBOL(dev_addr_del_multiple);
4279 /* multicast addresses handling functions */
4281 int __dev_addr_delete(struct dev_addr_list **list, int *count,
4282 void *addr, int alen, int glbl)
4284 struct dev_addr_list *da;
4286 for (; (da = *list) != NULL; list = &da->next) {
4287 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4288 alen == da->da_addrlen) {
4290 int old_glbl = da->da_gusers;
4307 int __dev_addr_add(struct dev_addr_list **list, int *count,
4308 void *addr, int alen, int glbl)
4310 struct dev_addr_list *da;
4312 for (da = *list; da != NULL; da = da->next) {
4313 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4314 da->da_addrlen == alen) {
4316 int old_glbl = da->da_gusers;
4326 da = kzalloc(sizeof(*da), GFP_ATOMIC);
4329 memcpy(da->da_addr, addr, alen);
4330 da->da_addrlen = alen;
4332 da->da_gusers = glbl ? 1 : 0;
4340 * dev_unicast_delete - Release secondary unicast address.
4342 * @addr: address to delete
4344 * Release reference to a secondary unicast address and remove it
4345 * from the device if the reference count drops to zero.
4347 * The caller must hold the rtnl_mutex.
4349 int dev_unicast_delete(struct net_device *dev, void *addr)
4355 netif_addr_lock_bh(dev);
4356 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
4357 NETDEV_HW_ADDR_T_UNICAST);
4359 __dev_set_rx_mode(dev);
4360 netif_addr_unlock_bh(dev);
4363 EXPORT_SYMBOL(dev_unicast_delete);
4366 * dev_unicast_add - add a secondary unicast address
4368 * @addr: address to add
4370 * Add a secondary unicast address to the device or increase
4371 * the reference count if it already exists.
4373 * The caller must hold the rtnl_mutex.
4375 int dev_unicast_add(struct net_device *dev, void *addr)
4381 netif_addr_lock_bh(dev);
4382 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4383 NETDEV_HW_ADDR_T_UNICAST);
4385 __dev_set_rx_mode(dev);
4386 netif_addr_unlock_bh(dev);
4389 EXPORT_SYMBOL(dev_unicast_add);
4391 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4392 struct dev_addr_list **from, int *from_count)
4394 struct dev_addr_list *da, *next;
4398 while (da != NULL) {
4400 if (!da->da_synced) {
4401 err = __dev_addr_add(to, to_count,
4402 da->da_addr, da->da_addrlen, 0);
4407 } else if (da->da_users == 1) {
4408 __dev_addr_delete(to, to_count,
4409 da->da_addr, da->da_addrlen, 0);
4410 __dev_addr_delete(from, from_count,
4411 da->da_addr, da->da_addrlen, 0);
4417 EXPORT_SYMBOL_GPL(__dev_addr_sync);
4419 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4420 struct dev_addr_list **from, int *from_count)
4422 struct dev_addr_list *da, *next;
4425 while (da != NULL) {
4427 if (da->da_synced) {
4428 __dev_addr_delete(to, to_count,
4429 da->da_addr, da->da_addrlen, 0);
4431 __dev_addr_delete(from, from_count,
4432 da->da_addr, da->da_addrlen, 0);
4437 EXPORT_SYMBOL_GPL(__dev_addr_unsync);
4440 * dev_unicast_sync - Synchronize device's unicast list to another device
4441 * @to: destination device
4442 * @from: source device
4444 * Add newly added addresses to the destination device and release
4445 * addresses that have no users left. The source device must be
4446 * locked by netif_addr_lock_bh.
4448 * This function is intended to be called from the dev->set_rx_mode
4449 * function of layered software devices.
4451 int dev_unicast_sync(struct net_device *to, struct net_device *from)
4455 if (to->addr_len != from->addr_len)
4458 netif_addr_lock_bh(to);
4459 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
4461 __dev_set_rx_mode(to);
4462 netif_addr_unlock_bh(to);
4465 EXPORT_SYMBOL(dev_unicast_sync);
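/*
 * Illustrative sketch: a layered device (VLAN-like; the "mydev" names
 * are hypothetical) propagates its unicast list to its lower device
 * from its ndo_set_rx_mode callback:
 *
 *	static void mydev_set_rx_mode(struct net_device *dev)
 *	{
 *		struct net_device *lower = mydev_get_lower(dev);
 *
 *		dev_unicast_sync(lower, dev);
 *	}
 *
 * with a matching dev_unicast_unsync(lower, dev) in its stop path.
 */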
4468 * dev_unicast_unsync - Remove synchronized addresses from the destination device
4469 * @to: destination device
4470 * @from: source device
4472 * Remove all addresses that were added to the destination device by
4473 * dev_unicast_sync(). This function is intended to be called from the
4474 * dev->stop function of layered software devices.
4476 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4478 if (to->addr_len != from->addr_len)
4481 netif_addr_lock_bh(from);
4482 netif_addr_lock(to);
4483 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
4484 __dev_set_rx_mode(to);
4485 netif_addr_unlock(to);
4486 netif_addr_unlock_bh(from);
4488 EXPORT_SYMBOL(dev_unicast_unsync);
4490 void dev_unicast_flush(struct net_device *dev)
4492 netif_addr_lock_bh(dev);
4493 __hw_addr_flush(&dev->uc);
4494 netif_addr_unlock_bh(dev);
4496 EXPORT_SYMBOL(dev_unicast_flush);
4498 static void dev_unicast_init(struct net_device *dev)
4500 __hw_addr_init(&dev->uc);
4504 static void __dev_addr_discard(struct dev_addr_list **list)
4506 struct dev_addr_list *tmp;
4508 while (*list != NULL) {
4511 if (tmp->da_users > tmp->da_gusers)
4512 printk("__dev_addr_discard: address leakage! "
4513 "da_users=%d\n", tmp->da_users);
4518 void dev_addr_discard(struct net_device *dev)
4520 netif_addr_lock_bh(dev);
4522 __dev_addr_discard(&dev->mc_list);
4523 netdev_mc_count(dev) = 0;
4525 netif_addr_unlock_bh(dev);
4527 EXPORT_SYMBOL(dev_addr_discard);
4530 * dev_get_flags - get flags reported to userspace
4533 * Get the combination of flag bits exported through APIs to userspace.
4535 unsigned dev_get_flags(const struct net_device *dev)
4539 flags = (dev->flags & ~(IFF_PROMISC |
4544 (dev->gflags & (IFF_PROMISC |
4547 if (netif_running(dev)) {
4548 if (netif_oper_up(dev))
4549 flags |= IFF_RUNNING;
4550 if (netif_carrier_ok(dev))
4551 flags |= IFF_LOWER_UP;
4552 if (netif_dormant(dev))
4553 flags |= IFF_DORMANT;
4558 EXPORT_SYMBOL(dev_get_flags);
4560 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4562 int old_flags = dev->flags;
4568 * Set the flags on our device.
4571 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4572 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4574 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4578 * Load in the correct multicast list now that the flags have changed.
4581 if ((old_flags ^ flags) & IFF_MULTICAST)
4582 dev_change_rx_flags(dev, IFF_MULTICAST);
4584 dev_set_rx_mode(dev);
4587 * Have we downed the interface? We handle IFF_UP ourselves
4588 * according to user attempts to set it, rather than blindly
4593 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4594 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4597 dev_set_rx_mode(dev);
4600 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4601 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4603 dev->gflags ^= IFF_PROMISC;
4604 dev_set_promiscuity(dev, inc);
4607 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4608 is important. Some (broken) drivers set IFF_PROMISC when
4609 IFF_ALLMULTI is requested, without asking us and without reporting it.
4611 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4612 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4614 dev->gflags ^= IFF_ALLMULTI;
4615 dev_set_allmulti(dev, inc);
4621 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4623 unsigned int changes = dev->flags ^ old_flags;
4625 if (changes & IFF_UP) {
4626 if (dev->flags & IFF_UP)
4627 call_netdevice_notifiers(NETDEV_UP, dev);
4629 call_netdevice_notifiers(NETDEV_DOWN, dev);
4632 if (dev->flags & IFF_UP &&
4633 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4634 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4638 * dev_change_flags - change device settings
4640 * @flags: device state flags
4642 * Change settings on a device based on the given state flags. The flags
4643 * are in the userspace exported format.
4645 int dev_change_flags(struct net_device *dev, unsigned flags)
4648 int old_flags = dev->flags;
4650 ret = __dev_change_flags(dev, flags);
4654 changes = old_flags ^ dev->flags;
4656 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4658 __dev_notify_flags(dev, old_flags);
4661 EXPORT_SYMBOL(dev_change_flags);
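/*
 * Sketch: bringing an interface administratively up from kernel code,
 * the in-kernel equivalent of "ip link set dev ethX up" (the caller
 * must hold the rtnl semaphore):
 *
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 */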
4664 * dev_set_mtu - Change maximum transfer unit
4666 * @new_mtu: new transfer unit
4668 * Change the maximum transfer size of the network device.
4670 int dev_set_mtu(struct net_device *dev, int new_mtu)
4672 const struct net_device_ops *ops = dev->netdev_ops;
4675 if (new_mtu == dev->mtu)
4678 /* MTU must be positive. */
4682 if (!netif_device_present(dev))
4686 if (ops->ndo_change_mtu)
4687 err = ops->ndo_change_mtu(dev, new_mtu);
4691 if (!err && dev->flags & IFF_UP)
4692 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4695 EXPORT_SYMBOL(dev_set_mtu);
4698 * dev_set_mac_address - Change Media Access Control Address
4702 * Change the hardware (MAC) address of the device
4704 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4706 const struct net_device_ops *ops = dev->netdev_ops;
4709 if (!ops->ndo_set_mac_address)
4711 if (sa->sa_family != dev->type)
4713 if (!netif_device_present(dev))
4715 err = ops->ndo_set_mac_address(dev, sa);
4717 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4720 EXPORT_SYMBOL(dev_set_mac_address);
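/*
 * Sketch ("new_addr" is a hypothetical buffer of dev->addr_len bytes;
 * the caller holds the rtnl semaphore):
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;		/* e.g. ARPHRD_ETHER */
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */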
4723 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4725 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4728 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4734 case SIOCGIFFLAGS: /* Get interface flags */
4735 ifr->ifr_flags = (short) dev_get_flags(dev);
4738 case SIOCGIFMETRIC: /* Get the metric on the interface
4739 (currently unused) */
4740 ifr->ifr_metric = 0;
4743 case SIOCGIFMTU: /* Get the MTU of a device */
4744 ifr->ifr_mtu = dev->mtu;
4749 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4751 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4752 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4753 ifr->ifr_hwaddr.sa_family = dev->type;
4761 ifr->ifr_map.mem_start = dev->mem_start;
4762 ifr->ifr_map.mem_end = dev->mem_end;
4763 ifr->ifr_map.base_addr = dev->base_addr;
4764 ifr->ifr_map.irq = dev->irq;
4765 ifr->ifr_map.dma = dev->dma;
4766 ifr->ifr_map.port = dev->if_port;
4770 ifr->ifr_ifindex = dev->ifindex;
4774 ifr->ifr_qlen = dev->tx_queue_len;
4778 /* dev_ioctl() should ensure this case
4790 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4792 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4795 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4796 const struct net_device_ops *ops;
4801 ops = dev->netdev_ops;
4804 case SIOCSIFFLAGS: /* Set interface flags */
4805 return dev_change_flags(dev, ifr->ifr_flags);
4807 case SIOCSIFMETRIC: /* Set the metric on the interface
4808 (currently unused) */
4811 case SIOCSIFMTU: /* Set the MTU of a device */
4812 return dev_set_mtu(dev, ifr->ifr_mtu);
4815 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4817 case SIOCSIFHWBROADCAST:
4818 if (ifr->ifr_hwaddr.sa_family != dev->type)
4820 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4821 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4822 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4826 if (ops->ndo_set_config) {
4827 if (!netif_device_present(dev))
4829 return ops->ndo_set_config(dev, &ifr->ifr_map);
4834 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4835 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4837 if (!netif_device_present(dev))
4839 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
4843 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4844 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4846 if (!netif_device_present(dev))
4848 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
4852 if (ifr->ifr_qlen < 0)
4854 dev->tx_queue_len = ifr->ifr_qlen;
4858 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4859 return dev_change_name(dev, ifr->ifr_newname);
4862 * Unknown or private ioctl
4865 if ((cmd >= SIOCDEVPRIVATE &&
4866 cmd <= SIOCDEVPRIVATE + 15) ||
4867 cmd == SIOCBONDENSLAVE ||
4868 cmd == SIOCBONDRELEASE ||
4869 cmd == SIOCBONDSETHWADDR ||
4870 cmd == SIOCBONDSLAVEINFOQUERY ||
4871 cmd == SIOCBONDINFOQUERY ||
4872 cmd == SIOCBONDCHANGEACTIVE ||
4873 cmd == SIOCGMIIPHY ||
4874 cmd == SIOCGMIIREG ||
4875 cmd == SIOCSMIIREG ||
4876 cmd == SIOCBRADDIF ||
4877 cmd == SIOCBRDELIF ||
4878 cmd == SIOCSHWTSTAMP ||
4879 cmd == SIOCWANDEV) {
4881 if (ops->ndo_do_ioctl) {
4882 if (netif_device_present(dev))
4883 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4895 * This function handles all "interface"-type I/O control requests. The actual
4896 * 'doing' part of this is dev_ifsioc above.
4900 * dev_ioctl - network device ioctl
4901 * @net: the applicable net namespace
4902 * @cmd: command to issue
4903 * @arg: pointer to a struct ifreq in user space
4905 * Issue ioctl functions to devices. This is normally called by the
4906 * user space syscall interfaces but can sometimes be useful for
4907 * other purposes. The return value is the return from the syscall if
4908 * positive or a negative errno code on error.
4911 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4917 /* One special case: SIOCGIFCONF takes an ifconf argument
4918 and requires a shared lock, because it sleeps writing
4922 if (cmd == SIOCGIFCONF) {
4924 ret = dev_ifconf(net, (char __user *) arg);
4928 if (cmd == SIOCGIFNAME)
4929 return dev_ifname(net, (struct ifreq __user *)arg);
4931 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4934 ifr.ifr_name[IFNAMSIZ-1] = 0;
4936 colon = strchr(ifr.ifr_name, ':');
4941 * See which interface the caller is talking about.
4946 * These ioctl calls:
4947 * - can be done by all.
4948 * - are atomic and do not require locking.
4959 dev_load(net, ifr.ifr_name);
4961 ret = dev_ifsioc_locked(net, &ifr, cmd);
4966 if (copy_to_user(arg, &ifr,
4967 sizeof(struct ifreq)))
4973 dev_load(net, ifr.ifr_name);
4975 ret = dev_ethtool(net, &ifr);
4980 if (copy_to_user(arg, &ifr,
4981 sizeof(struct ifreq)))
4987 * These ioctl calls:
4988 * - require superuser power.
4989 * - require strict serialization.
4995 if (!capable(CAP_NET_ADMIN))
4997 dev_load(net, ifr.ifr_name);
4999 ret = dev_ifsioc(net, &ifr, cmd);
5004 if (copy_to_user(arg, &ifr,
5005 sizeof(struct ifreq)))
5011 * These ioctl calls:
5012 * - require superuser power.
5013 * - require strict serialization.
5014 * - do not return a value
5024 case SIOCSIFHWBROADCAST:
5027 case SIOCBONDENSLAVE:
5028 case SIOCBONDRELEASE:
5029 case SIOCBONDSETHWADDR:
5030 case SIOCBONDCHANGEACTIVE:
5034 if (!capable(CAP_NET_ADMIN))
5037 case SIOCBONDSLAVEINFOQUERY:
5038 case SIOCBONDINFOQUERY:
5039 dev_load(net, ifr.ifr_name);
5041 ret = dev_ifsioc(net, &ifr, cmd);
5046 /* Get the per device memory space. We can add this but
5047 * currently do not support it */
5049 /* Set the per device memory buffer space.
5050 * Not applicable in our case */
5055 * Unknown or private ioctl.
5058 if (cmd == SIOCWANDEV ||
5059 (cmd >= SIOCDEVPRIVATE &&
5060 cmd <= SIOCDEVPRIVATE + 15)) {
5061 dev_load(net, ifr.ifr_name);
5063 ret = dev_ifsioc(net, &ifr, cmd);
5065 if (!ret && copy_to_user(arg, &ifr,
5066 sizeof(struct ifreq)))
5070 /* Take care of Wireless Extensions */
5071 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5072 return wext_handle_ioctl(net, &ifr, cmd, arg);
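/*
 * Sketch of the userspace side these handlers serve: standard
 * SIOCGIFMTU usage against any socket fd (not specific to this file):
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *		printf("eth0 mtu %d\n", ifr.ifr_mtu);
 */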
5079 * dev_new_index - allocate an ifindex
5080 * @net: the applicable net namespace
5082 * Returns a suitable unique value for a new device interface
5083 * number. The caller must hold the rtnl semaphore or the
5084 * dev_base_lock to be sure it remains unique.
5086 static int dev_new_index(struct net *net)
5092 if (!__dev_get_by_index(net, ifindex))
5097 /* Delayed registration/unregistration */
5098 static LIST_HEAD(net_todo_list);
5100 static void net_set_todo(struct net_device *dev)
5102 list_add_tail(&dev->todo_list, &net_todo_list);
5105 static void rollback_registered_many(struct list_head *head)
5107 struct net_device *dev, *tmp;
5109 BUG_ON(dev_boot_phase);
5112 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5113 /* Some devices call without registering
5114 * for initialization unwind. Remove those
5115 * devices and proceed with the remaining.
5117 if (dev->reg_state == NETREG_UNINITIALIZED) {
5118 pr_debug("unregister_netdevice: device %s/%p never "
5119 "was registered\n", dev->name, dev);
5122 list_del(&dev->unreg_list);
5126 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5128 /* If device is running, close it first. */
5131 /* And unlink it from device chain. */
5132 unlist_netdevice(dev);
5134 dev->reg_state = NETREG_UNREGISTERING;
5139 list_for_each_entry(dev, head, unreg_list) {
5140 /* Shutdown queueing discipline. */
5144 /* Notify protocols that we are about to destroy
5145 this device. They should clean up all of their state.
5147 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5149 if (!dev->rtnl_link_ops ||
5150 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5151 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5154 * Flush the unicast and multicast chains
5156 dev_unicast_flush(dev);
5157 dev_addr_discard(dev);
5159 if (dev->netdev_ops->ndo_uninit)
5160 dev->netdev_ops->ndo_uninit(dev);
5162 /* Notifier chain MUST detach us from master device. */
5163 WARN_ON(dev->master);
5165 /* Remove entries from kobject tree */
5166 netdev_unregister_kobject(dev);
5169 /* Process any work delayed until the end of the batch */
5170 dev = list_first_entry(head, struct net_device, unreg_list);
5171 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5175 list_for_each_entry(dev, head, unreg_list)
5179 static void rollback_registered(struct net_device *dev)
5183 list_add(&dev->unreg_list, &single);
5184 rollback_registered_many(&single);
5187 static void __netdev_init_queue_locks_one(struct net_device *dev,
5188 struct netdev_queue *dev_queue,
5191 spin_lock_init(&dev_queue->_xmit_lock);
5192 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
5193 dev_queue->xmit_lock_owner = -1;
5196 static void netdev_init_queue_locks(struct net_device *dev)
5198 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
5199 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
5202 unsigned long netdev_fix_features(unsigned long features, const char *name)
5204 /* Fix illegal SG+CSUM combinations. */
5205 if ((features & NETIF_F_SG) &&
5206 !(features & NETIF_F_ALL_CSUM)) {
5208 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
5209 "checksum feature.\n", name);
5210 features &= ~NETIF_F_SG;
5213 /* TSO requires that SG is present as well. */
5214 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
5216 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
5217 "SG feature.\n", name);
5218 features &= ~NETIF_F_TSO;
5221 if (features & NETIF_F_UFO) {
5222 if (!(features & NETIF_F_GEN_CSUM)) {
5224 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
5225 "since no NETIF_F_HW_CSUM feature.\n",
5227 features &= ~NETIF_F_UFO;
5230 if (!(features & NETIF_F_SG)) {
5232 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
5233 "since no NETIF_F_SG feature.\n", name);
5234 features &= ~NETIF_F_UFO;
5240 EXPORT_SYMBOL(netdev_fix_features);
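/*
 * Illustrative sketch (hypothetical driver "foo"): the intended use of
 * netdev_fix_features() is for a driver to request an optimistic
 * feature mask and let the core strip illegal combinations before
 * registration.
 */
static void foo_setup_features(struct net_device *dev)
{
	/* SG without any checksum offload would be dropped below */
	dev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_IP_CSUM;
	dev->features = netdev_fix_features(dev->features, dev->name);
}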
5243 * netif_stacked_transfer_operstate - transfer operstate
5244 * @rootdev: the root or lower level device to transfer state from
5245 * @dev: the device to transfer operstate to
5247 * Transfer operational state from root to device. This is normally
5248 * called when a stacking relationship exists between the root
5249 * device and the device (a leaf device).
5251 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5252 struct net_device *dev)
5254 if (rootdev->operstate == IF_OPER_DORMANT)
5255 netif_dormant_on(dev);
5257 netif_dormant_off(dev);
5259 if (netif_carrier_ok(rootdev)) {
5260 if (!netif_carrier_ok(dev))
5261 netif_carrier_on(dev);
5263 if (netif_carrier_ok(dev))
5264 netif_carrier_off(dev);
5267 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
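/*
 * Illustrative sketch (hypothetical stacked driver): propagate the
 * lower device's state on NETDEV_CHANGE, much as macvlan does. The
 * foo_upper_from_lower() lookup is an assumption for the example.
 */
static struct net_device *foo_upper_from_lower(struct net_device *lower); /* hypothetical */

static int foo_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *lower = ptr;
	struct net_device *upper = foo_upper_from_lower(lower);

	if (upper && event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}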
5270 * register_netdevice - register a network device
5271 * @dev: device to register
5273 * Take a completed network device structure and add it to the kernel
5274 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5275 * chain. 0 is returned on success. A negative errno code is returned
5276 * on a failure to set up the device, or if the name is a duplicate.
5278 * Callers must hold the rtnl semaphore. You may want
5279 * register_netdev() instead of this.
5282 * The locking appears insufficient to guarantee two parallel registers
5283 * will not get the same name.
5286 int register_netdevice(struct net_device *dev)
5289 struct net *net = dev_net(dev);
5291 BUG_ON(dev_boot_phase);
5296 /* When net_devices are persistent, this will be fatal. */
5297 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5300 spin_lock_init(&dev->addr_list_lock);
5301 netdev_set_addr_lockdep_class(dev);
5302 netdev_init_queue_locks(dev);
5307 if (!dev->num_rx_queues) {
5309 * Allocate a single RX queue if the driver never called alloc_netdev_mq.
5313 dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
5319 dev->_rx->first = dev->_rx;
5320 atomic_set(&dev->_rx->count, 1);
5321 dev->num_rx_queues = 1;
5324 /* Init, if this function is available */
5325 if (dev->netdev_ops->ndo_init) {
5326 ret = dev->netdev_ops->ndo_init(dev);
5334 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
5338 dev->ifindex = dev_new_index(net);
5339 if (dev->iflink == -1)
5340 dev->iflink = dev->ifindex;
5342 /* Fix illegal checksum combinations */
5343 if ((dev->features & NETIF_F_HW_CSUM) &&
5344 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5345 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5347 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5350 if ((dev->features & NETIF_F_NO_CSUM) &&
5351 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5352 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5354 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5357 dev->features = netdev_fix_features(dev->features, dev->name);
5359 /* Enable software GSO if SG is supported. */
5360 if (dev->features & NETIF_F_SG)
5361 dev->features |= NETIF_F_GSO;
5363 netdev_initialize_kobject(dev);
5365 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5366 ret = notifier_to_errno(ret);
5370 ret = netdev_register_kobject(dev);
5373 dev->reg_state = NETREG_REGISTERED;
5376 * Default initial state at registration is that the
5377 * device is present.
5380 set_bit(__LINK_STATE_PRESENT, &dev->state);
5382 dev_init_scheduler(dev);
5384 list_netdevice(dev);
5386 /* Notify protocols, that a new device appeared. */
5387 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5388 ret = notifier_to_errno(ret);
5390 rollback_registered(dev);
5391 dev->reg_state = NETREG_UNREGISTERED;
5394 * Prevent userspace races by waiting until the network
5395 * device is fully setup before sending notifications.
5397 if (!dev->rtnl_link_ops ||
5398 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5399 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5405 if (dev->netdev_ops->ndo_uninit)
5406 dev->netdev_ops->ndo_uninit(dev);
5409 EXPORT_SYMBOL(register_netdevice);
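/*
 * Illustrative sketch: register_netdevice() is for callers that
 * already hold the rtnl semaphore, such as a rtnl_link_ops->newlink()
 * implementation. Everything named foo_* is an assumption.
 */
static int foo_newlink(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();			/* caller holds the rtnl semaphore */
	err = register_netdevice(dev);
	if (err)
		free_netdev(dev);	/* copes with either failure state */
	return err;
}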
5412 * init_dummy_netdev - init a dummy network device for NAPI
5413 * @dev: device to init
5415 * This takes a network device structure and initializes the minimum
5416 * set of fields so it can be used to schedule NAPI polls without
5417 * registering a full-blown interface. This is to be used by drivers
5418 * that need to tie several hardware interfaces to a single NAPI
5419 * poll scheduler due to HW limitations.
5421 int init_dummy_netdev(struct net_device *dev)
5423 /* Clear everything. Note we don't initialize spinlocks
5424 * as they aren't supposed to be taken by any of the
5425 * NAPI code, and this dummy netdev is supposed to be
5426 * used only for NAPI polls
5428 memset(dev, 0, sizeof(struct net_device));
5430 /* make sure we BUG if trying to hit standard
5431 * register/unregister code path
5433 dev->reg_state = NETREG_DUMMY;
5435 /* initialize the ref count */
5436 atomic_set(&dev->refcnt, 1);
5438 /* NAPI wants this */
5439 INIT_LIST_HEAD(&dev->napi_list);
5441 /* a dummy interface is started by default */
5442 set_bit(__LINK_STATE_PRESENT, &dev->state);
5443 set_bit(__LINK_STATE_START, &dev->state);
5447 EXPORT_SYMBOL_GPL(init_dummy_netdev);
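/*
 * Illustrative sketch (hypothetical adapter with several HW
 * interfaces sharing one poll routine, the case described above):
 * embed a dummy netdev purely to anchor NAPI contexts. The struct
 * layout and foo_poll callback are assumptions for the example.
 */
struct foo_adapter {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static void foo_setup_napi(struct foo_adapter *adapter,
			   int (*foo_poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&adapter->napi_dev);
	netif_napi_add(&adapter->napi_dev, &adapter->napi, foo_poll, 64);
	napi_enable(&adapter->napi);
}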
5451 * register_netdev - register a network device
5452 * @dev: device to register
5454 * Take a completed network device structure and add it to the kernel
5455 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5456 * chain. 0 is returned on success. A negative errno code is returned
5457 * on a failure to set up the device, or if the name is a duplicate.
5459 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5460 * and expands the device name if you passed a format string to alloc_netdev.
5463 int register_netdev(struct net_device *dev)
5470 * If the name is a format string, the caller wants us to do a name allocation.
5473 if (strchr(dev->name, '%')) {
5474 err = dev_alloc_name(dev, dev->name);
5479 err = register_netdevice(dev);
5484 EXPORT_SYMBOL(register_netdev);
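/*
 * Illustrative sketch of the usual probe-time pattern: passing
 * "foo%d" lets register_netdev() pick the first free unit number.
 * foo_priv and foo_setup are assumptions for the example.
 */
struct foo_priv { int unit; };
static void foo_setup(struct net_device *dev); /* assumed setup callback */

static int foo_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", foo_setup);
	if (!dev)
		return -ENOMEM;
	err = register_netdev(dev);	/* takes rtnl, expands "foo%d" */
	if (err)
		free_netdev(dev);
	return err;
}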
5487 * netdev_wait_allrefs - wait until all references are gone.
5489 * This is called when unregistering network devices.
5491 * Any protocol or device that holds a reference should register
5492 * for netdevice notification, and cleanup and put back the
5493 * reference if they receive an UNREGISTER event.
5494 * We can get stuck here if buggy protocols don't correctly call dev_put.
5497 static void netdev_wait_allrefs(struct net_device *dev)
5499 unsigned long rebroadcast_time, warning_time;
5501 linkwatch_forget_dev(dev);
5503 rebroadcast_time = warning_time = jiffies;
5504 while (atomic_read(&dev->refcnt) != 0) {
5505 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5508 /* Rebroadcast unregister notification */
5509 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5510 /* don't resend NETDEV_UNREGISTER_BATCH; _BATCH users
5511 * should have already handled it the first time */
5513 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5515 /* We must not have linkwatch events
5516 * pending on unregister. If this
5517 * happens, we simply run the queue
5518 * unscheduled, resulting in a noop for this device.
5521 linkwatch_run_queue();
5526 rebroadcast_time = jiffies;
5531 if (time_after(jiffies, warning_time + 10 * HZ)) {
5532 printk(KERN_EMERG "unregister_netdevice: "
5533 "waiting for %s to become free. Usage "
5535 dev->name, atomic_read(&dev->refcnt));
5536 warning_time = jiffies;
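/*
 * Illustrative sketch of the contract described above: any protocol
 * that holds device references must drop them on NETDEV_UNREGISTER,
 * or this loop spins forever. foo_forget_dev() is an assumption.
 */
static void foo_forget_dev(struct net_device *dev); /* hypothetical */

static int foo_netdev_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UNREGISTER) {
		foo_forget_dev(dev);	/* drop cached per-device state */
		dev_put(dev);		/* release the reference we held */
	}
	return NOTIFY_DONE;
}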
5545 * register_netdevice(x1);
5546 * register_netdevice(x2);
5548 * unregister_netdevice(y1);
5549 * unregister_netdevice(y2);
5555 * We are invoked by rtnl_unlock().
5556 * This allows us to deal with problems:
5557 * 1) We can delete sysfs objects which invoke hotplug
5558 * without deadlocking with linkwatch via keventd.
5559 * 2) Since we run with the RTNL semaphore not held, we can sleep
5560 * safely in order to wait for the netdev refcnt to drop to zero.
5562 * We must not return until all unregister events added during
5563 * the interval the lock was held have been completed.
5565 void netdev_run_todo(void)
5567 struct list_head list;
5569 /* Snapshot list, allow later requests */
5570 list_replace_init(&net_todo_list, &list);
5574 while (!list_empty(&list)) {
5575 struct net_device *dev
5576 = list_first_entry(&list, struct net_device, todo_list);
5577 list_del(&dev->todo_list);
5579 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5580 printk(KERN_ERR "network todo '%s' but state %d\n",
5581 dev->name, dev->reg_state);
5586 dev->reg_state = NETREG_UNREGISTERED;
5588 on_each_cpu(flush_backlog, dev, 1);
5590 netdev_wait_allrefs(dev);
5593 BUG_ON(atomic_read(&dev->refcnt));
5594 WARN_ON(dev->ip_ptr);
5595 WARN_ON(dev->ip6_ptr);
5596 WARN_ON(dev->dn_ptr);
5598 if (dev->destructor)
5599 dev->destructor(dev);
5601 /* Free network device */
5602 kobject_put(&dev->dev.kobj);
5607 * dev_txq_stats_fold - fold tx_queues stats
5608 * @dev: device to get statistics from
5609 * @stats: struct net_device_stats to hold results
5611 void dev_txq_stats_fold(const struct net_device *dev,
5612 struct net_device_stats *stats)
5614 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5616 struct netdev_queue *txq;
5618 for (i = 0; i < dev->num_tx_queues; i++) {
5619 txq = netdev_get_tx_queue(dev, i);
5620 tx_bytes += txq->tx_bytes;
5621 tx_packets += txq->tx_packets;
5622 tx_dropped += txq->tx_dropped;
5624 if (tx_bytes || tx_packets || tx_dropped) {
5625 stats->tx_bytes = tx_bytes;
5626 stats->tx_packets = tx_packets;
5627 stats->tx_dropped = tx_dropped;
5630 EXPORT_SYMBOL(dev_txq_stats_fold);
5633 * dev_get_stats - get network device statistics
5634 * @dev: device to get statistics from
5636 * Get network statistics from device. The device driver may provide
5637 * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
5638 * the internal statistics structure is used.
5640 const struct net_device_stats *dev_get_stats(struct net_device *dev)
5642 const struct net_device_ops *ops = dev->netdev_ops;
5644 if (ops->ndo_get_stats)
5645 return ops->ndo_get_stats(dev);
5647 dev_txq_stats_fold(dev, &dev->stats);
5650 EXPORT_SYMBOL(dev_get_stats);
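/*
 * Illustrative sketch: a multiqueue driver's ndo_get_stats() can let
 * the core fold per-queue tx counters before adding hardware-kept
 * counters of its own. foo_read_rx_dropped() is an assumption.
 */
static unsigned long foo_read_rx_dropped(struct net_device *dev); /* hypothetical */

static struct net_device_stats *foo_get_stats(struct net_device *dev)
{
	dev_txq_stats_fold(dev, &dev->stats);
	dev->stats.rx_dropped = foo_read_rx_dropped(dev);
	return &dev->stats;
}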
5652 static void netdev_init_one_queue(struct net_device *dev,
5653 struct netdev_queue *queue,
5659 static void netdev_init_queues(struct net_device *dev)
5661 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5662 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5663 spin_lock_init(&dev->tx_global_lock);
5667 * alloc_netdev_mq - allocate network device
5668 * @sizeof_priv: size of private data to allocate space for
5669 * @name: device name format string
5670 * @setup: callback to initialize device
5671 * @queue_count: the number of subqueues to allocate
5673 * Allocates a struct net_device with private data area for driver use
5674 * and performs basic initialization. Also allocates subqueue structs
5675 * for each queue on the device at the end of the netdevice.
5677 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5678 void (*setup)(struct net_device *), unsigned int queue_count)
5680 struct netdev_queue *tx;
5681 struct net_device *dev;
5683 struct net_device *p;
5685 struct netdev_rx_queue *rx;
5689 BUG_ON(strlen(name) >= sizeof(dev->name));
5691 alloc_size = sizeof(struct net_device);
5693 /* ensure 32-byte alignment of private area */
5694 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5695 alloc_size += sizeof_priv;
5697 /* ensure 32-byte alignment of whole construct */
5698 alloc_size += NETDEV_ALIGN - 1;
5700 p = kzalloc(alloc_size, GFP_KERNEL);
5702 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
5706 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
5708 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5714 rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5716 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5721 atomic_set(&rx->count, queue_count);
5724 * Set a pointer to the first element in the array, which holds the reference count.
5727 for (i = 0; i < queue_count; i++)
5731 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5732 dev->padded = (char *)dev - (char *)p;
5734 if (dev_addr_init(dev))
5737 dev_unicast_init(dev);
5739 dev_net_set(dev, &init_net);
5742 dev->num_tx_queues = queue_count;
5743 dev->real_num_tx_queues = queue_count;
5747 dev->num_rx_queues = queue_count;
5750 dev->gso_max_size = GSO_MAX_SIZE;
5752 netdev_init_queues(dev);
5754 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5755 dev->ethtool_ntuple_list.count = 0;
5756 INIT_LIST_HEAD(&dev->napi_list);
5757 INIT_LIST_HEAD(&dev->unreg_list);
5758 INIT_LIST_HEAD(&dev->link_watch_list);
5759 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5761 strcpy(dev->name, name);
5774 EXPORT_SYMBOL(alloc_netdev_mq);
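/*
 * Illustrative sketch: allocating an 8-queue ethernet device. The
 * private struct is an assumption; ether_setup() provides the usual
 * ethernet defaults.
 */
struct foo_mq_priv { void __iomem *regs; };

static struct net_device *foo_alloc(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct foo_mq_priv), "foo%d",
			      ether_setup, 8);
	if (!dev)
		return NULL;
	/* netdev_priv() returns the NETDEV_ALIGN-aligned private area */
	return dev;
}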
5777 * free_netdev - free network device
5780 * This function does the last stage of destroying an allocated device
5781 * interface. The reference to the device object is released.
5782 * If this is the last reference then it will be freed.
5784 void free_netdev(struct net_device *dev)
5786 struct napi_struct *p, *n;
5788 release_net(dev_net(dev));
5792 /* Flush device addresses */
5793 dev_addr_flush(dev);
5795 /* Clear ethtool n-tuple list */
5796 ethtool_ntuple_flush(dev);
5798 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5801 /* Compatibility with error handling in drivers */
5802 if (dev->reg_state == NETREG_UNINITIALIZED) {
5803 kfree((char *)dev - dev->padded);
5807 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5808 dev->reg_state = NETREG_RELEASED;
5810 /* will free via device release */
5811 put_device(&dev->dev);
5813 EXPORT_SYMBOL(free_netdev);
5816 * synchronize_net - Synchronize with packet receive processing
5818 * Wait for packets currently being received to be done.
5819 * Does not block later packets from starting.
5821 void synchronize_net(void)
5826 EXPORT_SYMBOL(synchronize_net);
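/*
 * Illustrative sketch: the classic synchronize_net() pattern when a
 * protocol unhooks its packet handler. foo_pt and foo_state are
 * assumptions for the example.
 */
static struct packet_type foo_pt;	/* assumed registered elsewhere */
static void *foo_state;			/* assumed per-protocol state */

static void foo_cleanup(void)
{
	dev_remove_pack(&foo_pt);	/* no new packets after this */
	synchronize_net();		/* wait out receivers in flight */
	kfree(foo_state);		/* now safe to free */
}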
5829 * unregister_netdevice_queue - remove device from the kernel
5833 * This function shuts down a device interface and removes it
5834 * from the kernel tables.
5835 * If head is not NULL, the device is queued to be unregistered later.
5837 * Callers must hold the rtnl semaphore. You may want
5838 * unregister_netdev() instead of this.
5841 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
5846 list_move_tail(&dev->unreg_list, head);
5848 rollback_registered(dev);
5849 /* Finish processing unregister after unlock */
5853 EXPORT_SYMBOL(unregister_netdevice_queue);
5856 * unregister_netdevice_many - unregister many devices
5857 * @head: list of devices
5859 void unregister_netdevice_many(struct list_head *head)
5861 struct net_device *dev;
5863 if (!list_empty(head)) {
5864 rollback_registered_many(head);
5865 list_for_each_entry(dev, head, unreg_list)
5869 EXPORT_SYMBOL(unregister_netdevice_many);
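/*
 * Illustrative sketch: tearing down a group of devices with a single
 * batched rollback instead of one full unregister cycle each. The
 * devs[] array is an assumed driver-private collection.
 */
static void foo_destroy_all(struct net_device **devs, int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}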
5872 * unregister_netdev - remove device from the kernel
5875 * This function shuts down a device interface and removes it
5876 * from the kernel tables.
5878 * This is just a wrapper for unregister_netdevice that takes
5879 * the rtnl semaphore. In general you want to use this and not
5880 * unregister_netdevice.
5882 void unregister_netdev(struct net_device *dev)
5885 unregister_netdevice(dev);
5888 EXPORT_SYMBOL(unregister_netdev);
5891 * dev_change_net_namespace - move device to a different network namespace
5893 * @net: network namespace
5894 * @pat: If not NULL, a name pattern to try if the current device name
5895 * is already taken in the destination network namespace.
5897 * This function shuts down a device interface and moves it
5898 * to a new network namespace. On success 0 is returned, on
5899 * a failure a negative errno code is returned.
5901 * Callers must hold the rtnl semaphore.
5904 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5910 /* Don't allow namespace local devices to be moved. */
5912 if (dev->features & NETIF_F_NETNS_LOCAL)
5916 /* Don't allow real devices to be moved when sysfs files are active. */
5920 if (dev->dev.parent)
5924 /* Ensure the device has been registered */
5926 if (dev->reg_state != NETREG_REGISTERED)
5929 /* Get out if there is nothing to do */
5931 if (net_eq(dev_net(dev), net))
5934 /* Pick the destination device name, and ensure
5935 * we can use it in the destination network namespace.
5938 if (__dev_get_by_name(net, dev->name)) {
5939 /* We get here if we can't use the current device name */
5942 if (dev_get_valid_name(net, pat, dev->name, 1))
5947 * And now a mini version of register_netdevice and unregister_netdevice.
5950 /* If device is running close it first. */
5953 /* And unlink it from device chain */
5955 unlist_netdevice(dev);
5959 /* Shutdown queueing discipline. */
5962 /* Notify protocols that we are about to destroy
5963 this device. They should clean up all their state.
5965 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5966 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5969 * Flush the unicast and multicast chains
5971 dev_unicast_flush(dev);
5972 dev_addr_discard(dev);
5974 netdev_unregister_kobject(dev);
5976 /* Actually switch the network namespace */
5977 dev_net_set(dev, net);
5979 /* If there is an ifindex conflict assign a new one */
5980 if (__dev_get_by_index(net, dev->ifindex)) {
5981 int iflink = (dev->iflink == dev->ifindex);
5982 dev->ifindex = dev_new_index(net);
5984 dev->iflink = dev->ifindex;
5987 /* Fixup kobjects */
5988 err = netdev_register_kobject(dev);
5991 /* Add the device back in the hashes */
5992 list_netdevice(dev);
5994 /* Notify protocols, that a new device appeared. */
5995 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5998 * Prevent userspace races by waiting until the network
5999 * device is fully setup before sending notifications.
6001 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6008 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
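/*
 * Illustrative sketch: moving a device into another namespace with a
 * fallback name pattern in case its current name is taken there. The
 * caller is assumed to hold a reference on target_net.
 */
static int foo_move_dev(struct net_device *dev, struct net *target_net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target_net, "eth%d");
	rtnl_unlock();
	return err;
}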
6010 static int dev_cpu_callback(struct notifier_block *nfb,
6011 unsigned long action,
6014 struct sk_buff **list_skb;
6015 struct Qdisc **list_net;
6016 struct sk_buff *skb;
6017 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6018 struct softnet_data *sd, *oldsd;
6020 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6023 local_irq_disable();
6024 cpu = smp_processor_id();
6025 sd = &per_cpu(softnet_data, cpu);
6026 oldsd = &per_cpu(softnet_data, oldcpu);
6028 /* Find end of our completion_queue. */
6029 list_skb = &sd->completion_queue;
6031 list_skb = &(*list_skb)->next;
6032 /* Append completion queue from offline CPU. */
6033 *list_skb = oldsd->completion_queue;
6034 oldsd->completion_queue = NULL;
6036 /* Find end of our output_queue. */
6037 list_net = &sd->output_queue;
6039 list_net = &(*list_net)->next_sched;
6040 /* Append output queue from offline CPU. */
6041 *list_net = oldsd->output_queue;
6042 oldsd->output_queue = NULL;
6044 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6047 /* Process offline CPU's input_pkt_queue */
6048 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
6056 * netdev_increment_features - increment feature set by one
6057 * @all: current feature set
6058 * @one: new feature set
6059 * @mask: mask feature set
6061 * Computes a new feature set after adding a device with feature set
6062 * @one to the master device with current feature set @all. Will not
6063 * enable anything that is off in @mask. Returns the new feature set.
6065 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
6068 /* If device needs checksumming, downgrade to it. */
6069 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
6070 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
6071 else if (mask & NETIF_F_ALL_CSUM) {
6072 /* If one device supports v4/v6 checksumming, set for all. */
6073 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
6074 !(all & NETIF_F_GEN_CSUM)) {
6075 all &= ~NETIF_F_ALL_CSUM;
6076 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
6079 /* If one device supports hw checksumming, set for all. */
6080 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
6081 all &= ~NETIF_F_ALL_CSUM;
6082 all |= NETIF_F_HW_CSUM;
6086 one |= NETIF_F_ALL_CSUM;
6088 one |= all & NETIF_F_ONE_FOR_ALL;
6089 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
6090 all |= one & mask & NETIF_F_ONE_FOR_ALL;
6094 EXPORT_SYMBOL(netdev_increment_features);
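/*
 * Illustrative sketch, in the spirit of bonding's feature
 * recomputation: fold every slave's features into the master's.
 * The foo_master/foo_slave types are assumptions for the example.
 */
struct foo_slave { struct list_head list; struct net_device *dev; };
struct foo_master { struct list_head slaves; struct net_device *dev; };

static void foo_compute_features(struct foo_master *m)
{
	unsigned long features = m->dev->features & ~NETIF_F_ONE_FOR_ALL;
	struct foo_slave *slave;

	list_for_each_entry(slave, &m->slaves, list)
		features = netdev_increment_features(features,
						     slave->dev->features,
						     NETIF_F_ONE_FOR_ALL);
	m->dev->features = netdev_fix_features(features, m->dev->name);
}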
6096 static struct hlist_head *netdev_create_hash(void)
6099 struct hlist_head *hash;
6101 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6103 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6104 INIT_HLIST_HEAD(&hash[i]);
6109 /* Initialize per network namespace state */
6110 static int __net_init netdev_init(struct net *net)
6112 INIT_LIST_HEAD(&net->dev_base_head);
6114 net->dev_name_head = netdev_create_hash();
6115 if (net->dev_name_head == NULL)
6118 net->dev_index_head = netdev_create_hash();
6119 if (net->dev_index_head == NULL)
6125 kfree(net->dev_name_head);
6131 * netdev_drivername - network driver for the device
6132 * @dev: network device
6133 * @buffer: buffer for resulting name
6134 * @len: size of buffer
6136 * Determine network driver for device.
6138 char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
6140 const struct device_driver *driver;
6141 const struct device *parent;
6143 if (len <= 0 || !buffer)
6147 parent = dev->dev.parent;
6152 driver = parent->driver;
6153 if (driver && driver->name)
6154 strlcpy(buffer, driver->name, len);
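/*
 * Illustrative sketch: the typical consumer of netdev_drivername() is
 * a watchdog-style diagnostic that wants to name the driver at fault,
 * much as the qdisc transmit watchdog does.
 */
static void foo_report_stall(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "%s (%s): transmit queue timed out\n",
	       dev->name,
	       netdev_drivername(dev, drivername, sizeof(drivername)));
}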
6158 static void __net_exit netdev_exit(struct net *net)
6160 kfree(net->dev_name_head);
6161 kfree(net->dev_index_head);
6164 static struct pernet_operations __net_initdata netdev_net_ops = {
6165 .init = netdev_init,
6166 .exit = netdev_exit,
6169 static void __net_exit default_device_exit(struct net *net)
6171 struct net_device *dev, *aux;
6173 * Push all migratable network devices back to the
6174 * initial network namespace
6177 for_each_netdev_safe(net, dev, aux) {
6179 char fb_name[IFNAMSIZ];
6181 /* Ignore unmovable devices (i.e. loopback) */
6182 if (dev->features & NETIF_F_NETNS_LOCAL)
6185 /* Leave virtual devices for the generic cleanup */
6186 if (dev->rtnl_link_ops)
6189 /* Push remaining network devices to init_net */
6190 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6191 err = dev_change_net_namespace(dev, &init_net, fb_name);
6193 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
6194 __func__, dev->name, err);
6201 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6203 /* At exit all network devices must be removed from a network
6204 * namespace. Do this in the reverse order of registration.
6205 * Do this across as many network namespaces as possible to
6206 * improve batching efficiency.
6208 struct net_device *dev;
6210 LIST_HEAD(dev_kill_list);
6213 list_for_each_entry(net, net_list, exit_list) {
6214 for_each_netdev_reverse(net, dev) {
6215 if (dev->rtnl_link_ops)
6216 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6218 unregister_netdevice_queue(dev, &dev_kill_list);
6221 unregister_netdevice_many(&dev_kill_list);
6225 static struct pernet_operations __net_initdata default_device_ops = {
6226 .exit = default_device_exit,
6227 .exit_batch = default_device_exit_batch,
6231 * Initialize the DEV module. At boot time this walks the device list and
6232 * unhooks any devices that fail to initialise (normally hardware not
6233 * present) and leaves us with a valid list of present and active devices.
6238 * This is called single threaded during boot, so no need
6239 * to take the rtnl semaphore.
6241 static int __init net_dev_init(void)
6243 int i, rc = -ENOMEM;
6245 BUG_ON(!dev_boot_phase);
6247 if (dev_proc_init())
6250 if (netdev_kobject_init())
6253 INIT_LIST_HEAD(&ptype_all);
6254 for (i = 0; i < PTYPE_HASH_SIZE; i++)
6255 INIT_LIST_HEAD(&ptype_base[i]);
6257 if (register_pernet_subsys(&netdev_net_ops))
6261 * Initialise the packet receive queues.
6264 for_each_possible_cpu(i) {
6265 struct softnet_data *queue;
6267 queue = &per_cpu(softnet_data, i);
6268 skb_queue_head_init(&queue->input_pkt_queue);
6269 queue->completion_queue = NULL;
6270 INIT_LIST_HEAD(&queue->poll_list);
6273 queue->csd.func = trigger_softirq;
6274 queue->csd.info = queue;
6275 queue->csd.flags = 0;
6278 queue->backlog.poll = process_backlog;
6279 queue->backlog.weight = weight_p;
6280 queue->backlog.gro_list = NULL;
6281 queue->backlog.gro_count = 0;
6286 /* The loopback device is special: if any other network device
6287 * is present in a network namespace, the loopback device must be
6288 * present too. Since we now dynamically allocate and free the
6289 * loopback device, ensure this invariant is maintained by
6290 * keeping the loopback device as the first device on the
6291 * list of network devices, so that it is the first device
6292 * to appear and the last network device to disappear.
6295 if (register_pernet_device(&loopback_net_ops))
6298 if (register_pernet_device(&default_device_ops))
6301 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6302 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6304 hotcpu_notifier(dev_cpu_callback, 0);
6312 subsys_initcall(net_dev_init);
6314 static int __init initialize_hashrnd(void)
6316 get_random_bytes(&hashrnd, sizeof(hashrnd));
6320 late_initcall_sync(initialize_hashrnd);