/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
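
/*
 * Worked example of the bucket computation (editor's note, not in the
 * original file): ETH_P_IP is 0x0800, so ntohs(pt->type) & PTYPE_HASH_MASK
 * is 0x0800 & 0x0f = 0, i.e. IPv4 handlers land in bucket 0, while
 * ETH_P_ARP (0x0806) lands in bucket 6.  ETH_P_ALL handlers never enter
 * these buckets at all - they live on the separate ptype_all list.
 */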
static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES	(1 << NETDEV_HASHBITS)
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return skb_headlen(skb) ? skb_mac_header(skb) :
	       page_address(skb_shinfo(skb)->frags[0].page) +
	       skb_shinfo(skb)->frags[0].page_offset;
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 */
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
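
/*
 * Usage sketch (editor's addition, not in the original file): a module
 * registering a receive handler for a hypothetical local-experimental
 * ethertype.  The packet_type must stay allocated until dev_remove_pack()
 * has returned, since it is linked into the lists above.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		(consume our reference to the skb)
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype = {
 *		.type	= __constant_htons(0x88b5),	(hypothetical ethertype)
 *		.func	= my_rcv,
 *		(.dev left NULL: receive from all devices)
 *	};
 *
 *	dev_add_pack(&my_ptype);	(from module init)
 *	dev_remove_pack(&my_ptype);	(from module exit; may sleep)
 */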
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
/*******************************************************************************

		Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
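
/*
 * Example (editor's note, not in the original file): given the parsing
 * above, the boot parameter follows the pattern
 * "netdev=<irq>,<base_addr>,<mem_start>,<mem_end>,<name>", so e.g.
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * stores { irq = 5, base_addr = 0x340 } under the name "eth0", which
 * netdev_boot_setup_check() then applies when the device probes.
 */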
/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
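
/*
 * Reference counting sketch (editor's addition, not in the original file):
 * a caller that cannot hold dev_base_lock or the RTNL takes its own
 * reference and releases it when done.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		(dev cannot be freed while we hold the reference)
 *		dev_put(dev);
 *	}
 */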
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}
/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
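
/*
 * Usage sketch (editor's addition, not in the original file): a driver
 * that does not care about the unit number lets the core pick one from a
 * format string before registration.
 *
 *	if (dev_alloc_name(dev, "dummy%d") < 0)	(e.g. -> "dummy0", "dummy1")
 *		goto failed;
 *	(register_netdevice(dev) may now proceed with a unique name)
 */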
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net == &init_net) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len + 1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare to death, when device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 * 	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
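
/*
 * Usage sketch (editor's addition, not in the original file): a subsystem
 * watching device events.  Thanks to the replay in
 * register_netdevice_notifier(), devices that registered or came up before
 * the notifier was added are reported to it as well.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			printk(KERN_DEBUG "%s is up\n", dev->name);
 *			break;
 *		case NETDEV_UNREGISTER:
 *			(drop any state keyed on dev here)
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);	(module init)
 *	unregister_netdevice_notifier(&my_nb);	(module exit)
 */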
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
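
/*
 * Worked example (editor's note, not in the original file): for a
 * CHECKSUM_PARTIAL TCP/IPv4 packet, csum_start points at the transport
 * header and csum_offset is 16, i.e. offsetof(struct tcphdr, check).
 * skb_checksum_help() therefore checksums from the TCP header to the end
 * of the packet and folds the result into the check field, leaving a
 * conventionally checksummed (CHECKSUM_NONE) packet for hardware that
 * cannot do the job itself.
 */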
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}
struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}
/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	prefetch(&dev->netdev_ops->ndo_start_xmit);
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return ops->ndo_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}
static u32 skb_tx_hashrnd;
static int skb_tx_hashrnd_initialized = 0;

static u16 skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 hash;

	if (unlikely(!skb_tx_hashrnd_initialized)) {
		get_random_bytes(&skb_tx_hashrnd, 4);
		skb_tx_hashrnd_initialized = 1;
	}

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
	} else if (skb->sk && skb->sk->sk_hash) {
		hash = skb->sk->sk_hash;
	} else
		hash = skb->protocol;

	hash = jhash_1word(hash, skb_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u16 queue_index = 0;

	if (ops->ndo_select_queue)
		queue_index = ops->ndo_select_queue(dev, skb);
	else if (dev->real_num_tx_queues > 1)
		queue_index = skb_tx_hash(dev, skb);

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
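
/*
 * Worked example (editor's note, not in the original file): the
 * multiply-shift in skb_tx_hash() scales a 32-bit hash into the range
 * [0, real_num_tx_queues) without a modulo.  With 4 tx queues,
 * hash = 0x40000000 maps to (0x40000000 * 4) >> 32 = 1, and
 * hash = 0xffffffff maps to queue 3; the distribution over queues is
 * uniform for a uniform hash.
 */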
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
		if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
			goto out_kfree_skb;
	}

gso:
	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		spinlock_t *root_lock = qdisc_lock(q);

		spin_lock(root_lock);

		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
			kfree_skb(skb);
			rc = NET_XMIT_DROP;
		} else {
			rc = qdisc_enqueue_root(skb, q);
			qdisc_run(q);
		}
		spin_unlock(root_lock);

		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev, txq)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
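
/*
 * Usage sketch (editor's addition, not in the original file): a virtual
 * device or protocol injecting a fully-built frame.  The skb is consumed
 * on both success and failure, so a caller wanting to retry must hold its
 * own reference beforehand.
 *
 *	skb->dev = dev;				(output device)
 *	skb->protocol = htons(ETH_P_IP);
 *	if (dev_queue_xmit(skb) != 0)
 *		;	(frame was dropped and freed; nothing to clean up)
 */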
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the most
	 * short when CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);
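
/*
 * Usage sketch (editor's addition, not in the original file): drivers call
 * netif_rx() from their interrupt handler, while code running in process
 * context uses netif_rx_ni() so that the softirq raised above actually
 * gets a chance to run.
 *
 *	(in an ISR:)            netif_rx(skb);
 *	(in process context:)   netif_rx_ni(skb);
 */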
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
/* These hooks defined here for ATM */
struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
						unsigned char *addr) __read_mostly;
void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;

/*
 * If bridge module is loaded call bridging hook.
 *  returns NULL if packet was consumed.
 */
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
					struct sk_buff *skb) __read_mostly;
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
					    struct packet_type **pt_prev, int *ret,
					    struct net_device *orig_dev)
{
	struct net_bridge_port *port;

	if (skb->pkt_type == PACKET_LOOPBACK ||
	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	return br_handle_frame_hook(port, skb);
}
#else
#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);

static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
					     struct packet_type **pt_prev,
					     int *ret,
					     struct net_device *orig_dev)
{
	if (skb->dev->macvlan_port == NULL)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}
	return macvlan_handle_frame_hook(skb);
}
#else
#define handle_macvlan(skb, pt_prev, ret, orig_dev)	(skb)
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise a compare and two extra stores
 * are executed on every packet if CONFIG_NET_CLS_ACT is on but the
 * ingress scheduler is not.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (MAX_RED_LOOP < ttl++) {
		printk(KERN_WARNING
		       "Redir loop detected Dropping packet (%d->%d)\n",
		       skb->iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	} else {
		/* Huh? Why does turning on AF_PACKET affect this? */
		skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/*
 * 	netif_nit_deliver - deliver received packets to network taps
 * 	@skb: buffer
 *
 * 	This function is used to deliver incoming packets to network
 * 	taps. It should be used when the normal netif_receive_skb path
 * 	is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	struct net_device *orig_dev;
	struct net_device *null_or_orig;
	int ret = NET_RX_DROP;
	__be16 type;

	if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
		return NET_RX_SUCCESS;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	if (!skb->iif)
		skb->iif = skb->dev->ifindex;

	null_or_orig = NULL;
	orig_dev = skb->dev;
	if (orig_dev->master) {
		if (skb_bond_should_drop(skb))
			null_or_orig = orig_dev; /* deliver only exact match */
		else
			skb->dev = orig_dev->master;
	}

	__get_cpu_var(netdev_rx_stat).total++;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

	/* Don't receive packets in an exiting network namespace */
	if (!net_alive(dev_net(skb->dev))) {
		kfree_skb(skb);
		goto out;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}
/* Network device is going away, flush any packets still pending  */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *queue = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
		if (skb->dev == dev) {
			__skb_unlink(skb, &queue->input_pkt_queue);
			kfree_skb(skb);
		}
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1)
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	skb_shinfo(skb)->gso_size = 0;
	return netif_receive_skb(skb);
}
void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
void *skb_gro_header(struct sk_buff *skb, unsigned int hlen)
{
	unsigned int offset = skb_gro_offset(skb);

	hlen += offset;
	if (hlen <= skb_headlen(skb))
		return skb->data + offset;

	if (unlikely(!skb_shinfo(skb)->nr_frags ||
		     skb_shinfo(skb)->frags[0].size <=
		     hlen - skb_headlen(skb) ||
		     PageHighMem(skb_shinfo(skb)->frags[0].page)))
		return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;

	return page_address(skb_shinfo(skb)->frags[0].page) +
	       skb_shinfo(skb)->frags[0].page_offset + offset;
}
EXPORT_SYMBOL(skb_gro_header);
int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int same_flow;
	int count = 0;
	int mac_len;
	int ret;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		struct sk_buff *p;
		void *mac;

		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac = skb_gro_mac_header(skb);
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		for (p = napi->gro_list; p; p = p->next) {
			count++;

			if (!NAPI_GRO_CB(p)->same_flow)
				continue;

			if (p->mac_len != mac_len ||
			    memcmp(skb_mac_header(p), mac, mac_len))
				NAPI_GRO_CB(p)->same_flow = 0;
		}

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS)
		goto normal;

	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto ok;
}
EXPORT_SYMBOL(dev_gro_receive);
2480 static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2484 for (p = napi->gro_list; p; p = p->next) {
2485 NAPI_GRO_CB(p)->same_flow = 1;
2486 NAPI_GRO_CB(p)->flush = 0;
2489 return dev_gro_receive(napi, skb);
2492 int napi_skb_finish(int ret, struct sk_buff *skb)
2494 int err = NET_RX_SUCCESS;
2498 return netif_receive_skb(skb);
2504 case GRO_MERGED_FREE:
2511 EXPORT_SYMBOL(napi_skb_finish);
2513 int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
2515 skb_gro_reset_offset(skb);
2517 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
2519 EXPORT_SYMBOL(napi_gro_receive);
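/*
 * Example (illustrative only, compiled out): a minimal sketch of a
 * driver NAPI poll routine feeding received buffers into the GRO path
 * above. Everything prefixed "example_" is a hypothetical driver; the
 * napi_gro_receive()/napi_gro_flush()/napi_complete() calls are the
 * real API, used much as process_backlog() uses them below.
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv = container_of(napi, struct example_priv,
						 napi);
	int work = 0;

	while (work < budget) {
		struct sk_buff *skb = example_rx_next(priv); /* hypothetical */

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->netdev);
		napi_gro_receive(napi, skb); /* instead of netif_receive_skb() */
		work++;
	}

	if (work < budget) {
		napi_gro_flush(napi);
		napi_complete(napi);
		example_enable_rx_irq(priv); /* hypothetical */
	}
	return work;
}
#endif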
2521 void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
2523 __skb_pull(skb, skb_headlen(skb));
2524 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
2528 EXPORT_SYMBOL(napi_reuse_skb);
2530 struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
2531 struct napi_gro_fraginfo *info)
2533 struct net_device *dev = napi->dev;
2534 struct sk_buff *skb = napi->skb;
2540 skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
2544 skb_reserve(skb, NET_IP_ALIGN);
2547 BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
2548 skb_shinfo(skb)->nr_frags = info->nr_frags;
2549 memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
2551 skb->data_len = info->len;
2552 skb->len += info->len;
2553 skb->truesize += info->len;
2555 skb_reset_mac_header(skb);
2556 skb_gro_reset_offset(skb);
2558 eth = skb_gro_header(skb, sizeof(*eth));
2560 napi_reuse_skb(napi, skb);
2565 skb_gro_pull(skb, sizeof(*eth));
2568 * This works because the only protocols we care about don't require
2569 * special handling. We'll fix it up properly at the end.
2571 skb->protocol = eth->h_proto;
2573 skb->ip_summed = info->ip_summed;
2574 skb->csum = info->csum;
2579 EXPORT_SYMBOL(napi_fraginfo_skb);
2581 int napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb, int ret)
2583 int err = NET_RX_SUCCESS;
2589 may = pskb_may_pull(skb, skb_gro_offset(skb));
2592 skb->protocol = eth_type_trans(skb, napi->dev);
2594 if (ret == GRO_NORMAL)
2595 return netif_receive_skb(skb);
2597 skb_gro_pull(skb, -ETH_HLEN);
2604 case GRO_MERGED_FREE:
2605 napi_reuse_skb(napi, skb);
2611 EXPORT_SYMBOL(napi_frags_finish);
2613 int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
2615 struct sk_buff *skb = napi_fraginfo_skb(napi, info);
2620 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
2622 EXPORT_SYMBOL(napi_gro_frags);
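/*
 * Example (illustrative only, compiled out): a page-based RX driver
 * can skip building the skb itself by filling a napi_gro_fraginfo with
 * the fragment(s) it received and calling napi_gro_frags(). The field
 * names match the usage in napi_fraginfo_skb() above; the driver-side
 * names are hypothetical, and CHECKSUM_UNNECESSARY assumes the NIC
 * verified the checksum.
 */
#if 0
static void example_rx_page(struct example_priv *priv, struct page *page,
			    unsigned int offset, unsigned int len)
{
	struct napi_gro_fraginfo info;

	info.nr_frags = 1;
	info.frags[0].page = page;
	info.frags[0].page_offset = offset;
	info.frags[0].size = len;
	info.len = len;
	info.ip_summed = CHECKSUM_UNNECESSARY;
	info.csum = 0;

	napi_gro_frags(&priv->napi, &info);
}
#endif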
2624 static int process_backlog(struct napi_struct *napi, int quota)
2627 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2628 unsigned long start_time = jiffies;
2630 napi->weight = weight_p;
2632 struct sk_buff *skb;
2634 local_irq_disable();
2635 skb = __skb_dequeue(&queue->input_pkt_queue);
2637 __napi_complete(napi);
2643 napi_gro_receive(napi, skb);
2644 } while (++work < quota && jiffies == start_time);
2646 napi_gro_flush(napi);
2652 * __napi_schedule - schedule for receive
2653 * @n: entry to schedule
2655 * The entry's receive function will be scheduled to run
2657 void __napi_schedule(struct napi_struct *n)
2659 unsigned long flags;
2661 local_irq_save(flags);
2662 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2663 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2664 local_irq_restore(flags);
2666 EXPORT_SYMBOL(__napi_schedule);
2668 void __napi_complete(struct napi_struct *n)
2670 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
2671 BUG_ON(n->gro_list);
2673 list_del(&n->poll_list);
2674 smp_mb__before_clear_bit();
2675 clear_bit(NAPI_STATE_SCHED, &n->state);
2677 EXPORT_SYMBOL(__napi_complete);
2679 void napi_complete(struct napi_struct *n)
2681 unsigned long flags;
* don't let napi dequeue from the cpu poll list
* just in case it's running on a different cpu
2687 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
2691 local_irq_save(flags);
2693 local_irq_restore(flags);
2695 EXPORT_SYMBOL(napi_complete);
2697 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
2698 int (*poll)(struct napi_struct *, int), int weight)
2700 INIT_LIST_HEAD(&napi->poll_list);
2701 napi->gro_list = NULL;
2704 napi->weight = weight;
2705 list_add(&napi->dev_list, &dev->napi_list);
2707 #ifdef CONFIG_NETPOLL
2708 spin_lock_init(&napi->poll_lock);
2709 napi->poll_owner = -1;
2711 set_bit(NAPI_STATE_SCHED, &napi->state);
2713 EXPORT_SYMBOL(netif_napi_add);
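/*
 * Example (illustrative only, compiled out): the usual pairing of
 * netif_napi_add() at probe time with napi_schedule_prep() and
 * __napi_schedule() in the RX interrupt handler. "example_" names are
 * hypothetical.
 */
#if 0
static int example_probe(struct example_priv *priv)
{
	netif_napi_add(priv->netdev, &priv->napi, example_poll, 64);
	return 0;
}

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		example_disable_rx_irq(priv);	/* hypothetical */
		__napi_schedule(&priv->napi);	/* run from net_rx_action() */
	}
	return IRQ_HANDLED;
}
#endif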
2715 void netif_napi_del(struct napi_struct *napi)
2717 struct sk_buff *skb, *next;
2719 list_del_init(&napi->dev_list);
2722 for (skb = napi->gro_list; skb; skb = next) {
2728 napi->gro_list = NULL;
2730 EXPORT_SYMBOL(netif_napi_del);
2733 static void net_rx_action(struct softirq_action *h)
2735 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2736 unsigned long time_limit = jiffies + 2;
2737 int budget = netdev_budget;
2740 local_irq_disable();
2742 while (!list_empty(list)) {
2743 struct napi_struct *n;
/* If the softirq window is exhausted then punt.
 * Allow this to run for 2 jiffies, which allows
 * an average latency of 1.5/HZ.
2750 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
2755 /* Even though interrupts have been re-enabled, this
2756 * access is safe because interrupts can only add new
2757 * entries to the tail of this list, and only ->poll()
2758 * calls can remove this head entry from the list.
2760 n = list_entry(list->next, struct napi_struct, poll_list);
2762 have = netpoll_poll_lock(n);
2766 /* This NAPI_STATE_SCHED test is for avoiding a race
2767 * with netpoll's poll_napi(). Only the entity which
2768 * obtains the lock and sees NAPI_STATE_SCHED set will
2769 * actually make the ->poll() call. Therefore we avoid
* accidentally calling ->poll() when NAPI is not scheduled.
2773 if (test_bit(NAPI_STATE_SCHED, &n->state))
2774 work = n->poll(n, weight);
2776 WARN_ON_ONCE(work > weight);
2780 local_irq_disable();
2782 /* Drivers must not modify the NAPI state if they
2783 * consume the entire weight. In such cases this code
2784 * still "owns" the NAPI instance and therefore can
2785 * move the instance around on the list at-will.
2787 if (unlikely(work == weight)) {
2788 if (unlikely(napi_disable_pending(n)))
2791 list_move_tail(&n->poll_list, list);
2794 netpoll_poll_unlock(have);
2799 #ifdef CONFIG_NET_DMA
2801 * There may not be any more sk_buffs coming right now, so push
2802 * any pending DMA copies to hardware
2804 dma_issue_pending_all();
2810 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2811 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2815 static gifconf_func_t * gifconf_list [NPROTO];
2818 * register_gifconf - register a SIOCGIF handler
2819 * @family: Address family
2820 * @gifconf: Function handler
2822 * Register protocol dependent address dumping routines. The handler
2823 * that is passed must not be freed or reused until it has been replaced
2824 * by another handler.
2826 int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2828 if (family >= NPROTO)
2830 gifconf_list[family] = gifconf;
2836 * Map an interface index to its name (SIOCGIFNAME)
2840 * We need this ioctl for efficient implementation of the
2841 * if_indextoname() function required by the IPv6 API. Without
* it, we would have to search all the interfaces to find a
* match.
2846 static int dev_ifname(struct net *net, struct ifreq __user *arg)
2848 struct net_device *dev;
2852 * Fetch the caller's info block.
2855 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2858 read_lock(&dev_base_lock);
2859 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2861 read_unlock(&dev_base_lock);
2865 strcpy(ifr.ifr_name, dev->name);
2866 read_unlock(&dev_base_lock);
2868 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2874 * Perform a SIOCGIFCONF call. This structure will change
2875 * size eventually, and there is nothing I can do about it.
2876 * Thus we will need a 'compatibility mode'.
2879 static int dev_ifconf(struct net *net, char __user *arg)
2882 struct net_device *dev;
2889 * Fetch the caller's info block.
2892 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2899 * Loop over the interfaces, and write an info block for each.
2903 for_each_netdev(net, dev) {
2904 for (i = 0; i < NPROTO; i++) {
2905 if (gifconf_list[i]) {
2908 done = gifconf_list[i](dev, NULL, 0);
2910 done = gifconf_list[i](dev, pos + total,
2920 * All done. Write the updated control block back to the caller.
2922 ifc.ifc_len = total;
2925 * Both BSD and Solaris return 0 here, so we do too.
2927 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
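/*
 * Example (userspace, illustrative only, compiled out): how a caller
 * typically drives SIOCGIFCONF against the loop above. Passing a NULL
 * buffer first would make the handlers only compute the needed length.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int list_interfaces(int sock)
{
	struct ifreq reqs[16];
	struct ifconf ifc;
	int i, n;

	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;
	if (ioctl(sock, SIOCGIFCONF, &ifc) < 0)
		return -1;
	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++)
		printf("%s\n", reqs[i].ifr_name);
	return 0;
}
#endif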
2930 #ifdef CONFIG_PROC_FS
2932 * This is invoked by the /proc filesystem handler to display a device
2935 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2936 __acquires(dev_base_lock)
2938 struct net *net = seq_file_net(seq);
2940 struct net_device *dev;
2942 read_lock(&dev_base_lock);
2944 return SEQ_START_TOKEN;
2947 for_each_netdev(net, dev)
2954 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2956 struct net *net = seq_file_net(seq);
2958 return v == SEQ_START_TOKEN ?
2959 first_net_device(net) : next_net_device((struct net_device *)v);
2962 void dev_seq_stop(struct seq_file *seq, void *v)
2963 __releases(dev_base_lock)
2965 read_unlock(&dev_base_lock);
2968 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2970 const struct net_device_stats *stats = dev_get_stats(dev);
2972 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2973 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2974 dev->name, stats->rx_bytes, stats->rx_packets,
2976 stats->rx_dropped + stats->rx_missed_errors,
2977 stats->rx_fifo_errors,
2978 stats->rx_length_errors + stats->rx_over_errors +
2979 stats->rx_crc_errors + stats->rx_frame_errors,
2980 stats->rx_compressed, stats->multicast,
2981 stats->tx_bytes, stats->tx_packets,
2982 stats->tx_errors, stats->tx_dropped,
2983 stats->tx_fifo_errors, stats->collisions,
2984 stats->tx_carrier_errors +
2985 stats->tx_aborted_errors +
2986 stats->tx_window_errors +
2987 stats->tx_heartbeat_errors,
2988 stats->tx_compressed);
2992 * Called from the PROCfs module. This now uses the new arbitrary sized
2993 * /proc/net interface to create /proc/net/dev
2995 static int dev_seq_show(struct seq_file *seq, void *v)
2997 if (v == SEQ_START_TOKEN)
2998 seq_puts(seq, "Inter-| Receive "
3000 " face |bytes packets errs drop fifo frame "
3001 "compressed multicast|bytes packets errs "
3002 "drop fifo colls carrier compressed\n");
3004 dev_seq_printf_stats(seq, v);
3008 static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3010 struct netif_rx_stats *rc = NULL;
3012 while (*pos < nr_cpu_ids)
3013 if (cpu_online(*pos)) {
3014 rc = &per_cpu(netdev_rx_stat, *pos);
3021 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3023 return softnet_get_online(pos);
3026 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3029 return softnet_get_online(pos);
3032 static void softnet_seq_stop(struct seq_file *seq, void *v)
3036 static int softnet_seq_show(struct seq_file *seq, void *v)
3038 struct netif_rx_stats *s = v;
3040 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3041 s->total, s->dropped, s->time_squeeze, 0,
3042 0, 0, 0, 0, /* was fastroute */
3047 static const struct seq_operations dev_seq_ops = {
3048 .start = dev_seq_start,
3049 .next = dev_seq_next,
3050 .stop = dev_seq_stop,
3051 .show = dev_seq_show,
3054 static int dev_seq_open(struct inode *inode, struct file *file)
3056 return seq_open_net(inode, file, &dev_seq_ops,
3057 sizeof(struct seq_net_private));
3060 static const struct file_operations dev_seq_fops = {
3061 .owner = THIS_MODULE,
3062 .open = dev_seq_open,
3064 .llseek = seq_lseek,
3065 .release = seq_release_net,
3068 static const struct seq_operations softnet_seq_ops = {
3069 .start = softnet_seq_start,
3070 .next = softnet_seq_next,
3071 .stop = softnet_seq_stop,
3072 .show = softnet_seq_show,
3075 static int softnet_seq_open(struct inode *inode, struct file *file)
3077 return seq_open(file, &softnet_seq_ops);
3080 static const struct file_operations softnet_seq_fops = {
3081 .owner = THIS_MODULE,
3082 .open = softnet_seq_open,
3084 .llseek = seq_lseek,
3085 .release = seq_release,
3088 static void *ptype_get_idx(loff_t pos)
3090 struct packet_type *pt = NULL;
3094 list_for_each_entry_rcu(pt, &ptype_all, list) {
3100 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
3101 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3110 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
3114 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3117 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3119 struct packet_type *pt;
3120 struct list_head *nxt;
3124 if (v == SEQ_START_TOKEN)
3125 return ptype_get_idx(0);
3128 nxt = pt->list.next;
3129 if (pt->type == htons(ETH_P_ALL)) {
3130 if (nxt != &ptype_all)
3133 nxt = ptype_base[0].next;
3135 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
3137 while (nxt == &ptype_base[hash]) {
3138 if (++hash >= PTYPE_HASH_SIZE)
3140 nxt = ptype_base[hash].next;
3143 return list_entry(nxt, struct packet_type, list);
3146 static void ptype_seq_stop(struct seq_file *seq, void *v)
3152 static int ptype_seq_show(struct seq_file *seq, void *v)
3154 struct packet_type *pt = v;
3156 if (v == SEQ_START_TOKEN)
3157 seq_puts(seq, "Type Device Function\n");
3158 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
3159 if (pt->type == htons(ETH_P_ALL))
3160 seq_puts(seq, "ALL ");
3162 seq_printf(seq, "%04x", ntohs(pt->type));
3164 seq_printf(seq, " %-8s %pF\n",
3165 pt->dev ? pt->dev->name : "", pt->func);
3171 static const struct seq_operations ptype_seq_ops = {
3172 .start = ptype_seq_start,
3173 .next = ptype_seq_next,
3174 .stop = ptype_seq_stop,
3175 .show = ptype_seq_show,
3178 static int ptype_seq_open(struct inode *inode, struct file *file)
3180 return seq_open_net(inode, file, &ptype_seq_ops,
3181 sizeof(struct seq_net_private));
3184 static const struct file_operations ptype_seq_fops = {
3185 .owner = THIS_MODULE,
3186 .open = ptype_seq_open,
3188 .llseek = seq_lseek,
3189 .release = seq_release_net,
3193 static int __net_init dev_proc_net_init(struct net *net)
3197 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
3199 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
3201 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
3204 if (wext_proc_init(net))
3210 proc_net_remove(net, "ptype");
3212 proc_net_remove(net, "softnet_stat");
3214 proc_net_remove(net, "dev");
3218 static void __net_exit dev_proc_net_exit(struct net *net)
3220 wext_proc_exit(net);
3222 proc_net_remove(net, "ptype");
3223 proc_net_remove(net, "softnet_stat");
3224 proc_net_remove(net, "dev");
3227 static struct pernet_operations __net_initdata dev_proc_ops = {
3228 .init = dev_proc_net_init,
3229 .exit = dev_proc_net_exit,
3232 static int __init dev_proc_init(void)
3234 return register_pernet_subsys(&dev_proc_ops);
3237 #define dev_proc_init() 0
3238 #endif /* CONFIG_PROC_FS */
3242 * netdev_set_master - set up master/slave pair
3243 * @slave: slave device
3244 * @master: new master device
3246 * Changes the master device of the slave. Pass %NULL to break the
3247 * bonding. The caller must hold the RTNL semaphore. On a failure
3248 * a negative errno code is returned. On success the reference counts
3249 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3250 * function returns zero.
3252 int netdev_set_master(struct net_device *slave, struct net_device *master)
3254 struct net_device *old = slave->master;
3264 slave->master = master;
3272 slave->flags |= IFF_SLAVE;
3274 slave->flags &= ~IFF_SLAVE;
3276 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
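/*
 * Example (illustrative only, compiled out): a bonding-style driver
 * enslaving a device with netdev_set_master() under the RTNL
 * semaphore, as required by the comment above.
 */
#if 0
static int example_enslave(struct net_device *bond, struct net_device *slave)
{
	int err;

	ASSERT_RTNL();
	err = netdev_set_master(slave, bond);
	if (err)
		return err;
	/* ... driver-specific slave setup would follow ... */
	return 0;
}
#endif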
3280 static void dev_change_rx_flags(struct net_device *dev, int flags)
3282 const struct net_device_ops *ops = dev->netdev_ops;
3284 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3285 ops->ndo_change_rx_flags(dev, flags);
3288 static int __dev_set_promiscuity(struct net_device *dev, int inc)
3290 unsigned short old_flags = dev->flags;
3296 dev->flags |= IFF_PROMISC;
3297 dev->promiscuity += inc;
3298 if (dev->promiscuity == 0) {
3301 * If inc causes overflow, untouch promisc and return error.
3304 dev->flags &= ~IFF_PROMISC;
3306 dev->promiscuity -= inc;
printk(KERN_WARNING "%s: promiscuity counter overflowed, "
       "setting promiscuity failed; promiscuous mode "
       "on this device may be broken.\n", dev->name);
3313 if (dev->flags != old_flags) {
3314 printk(KERN_INFO "device %s %s promiscuous mode\n",
3315 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
3317 if (audit_enabled) {
3318 current_uid_gid(&uid, &gid);
3319 audit_log(current->audit_context, GFP_ATOMIC,
3320 AUDIT_ANOM_PROMISCUOUS,
3321 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3322 dev->name, (dev->flags & IFF_PROMISC),
3323 (old_flags & IFF_PROMISC),
3324 audit_get_loginuid(current),
3326 audit_get_sessionid(current));
3329 dev_change_rx_flags(dev, IFF_PROMISC);
3335 * dev_set_promiscuity - update promiscuity count on a device
3339 * Add or remove promiscuity from a device. While the count in the device
* remains above zero the interface remains promiscuous. Once it hits zero
* the device reverts to normal filtering operation. A negative @inc
* value is used to drop promiscuity on the device.
3343 * Return 0 if successful or a negative errno code on error.
3345 int dev_set_promiscuity(struct net_device *dev, int inc)
3347 unsigned short old_flags = dev->flags;
3350 err = __dev_set_promiscuity(dev, inc);
3353 if (dev->flags != old_flags)
3354 dev_set_rx_mode(dev);
3359 * dev_set_allmulti - update allmulti count on a device
* Add or remove reception of all multicast frames on a device. While the
* count in the device remains above zero the interface remains listening
* to all multicast frames. Once it hits zero the device reverts to normal
* filtering operation. A negative @inc value is used to drop the counter
* when releasing a resource needing all multicasts.
3368 * Return 0 if successful or a negative errno code on error.
3371 int dev_set_allmulti(struct net_device *dev, int inc)
3373 unsigned short old_flags = dev->flags;
3377 dev->flags |= IFF_ALLMULTI;
3378 dev->allmulti += inc;
3379 if (dev->allmulti == 0) {
3382 * If inc causes overflow, untouch allmulti and return error.
3385 dev->flags &= ~IFF_ALLMULTI;
3387 dev->allmulti -= inc;
printk(KERN_WARNING "%s: allmulti counter overflowed, "
       "setting allmulti failed; allmulti mode "
       "on this device may be broken.\n", dev->name);
3394 if (dev->flags ^ old_flags) {
3395 dev_change_rx_flags(dev, IFF_ALLMULTI);
3396 dev_set_rx_mode(dev);
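/*
 * Example (illustrative only, compiled out): both counters above are
 * reference counts, so every +1 must eventually be paired with a -1.
 * A capture-style user, typically running under RTNL, might do:
 */
#if 0
static int example_capture_start(struct net_device *dev)
{
	int err;

	err = dev_set_promiscuity(dev, 1);
	if (err)
		return err;
	err = dev_set_allmulti(dev, 1);
	if (err)
		dev_set_promiscuity(dev, -1);	/* roll back on failure */
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	dev_set_allmulti(dev, -1);
	dev_set_promiscuity(dev, -1);
}
#endif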
* Upload unicast and multicast address lists to the device and
* configure RX filtering. When the device doesn't support unicast
* filtering it is put in promiscuous mode while unicast addresses
* are enabled.
3407 void __dev_set_rx_mode(struct net_device *dev)
3409 const struct net_device_ops *ops = dev->netdev_ops;
3411 /* dev_open will call this function so the list will stay sane. */
3412 if (!(dev->flags&IFF_UP))
3415 if (!netif_device_present(dev))
3418 if (ops->ndo_set_rx_mode)
3419 ops->ndo_set_rx_mode(dev);
3421 /* Unicast addresses changes may only happen under the rtnl,
3422 * therefore calling __dev_set_promiscuity here is safe.
3424 if (dev->uc_count > 0 && !dev->uc_promisc) {
3425 __dev_set_promiscuity(dev, 1);
3426 dev->uc_promisc = 1;
3427 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3428 __dev_set_promiscuity(dev, -1);
3429 dev->uc_promisc = 0;
3432 if (ops->ndo_set_multicast_list)
3433 ops->ndo_set_multicast_list(dev);
3437 void dev_set_rx_mode(struct net_device *dev)
3439 netif_addr_lock_bh(dev);
3440 __dev_set_rx_mode(dev);
3441 netif_addr_unlock_bh(dev);
3444 int __dev_addr_delete(struct dev_addr_list **list, int *count,
3445 void *addr, int alen, int glbl)
3447 struct dev_addr_list *da;
3449 for (; (da = *list) != NULL; list = &da->next) {
3450 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3451 alen == da->da_addrlen) {
3453 int old_glbl = da->da_gusers;
3470 int __dev_addr_add(struct dev_addr_list **list, int *count,
3471 void *addr, int alen, int glbl)
3473 struct dev_addr_list *da;
3475 for (da = *list; da != NULL; da = da->next) {
3476 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3477 da->da_addrlen == alen) {
3479 int old_glbl = da->da_gusers;
3489 da = kzalloc(sizeof(*da), GFP_ATOMIC);
3492 memcpy(da->da_addr, addr, alen);
3493 da->da_addrlen = alen;
3495 da->da_gusers = glbl ? 1 : 0;
3503 * dev_unicast_delete - Release secondary unicast address.
3505 * @addr: address to delete
3506 * @alen: length of @addr
3508 * Release reference to a secondary unicast address and remove it
3509 * from the device if the reference count drops to zero.
3511 * The caller must hold the rtnl_mutex.
3513 int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3519 netif_addr_lock_bh(dev);
3520 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3522 __dev_set_rx_mode(dev);
3523 netif_addr_unlock_bh(dev);
3526 EXPORT_SYMBOL(dev_unicast_delete);
3529 * dev_unicast_add - add a secondary unicast address
3531 * @addr: address to add
3532 * @alen: length of @addr
3534 * Add a secondary unicast address to the device or increase
3535 * the reference count if it already exists.
3537 * The caller must hold the rtnl_mutex.
3539 int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3545 netif_addr_lock_bh(dev);
3546 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3548 __dev_set_rx_mode(dev);
3549 netif_addr_unlock_bh(dev);
3552 EXPORT_SYMBOL(dev_unicast_add);
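/*
 * Example (illustrative only, compiled out): a stacked driver in the
 * macvlan mould adds its own MAC address to the lower device's
 * secondary unicast list, and removes it symmetrically, holding the
 * rtnl_mutex as both helpers require.
 */
#if 0
static int example_attach(struct net_device *lower, struct net_device *upper)
{
	ASSERT_RTNL();
	return dev_unicast_add(lower, upper->dev_addr, upper->addr_len);
}

static void example_detach(struct net_device *lower, struct net_device *upper)
{
	ASSERT_RTNL();
	dev_unicast_delete(lower, upper->dev_addr, upper->addr_len);
}
#endif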
3554 int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3555 struct dev_addr_list **from, int *from_count)
3557 struct dev_addr_list *da, *next;
3561 while (da != NULL) {
3563 if (!da->da_synced) {
3564 err = __dev_addr_add(to, to_count,
3565 da->da_addr, da->da_addrlen, 0);
3570 } else if (da->da_users == 1) {
3571 __dev_addr_delete(to, to_count,
3572 da->da_addr, da->da_addrlen, 0);
3573 __dev_addr_delete(from, from_count,
3574 da->da_addr, da->da_addrlen, 0);
3581 void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3582 struct dev_addr_list **from, int *from_count)
3584 struct dev_addr_list *da, *next;
3587 while (da != NULL) {
3589 if (da->da_synced) {
3590 __dev_addr_delete(to, to_count,
3591 da->da_addr, da->da_addrlen, 0);
3593 __dev_addr_delete(from, from_count,
3594 da->da_addr, da->da_addrlen, 0);
3601 * dev_unicast_sync - Synchronize device's unicast list to another device
3602 * @to: destination device
3603 * @from: source device
3605 * Add newly added addresses to the destination device and release
3606 * addresses that have no users left. The source device must be
3607 * locked by netif_tx_lock_bh.
3609 * This function is intended to be called from the dev->set_rx_mode
3610 * function of layered software devices.
3612 int dev_unicast_sync(struct net_device *to, struct net_device *from)
3616 netif_addr_lock_bh(to);
3617 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3618 &from->uc_list, &from->uc_count);
3620 __dev_set_rx_mode(to);
3621 netif_addr_unlock_bh(to);
3624 EXPORT_SYMBOL(dev_unicast_sync);
3627 * dev_unicast_unsync - Remove synchronized addresses from the destination device
3628 * @to: destination device
3629 * @from: source device
3631 * Remove all addresses that were added to the destination device by
3632 * dev_unicast_sync(). This function is intended to be called from the
3633 * dev->stop function of layered software devices.
3635 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3637 netif_addr_lock_bh(from);
3638 netif_addr_lock(to);
3640 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3641 &from->uc_list, &from->uc_count);
3642 __dev_set_rx_mode(to);
3644 netif_addr_unlock(to);
3645 netif_addr_unlock_bh(from);
3647 EXPORT_SYMBOL(dev_unicast_unsync);
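/*
 * Example (illustrative only, compiled out): the call sites named in
 * the two comments above, sketched for a hypothetical VLAN-like driver
 * whose traffic really flows through priv->lowerdev.
 */
#if 0
static void example_set_rx_mode(struct net_device *dev) /* ndo_set_rx_mode */
{
	struct example_priv *priv = netdev_priv(dev);

	dev_unicast_sync(priv->lowerdev, dev);
}

static int example_stop(struct net_device *dev)		/* ndo_stop */
{
	struct example_priv *priv = netdev_priv(dev);

	dev_unicast_unsync(priv->lowerdev, dev);
	return 0;
}
#endif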
3649 static void __dev_addr_discard(struct dev_addr_list **list)
3651 struct dev_addr_list *tmp;
3653 while (*list != NULL) {
3656 if (tmp->da_users > tmp->da_gusers)
3657 printk("__dev_addr_discard: address leakage! "
3658 "da_users=%d\n", tmp->da_users);
3663 static void dev_addr_discard(struct net_device *dev)
3665 netif_addr_lock_bh(dev);
3667 __dev_addr_discard(&dev->uc_list);
3670 __dev_addr_discard(&dev->mc_list);
3673 netif_addr_unlock_bh(dev);
3677 * dev_get_flags - get flags reported to userspace
3680 * Get the combination of flag bits exported through APIs to userspace.
3682 unsigned dev_get_flags(const struct net_device *dev)
3686 flags = (dev->flags & ~(IFF_PROMISC |
3691 (dev->gflags & (IFF_PROMISC |
3694 if (netif_running(dev)) {
3695 if (netif_oper_up(dev))
3696 flags |= IFF_RUNNING;
3697 if (netif_carrier_ok(dev))
3698 flags |= IFF_LOWER_UP;
3699 if (netif_dormant(dev))
3700 flags |= IFF_DORMANT;
3707 * dev_change_flags - change device settings
3709 * @flags: device state flags
3711 * Change settings on device based state flags. The flags are
3712 * in the userspace exported format.
3714 int dev_change_flags(struct net_device *dev, unsigned flags)
3717 int old_flags = dev->flags;
3722 * Set the flags on our device.
3725 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3726 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3728 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3732 * Load in the correct multicast list now the flags have changed.
3735 if ((old_flags ^ flags) & IFF_MULTICAST)
3736 dev_change_rx_flags(dev, IFF_MULTICAST);
3738 dev_set_rx_mode(dev);
* Have we downed the interface? We handle IFF_UP ourselves
* according to user attempts to set it, rather than blindly
* setting it.
3747 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3748 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3751 dev_set_rx_mode(dev);
3754 if (dev->flags & IFF_UP &&
3755 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3757 call_netdevice_notifiers(NETDEV_CHANGE, dev);
3759 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3760 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3761 dev->gflags ^= IFF_PROMISC;
3762 dev_set_promiscuity(dev, inc);
/* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
   is important. Some (broken) drivers set IFF_PROMISC when
   IFF_ALLMULTI is requested, without asking us and without reporting it.
3769 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3770 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3771 dev->gflags ^= IFF_ALLMULTI;
3772 dev_set_allmulti(dev, inc);
3775 /* Exclude state transition flags, already notified */
3776 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3778 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
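/*
 * Example (illustrative only, compiled out): bringing an interface up
 * from kernel code. dev_change_flags() must run under RTNL since it
 * may call dev_open() or dev_close().
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif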
3784 * dev_set_mtu - Change maximum transfer unit
3786 * @new_mtu: new transfer unit
3788 * Change the maximum transfer size of the network device.
3790 int dev_set_mtu(struct net_device *dev, int new_mtu)
3792 const struct net_device_ops *ops = dev->netdev_ops;
3795 if (new_mtu == dev->mtu)
3798 /* MTU must be positive. */
3802 if (!netif_device_present(dev))
3806 if (ops->ndo_change_mtu)
3807 err = ops->ndo_change_mtu(dev, new_mtu);
3811 if (!err && dev->flags & IFF_UP)
3812 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
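/*
 * Example (illustrative only, compiled out): a tunnel-style driver
 * shrinking the MTU of its carrier device by its encapsulation
 * overhead; dev_set_mtu() performs the ndo_change_mtu() call and the
 * NETDEV_CHANGEMTU notification.
 */
#if 0
static int example_fit_mtu(struct net_device *carrier, int overhead)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(carrier, carrier->mtu - overhead);
	rtnl_unlock();
	return err;
}
#endif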
3817 * dev_set_mac_address - Change Media Access Control Address
3821 * Change the hardware (MAC) address of the device
3823 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3825 const struct net_device_ops *ops = dev->netdev_ops;
3828 if (!ops->ndo_set_mac_address)
3830 if (sa->sa_family != dev->type)
3832 if (!netif_device_present(dev))
3834 err = ops->ndo_set_mac_address(dev, sa);
3836 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
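/*
 * Example (illustrative only, compiled out): the sockaddr handed to
 * dev_set_mac_address() must carry the device's own type in sa_family,
 * exactly as the check above enforces. The caller holds RTNL.
 */
#if 0
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;	/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}
#endif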
3841 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
3843 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
3846 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3852 case SIOCGIFFLAGS: /* Get interface flags */
3853 ifr->ifr_flags = dev_get_flags(dev);
3856 case SIOCGIFMETRIC: /* Get the metric on the interface
3857 (currently unused) */
3858 ifr->ifr_metric = 0;
3861 case SIOCGIFMTU: /* Get the MTU of a device */
3862 ifr->ifr_mtu = dev->mtu;
3867 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3869 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3870 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3871 ifr->ifr_hwaddr.sa_family = dev->type;
3879 ifr->ifr_map.mem_start = dev->mem_start;
3880 ifr->ifr_map.mem_end = dev->mem_end;
3881 ifr->ifr_map.base_addr = dev->base_addr;
3882 ifr->ifr_map.irq = dev->irq;
3883 ifr->ifr_map.dma = dev->dma;
3884 ifr->ifr_map.port = dev->if_port;
3888 ifr->ifr_ifindex = dev->ifindex;
3892 ifr->ifr_qlen = dev->tx_queue_len;
/* dev_ioctl() should ensure this case
 * is never reached
 */
3908 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3910 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3913 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3914 const struct net_device_ops *ops;
3919 ops = dev->netdev_ops;
3922 case SIOCSIFFLAGS: /* Set interface flags */
3923 return dev_change_flags(dev, ifr->ifr_flags);
3925 case SIOCSIFMETRIC: /* Set the metric on the interface
3926 (currently unused) */
3929 case SIOCSIFMTU: /* Set the MTU of a device */
3930 return dev_set_mtu(dev, ifr->ifr_mtu);
3933 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3935 case SIOCSIFHWBROADCAST:
3936 if (ifr->ifr_hwaddr.sa_family != dev->type)
3938 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3939 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3940 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3944 if (ops->ndo_set_config) {
3945 if (!netif_device_present(dev))
3947 return ops->ndo_set_config(dev, &ifr->ifr_map);
3952 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
3953 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3955 if (!netif_device_present(dev))
3957 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3961 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
3962 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3964 if (!netif_device_present(dev))
3966 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3970 if (ifr->ifr_qlen < 0)
3972 dev->tx_queue_len = ifr->ifr_qlen;
3976 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3977 return dev_change_name(dev, ifr->ifr_newname);
3980 * Unknown or private ioctl
3984 if ((cmd >= SIOCDEVPRIVATE &&
3985 cmd <= SIOCDEVPRIVATE + 15) ||
3986 cmd == SIOCBONDENSLAVE ||
3987 cmd == SIOCBONDRELEASE ||
3988 cmd == SIOCBONDSETHWADDR ||
3989 cmd == SIOCBONDSLAVEINFOQUERY ||
3990 cmd == SIOCBONDINFOQUERY ||
3991 cmd == SIOCBONDCHANGEACTIVE ||
3992 cmd == SIOCGMIIPHY ||
3993 cmd == SIOCGMIIREG ||
3994 cmd == SIOCSMIIREG ||
3995 cmd == SIOCBRADDIF ||
3996 cmd == SIOCBRDELIF ||
3997 cmd == SIOCWANDEV) {
3999 if (ops->ndo_do_ioctl) {
4000 if (netif_device_present(dev))
4001 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4013 * This function handles all "interface"-type I/O control requests. The actual
4014 * 'doing' part of this is dev_ifsioc above.
4018 * dev_ioctl - network device ioctl
4019 * @net: the applicable net namespace
4020 * @cmd: command to issue
4021 * @arg: pointer to a struct ifreq in user space
4023 * Issue ioctl functions to devices. This is normally called by the
4024 * user space syscall interfaces but can sometimes be useful for
4025 * other purposes. The return value is the return from the syscall if
4026 * positive or a negative errno code on error.
4029 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
/* One special case: SIOCGIFCONF takes an ifconf argument
   and requires shared lock, because it sleeps writing
   to user space.
4040 if (cmd == SIOCGIFCONF) {
4042 ret = dev_ifconf(net, (char __user *) arg);
4046 if (cmd == SIOCGIFNAME)
4047 return dev_ifname(net, (struct ifreq __user *)arg);
4049 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4052 ifr.ifr_name[IFNAMSIZ-1] = 0;
4054 colon = strchr(ifr.ifr_name, ':');
4059 * See which interface the caller is talking about.
4064 * These ioctl calls:
4065 * - can be done by all.
4066 * - atomic and do not require locking.
4077 dev_load(net, ifr.ifr_name);
4078 read_lock(&dev_base_lock);
4079 ret = dev_ifsioc_locked(net, &ifr, cmd);
4080 read_unlock(&dev_base_lock);
4084 if (copy_to_user(arg, &ifr,
4085 sizeof(struct ifreq)))
4091 dev_load(net, ifr.ifr_name);
4093 ret = dev_ethtool(net, &ifr);
4098 if (copy_to_user(arg, &ifr,
4099 sizeof(struct ifreq)))
4105 * These ioctl calls:
4106 * - require superuser power.
4107 * - require strict serialization.
4113 if (!capable(CAP_NET_ADMIN))
4115 dev_load(net, ifr.ifr_name);
4117 ret = dev_ifsioc(net, &ifr, cmd);
4122 if (copy_to_user(arg, &ifr,
4123 sizeof(struct ifreq)))
4129 * These ioctl calls:
4130 * - require superuser power.
4131 * - require strict serialization.
4132 * - do not return a value
4142 case SIOCSIFHWBROADCAST:
4145 case SIOCBONDENSLAVE:
4146 case SIOCBONDRELEASE:
4147 case SIOCBONDSETHWADDR:
4148 case SIOCBONDCHANGEACTIVE:
4151 if (!capable(CAP_NET_ADMIN))
4154 case SIOCBONDSLAVEINFOQUERY:
4155 case SIOCBONDINFOQUERY:
4156 dev_load(net, ifr.ifr_name);
4158 ret = dev_ifsioc(net, &ifr, cmd);
4163 /* Get the per device memory space. We can add this but
4164 * currently do not support it */
4166 /* Set the per device memory buffer space.
4167 * Not applicable in our case */
4172 * Unknown or private ioctl.
4175 if (cmd == SIOCWANDEV ||
4176 (cmd >= SIOCDEVPRIVATE &&
4177 cmd <= SIOCDEVPRIVATE + 15)) {
4178 dev_load(net, ifr.ifr_name);
4180 ret = dev_ifsioc(net, &ifr, cmd);
4182 if (!ret && copy_to_user(arg, &ifr,
4183 sizeof(struct ifreq)))
4187 /* Take care of Wireless Extensions */
4188 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4189 return wext_handle_ioctl(net, &ifr, cmd, arg);
4196 * dev_new_index - allocate an ifindex
4197 * @net: the applicable net namespace
4199 * Returns a suitable unique value for a new device interface
4200 * number. The caller must hold the rtnl semaphore or the
4201 * dev_base_lock to be sure it remains unique.
4203 static int dev_new_index(struct net *net)
4209 if (!__dev_get_by_index(net, ifindex))
/* Delayed registration/unregistration */
4215 static LIST_HEAD(net_todo_list);
4217 static void net_set_todo(struct net_device *dev)
4219 list_add_tail(&dev->todo_list, &net_todo_list);
4222 static void rollback_registered(struct net_device *dev)
4224 BUG_ON(dev_boot_phase);
/* Some devices call this without ever having registered, to unwind a failed initialization. */
4228 if (dev->reg_state == NETREG_UNINITIALIZED) {
4229 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
4230 "was registered\n", dev->name, dev);
4236 BUG_ON(dev->reg_state != NETREG_REGISTERED);
4238 /* If device is running, close it first. */
4241 /* And unlink it from device chain. */
4242 unlist_netdevice(dev);
4244 dev->reg_state = NETREG_UNREGISTERING;
4248 /* Shutdown queueing discipline. */
4252 /* Notify protocols, that we are about to destroy
4253 this device. They should clean all the things.
4255 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4258 * Flush the unicast and multicast chains
4260 dev_addr_discard(dev);
4262 if (dev->netdev_ops->ndo_uninit)
4263 dev->netdev_ops->ndo_uninit(dev);
4265 /* Notifier chain MUST detach us from master device. */
4266 WARN_ON(dev->master);
4268 /* Remove entries from kobject tree */
4269 netdev_unregister_kobject(dev);
4276 static void __netdev_init_queue_locks_one(struct net_device *dev,
4277 struct netdev_queue *dev_queue,
4280 spin_lock_init(&dev_queue->_xmit_lock);
4281 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
4282 dev_queue->xmit_lock_owner = -1;
4285 static void netdev_init_queue_locks(struct net_device *dev)
4287 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4288 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
4291 unsigned long netdev_fix_features(unsigned long features, const char *name)
4293 /* Fix illegal SG+CSUM combinations. */
4294 if ((features & NETIF_F_SG) &&
4295 !(features & NETIF_F_ALL_CSUM)) {
4297 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4298 "checksum feature.\n", name);
4299 features &= ~NETIF_F_SG;
4302 /* TSO requires that SG is present as well. */
4303 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4305 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4306 "SG feature.\n", name);
4307 features &= ~NETIF_F_TSO;
4310 if (features & NETIF_F_UFO) {
4311 if (!(features & NETIF_F_GEN_CSUM)) {
4313 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4314 "since no NETIF_F_HW_CSUM feature.\n",
4316 features &= ~NETIF_F_UFO;
4319 if (!(features & NETIF_F_SG)) {
4321 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4322 "since no NETIF_F_SG feature.\n", name);
4323 features &= ~NETIF_F_UFO;
4329 EXPORT_SYMBOL(netdev_fix_features);
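/*
 * Example (illustrative only, compiled out): a feature-changing path,
 * ethtool-style, can sanitize a requested mask through
 * netdev_fix_features() before committing it, so that illegal
 * combinations such as TSO-without-SG are stripped with a log message.
 */
#if 0
static void example_update_features(struct net_device *dev,
				    unsigned long wanted)
{
	dev->features = netdev_fix_features(wanted, dev->name);
}
#endif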
4332 * register_netdevice - register a network device
4333 * @dev: device to register
4335 * Take a completed network device structure and add it to the kernel
4336 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4337 * chain. 0 is returned on success. A negative errno code is returned
4338 * on a failure to set up the device, or if the name is a duplicate.
4340 * Callers must hold the rtnl semaphore. You may want
4341 * register_netdev() instead of this.
4344 * The locking appears insufficient to guarantee two parallel registers
4345 * will not get the same name.
4348 int register_netdevice(struct net_device *dev)
4350 struct hlist_head *head;
4351 struct hlist_node *p;
4353 struct net *net = dev_net(dev);
4355 BUG_ON(dev_boot_phase);
4360 /* When net_device's are persistent, this will be fatal. */
4361 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
4364 spin_lock_init(&dev->addr_list_lock);
4365 netdev_set_addr_lockdep_class(dev);
4366 netdev_init_queue_locks(dev);
4370 #ifdef CONFIG_COMPAT_NET_DEV_OPS
/* Netdevice_ops API compatibility support.
 * This is temporary until all network devices are converted.
4374 if (dev->netdev_ops) {
4375 const struct net_device_ops *ops = dev->netdev_ops;
4377 dev->init = ops->ndo_init;
4378 dev->uninit = ops->ndo_uninit;
4379 dev->open = ops->ndo_open;
4380 dev->change_rx_flags = ops->ndo_change_rx_flags;
4381 dev->set_rx_mode = ops->ndo_set_rx_mode;
4382 dev->set_multicast_list = ops->ndo_set_multicast_list;
4383 dev->set_mac_address = ops->ndo_set_mac_address;
4384 dev->validate_addr = ops->ndo_validate_addr;
4385 dev->do_ioctl = ops->ndo_do_ioctl;
4386 dev->set_config = ops->ndo_set_config;
4387 dev->change_mtu = ops->ndo_change_mtu;
4388 dev->tx_timeout = ops->ndo_tx_timeout;
4389 dev->get_stats = ops->ndo_get_stats;
4390 dev->vlan_rx_register = ops->ndo_vlan_rx_register;
4391 dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
4392 dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
4393 #ifdef CONFIG_NET_POLL_CONTROLLER
4394 dev->poll_controller = ops->ndo_poll_controller;
4397 char drivername[64];
4398 pr_info("%s (%s): not using net_device_ops yet\n",
4399 dev->name, netdev_drivername(dev, drivername, 64));
/* This works only because net_device_ops and the
   compatibility structure are laid out identically. */
4403 dev->netdev_ops = (void *) &(dev->init);
4407 /* Init, if this function is available */
4408 if (dev->netdev_ops->ndo_init) {
4409 ret = dev->netdev_ops->ndo_init(dev);
4417 if (!dev_valid_name(dev->name)) {
4422 dev->ifindex = dev_new_index(net);
4423 if (dev->iflink == -1)
4424 dev->iflink = dev->ifindex;
4426 /* Check for existence of name */
4427 head = dev_name_hash(net, dev->name);
4428 hlist_for_each(p, head) {
4429 struct net_device *d
4430 = hlist_entry(p, struct net_device, name_hlist);
4431 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
4437 /* Fix illegal checksum combinations */
4438 if ((dev->features & NETIF_F_HW_CSUM) &&
4439 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4440 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4442 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4445 if ((dev->features & NETIF_F_NO_CSUM) &&
4446 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4447 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4449 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4452 dev->features = netdev_fix_features(dev->features, dev->name);
4454 /* Enable software GSO if SG is supported. */
4455 if (dev->features & NETIF_F_SG)
4456 dev->features |= NETIF_F_GSO;
4458 netdev_initialize_kobject(dev);
4459 ret = netdev_register_kobject(dev);
4462 dev->reg_state = NETREG_REGISTERED;
4465 * Default initial state at registry is that the
4466 * device is present.
4469 set_bit(__LINK_STATE_PRESENT, &dev->state);
4471 dev_init_scheduler(dev);
4473 list_netdevice(dev);
4475 /* Notify protocols, that a new device appeared. */
4476 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
4477 ret = notifier_to_errno(ret);
4479 rollback_registered(dev);
4480 dev->reg_state = NETREG_UNREGISTERED;
4487 if (dev->netdev_ops->ndo_uninit)
4488 dev->netdev_ops->ndo_uninit(dev);
4493 * init_dummy_netdev - init a dummy network device for NAPI
4494 * @dev: device to init
* This takes a network device structure and initializes the minimum
* amount of fields so it can be used to schedule NAPI polls without
* registering a full-blown interface. This is to be used by drivers
4499 * that need to tie several hardware interfaces to a single NAPI
4500 * poll scheduler due to HW limitations.
4502 int init_dummy_netdev(struct net_device *dev)
/* Clear everything. Note we don't initialize spinlocks
 * as they aren't supposed to be taken by any of the
 * NAPI code and this dummy netdev is supposed to be
 * used only for NAPI polls
4509 memset(dev, 0, sizeof(struct net_device));
4511 /* make sure we BUG if trying to hit standard
4512 * register/unregister code path
4514 dev->reg_state = NETREG_DUMMY;
4516 /* initialize the ref count */
4517 atomic_set(&dev->refcnt, 1);
4519 /* NAPI wants this */
4520 INIT_LIST_HEAD(&dev->napi_list);
4522 /* a dummy interface is started by default */
4523 set_bit(__LINK_STATE_PRESENT, &dev->state);
4524 set_bit(__LINK_STATE_START, &dev->state);
4528 EXPORT_SYMBOL_GPL(init_dummy_netdev);
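/*
 * Example (illustrative only, compiled out): an adapter with several
 * hardware queues but no natural one-to-one netdev mapping can hang
 * all its NAPI contexts off one dummy netdev. The adapter structure
 * and queue count are hypothetical.
 */
#if 0
static int example_setup_napi(struct example_adapter *adap)
{
	int i;

	init_dummy_netdev(&adap->dummy_dev);
	for (i = 0; i < adap->nqueues; i++)
		netif_napi_add(&adap->dummy_dev, &adap->queue[i].napi,
			       example_poll, 64);
	return 0;
}
#endif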
4532 * register_netdev - register a network device
4533 * @dev: device to register
4535 * Take a completed network device structure and add it to the kernel
4536 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4537 * chain. 0 is returned on success. A negative errno code is returned
4538 * on a failure to set up the device, or if the name is a duplicate.
4540 * This is a wrapper around register_netdevice that takes the rtnl semaphore
4541 * and expands the device name if you passed a format string to
4544 int register_netdev(struct net_device *dev)
* If the name is a format string the caller wants us to do a
* name allocation.
4554 if (strchr(dev->name, '%')) {
4555 err = dev_alloc_name(dev, dev->name);
4560 err = register_netdevice(dev);
4565 EXPORT_SYMBOL(register_netdev);
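/*
 * Example (illustrative only, compiled out): the canonical driver
 * lifecycle around register_netdev()/unregister_netdev().
 * alloc_etherdev() is the Ethernet convenience wrapper over the
 * allocator below; example_netdev_ops is a hypothetical ops table.
 */
#if 0
static int example_probe_netdev(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;
	dev->netdev_ops = &example_netdev_ops;
	err = register_netdev(dev);	/* takes RTNL internally */
	if (err)
		free_netdev(dev);
	return err;
}

static void example_remove_netdev(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes RTNL internally */
	free_netdev(dev);
}
#endif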
4568 * netdev_wait_allrefs - wait until all references are gone.
4570 * This is called when unregistering network devices.
4572 * Any protocol or device that holds a reference should register
4573 * for netdevice notification, and cleanup and put back the
4574 * reference if they receive an UNREGISTER event.
* We can get stuck here if buggy protocols don't correctly
* call dev_put.
4578 static void netdev_wait_allrefs(struct net_device *dev)
4580 unsigned long rebroadcast_time, warning_time;
4582 rebroadcast_time = warning_time = jiffies;
4583 while (atomic_read(&dev->refcnt) != 0) {
4584 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
4587 /* Rebroadcast unregister notification */
4588 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4590 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4592 /* We must not have linkwatch events
4593 * pending on unregister. If this
4594 * happens, we simply run the queue
4595 * unscheduled, resulting in a noop
4598 linkwatch_run_queue();
4603 rebroadcast_time = jiffies;
4608 if (time_after(jiffies, warning_time + 10 * HZ)) {
4609 printk(KERN_EMERG "unregister_netdevice: "
4610 "waiting for %s to become free. Usage "
4612 dev->name, atomic_read(&dev->refcnt));
4613 warning_time = jiffies;
4622 * register_netdevice(x1);
4623 * register_netdevice(x2);
4625 * unregister_netdevice(y1);
4626 * unregister_netdevice(y2);
4632 * We are invoked by rtnl_unlock().
4633 * This allows us to deal with problems:
4634 * 1) We can delete sysfs objects which invoke hotplug
4635 * without deadlocking with linkwatch via keventd.
4636 * 2) Since we run with the RTNL semaphore not held, we can sleep
4637 * safely in order to wait for the netdev refcnt to drop to zero.
4639 * We must not return until all unregister events added during
4640 * the interval the lock was held have been completed.
4642 void netdev_run_todo(void)
4644 struct list_head list;
4646 /* Snapshot list, allow later requests */
4647 list_replace_init(&net_todo_list, &list);
4651 while (!list_empty(&list)) {
4652 struct net_device *dev
4653 = list_entry(list.next, struct net_device, todo_list);
4654 list_del(&dev->todo_list);
4656 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4657 printk(KERN_ERR "network todo '%s' but state %d\n",
4658 dev->name, dev->reg_state);
4663 dev->reg_state = NETREG_UNREGISTERED;
4665 on_each_cpu(flush_backlog, dev, 1);
4667 netdev_wait_allrefs(dev);
4670 BUG_ON(atomic_read(&dev->refcnt));
4671 WARN_ON(dev->ip_ptr);
4672 WARN_ON(dev->ip6_ptr);
4673 WARN_ON(dev->dn_ptr);
4675 if (dev->destructor)
4676 dev->destructor(dev);
4678 /* Free network device */
4679 kobject_put(&dev->dev.kobj);
4684 * dev_get_stats - get network device statistics
4685 * @dev: device to get statistics from
4687 * Get network statistics from device. The device driver may provide
4688 * its own method by setting dev->netdev_ops->get_stats; otherwise
4689 * the internal statistics structure is used.
4691 const struct net_device_stats *dev_get_stats(struct net_device *dev)
4693 const struct net_device_ops *ops = dev->netdev_ops;
4695 if (ops->ndo_get_stats)
4696 return ops->ndo_get_stats(dev);
4700 EXPORT_SYMBOL(dev_get_stats);
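/*
 * Example (illustrative only, compiled out): a driver that reads
 * hardware counters supplies ndo_get_stats; drivers that just update
 * dev->stats inline can leave it NULL and rely on the fallback above.
 * The counter-reading helper and its register names are hypothetical.
 */
#if 0
static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	dev->stats.rx_packets = example_read_counter(priv, EXAMPLE_RX_PKTS);
	dev->stats.tx_packets = example_read_counter(priv, EXAMPLE_TX_PKTS);
	return &dev->stats;
}
#endif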
4702 static void netdev_init_one_queue(struct net_device *dev,
4703 struct netdev_queue *queue,
4709 static void netdev_init_queues(struct net_device *dev)
4711 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4712 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
4713 spin_lock_init(&dev->tx_global_lock);
4717 * alloc_netdev_mq - allocate network device
4718 * @sizeof_priv: size of private data to allocate space for
4719 * @name: device name format string
4720 * @setup: callback to initialize device
4721 * @queue_count: the number of subqueues to allocate
4723 * Allocates a struct net_device with private data area for driver use
4724 * and performs basic initialization. Also allocates subquue structs
4725 * for each queue on the device at the end of the netdevice.
4727 struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4728 void (*setup)(struct net_device *), unsigned int queue_count)
4730 struct netdev_queue *tx;
4731 struct net_device *dev;
4735 BUG_ON(strlen(name) >= sizeof(dev->name));
4737 alloc_size = sizeof(struct net_device);
4739 /* ensure 32-byte alignment of private area */
4740 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4741 alloc_size += sizeof_priv;
4743 /* ensure 32-byte alignment of whole construct */
4744 alloc_size += NETDEV_ALIGN_CONST;
4746 p = kzalloc(alloc_size, GFP_KERNEL);
4748 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
4752 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
4754 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4760 dev = (struct net_device *)
4761 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4762 dev->padded = (char *)dev - (char *)p;
4763 dev_net_set(dev, &init_net);
4766 dev->num_tx_queues = queue_count;
4767 dev->real_num_tx_queues = queue_count;
4769 dev->gso_max_size = GSO_MAX_SIZE;
4771 netdev_init_queues(dev);
4773 INIT_LIST_HEAD(&dev->napi_list);
4775 strcpy(dev->name, name);
4778 EXPORT_SYMBOL(alloc_netdev_mq);
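/*
 * Example (illustrative only, compiled out): allocating a four-queue
 * Ethernet-style device and reaching the private area via
 * netdev_priv(), which points just past the aligned struct net_device
 * laid out above. The private struct and its field are hypothetical.
 */
#if 0
static struct net_device *example_alloc(void)
{
	struct net_device *dev;
	struct example_priv *priv;

	dev = alloc_netdev_mq(sizeof(*priv), "example%d", ether_setup, 4);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);
	priv->num_rings = 4;	/* hypothetical private field */
	return dev;
}
#endif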
4781 * free_netdev - free network device
4784 * This function does the last stage of destroying an allocated device
4785 * interface. The reference to the device object is released.
4786 * If this is the last reference then it will be freed.
4788 void free_netdev(struct net_device *dev)
4790 struct napi_struct *p, *n;
4792 release_net(dev_net(dev));
4796 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
4799 /* Compatibility with error handling in drivers */
4800 if (dev->reg_state == NETREG_UNINITIALIZED) {
4801 kfree((char *)dev - dev->padded);
4805 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4806 dev->reg_state = NETREG_RELEASED;
4808 /* will free via device release */
4809 put_device(&dev->dev);
4813 * synchronize_net - Synchronize with packet receive processing
4815 * Wait for packets currently being received to be done.
4816 * Does not block later packets from starting.
4818 void synchronize_net(void)
4825 * unregister_netdevice - remove device from the kernel
4828 * This function shuts down a device interface and removes it
4829 * from the kernel tables.
4831 * Callers must hold the rtnl semaphore. You may want
4832 * unregister_netdev() instead of this.
4835 void unregister_netdevice(struct net_device *dev)
4839 rollback_registered(dev);
4840 /* Finish processing unregister after unlock */
4845 * unregister_netdev - remove device from the kernel
4848 * This function shuts down a device interface and removes it
4849 * from the kernel tables.
4851 * This is just a wrapper for unregister_netdevice that takes
4852 * the rtnl semaphore. In general you want to use this and not
4853 * unregister_netdevice.
4855 void unregister_netdev(struct net_device *dev)
4858 unregister_netdevice(dev);
4862 EXPORT_SYMBOL(unregister_netdev);
* dev_change_net_namespace - move device to a different network namespace
4867 * @net: network namespace
4868 * @pat: If not NULL name pattern to try if the current device name
4869 * is already taken in the destination network namespace.
4871 * This function shuts down a device interface and moves it
4872 * to a new network namespace. On success 0 is returned, on
* a failure a negative errno code is returned.
4875 * Callers must hold the rtnl semaphore.
4878 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4881 const char *destname;
4886 /* Don't allow namespace local devices to be moved. */
4888 if (dev->features & NETIF_F_NETNS_LOCAL)
4892 /* Don't allow real devices to be moved when sysfs
4896 if (dev->dev.parent)
/* Ensure the device has been registered */
4902 if (dev->reg_state != NETREG_REGISTERED)
/* Get out if there is nothing to do */
4907 if (net_eq(dev_net(dev), net))
4910 /* Pick the destination device name, and ensure
4911 * we can use it in the destination network namespace.
4914 destname = dev->name;
4915 if (__dev_get_by_name(net, destname)) {
4916 /* We get here if we can't use the current device name */
4919 if (!dev_valid_name(pat))
4921 if (strchr(pat, '%')) {
4922 if (__dev_alloc_name(net, pat, buf) < 0)
4927 if (__dev_get_by_name(net, destname))
* And now a mini version of register_netdevice and unregister_netdevice.
4935 /* If device is running close it first. */
4938 /* And unlink it from device chain */
4940 unlist_netdevice(dev);
4944 /* Shutdown queueing discipline. */
4947 /* Notify protocols, that we are about to destroy
4948 this device. They should clean all the things.
4950 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4953 * Flush the unicast and multicast chains
4955 dev_addr_discard(dev);
4957 netdev_unregister_kobject(dev);
4959 /* Actually switch the network namespace */
4960 dev_net_set(dev, net);
4962 /* Assign the new device name */
4963 if (destname != dev->name)
4964 strcpy(dev->name, destname);
4966 /* If there is an ifindex conflict assign a new one */
4967 if (__dev_get_by_index(net, dev->ifindex)) {
4968 int iflink = (dev->iflink == dev->ifindex);
4969 dev->ifindex = dev_new_index(net);
4971 dev->iflink = dev->ifindex;
4974 /* Fixup kobjects */
4975 err = netdev_register_kobject(dev);
4978 /* Add the device back in the hashes */
4979 list_netdevice(dev);
4981 /* Notify protocols, that a new device appeared. */
4982 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4990 static int dev_cpu_callback(struct notifier_block *nfb,
4991 unsigned long action,
4994 struct sk_buff **list_skb;
4995 struct Qdisc **list_net;
4996 struct sk_buff *skb;
4997 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4998 struct softnet_data *sd, *oldsd;
5000 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
5003 local_irq_disable();
5004 cpu = smp_processor_id();
5005 sd = &per_cpu(softnet_data, cpu);
5006 oldsd = &per_cpu(softnet_data, oldcpu);
5008 /* Find end of our completion_queue. */
5009 list_skb = &sd->completion_queue;
5011 list_skb = &(*list_skb)->next;
5012 /* Append completion queue from offline CPU. */
5013 *list_skb = oldsd->completion_queue;
5014 oldsd->completion_queue = NULL;
5016 /* Find end of our output_queue. */
5017 list_net = &sd->output_queue;
5019 list_net = &(*list_net)->next_sched;
5020 /* Append output queue from offline CPU. */
5021 *list_net = oldsd->output_queue;
5022 oldsd->output_queue = NULL;
5024 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5027 /* Process offline CPU's input_pkt_queue */
5028 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
5036 * netdev_increment_features - increment feature set by one
5037 * @all: current feature set
5038 * @one: new feature set
5039 * @mask: mask feature set
5041 * Computes a new feature set after adding a device with feature set
5042 * @one to the master device with current feature set @all. Will not
5043 * enable anything that is off in @mask. Returns the new feature set.
5045 unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5048 /* If device needs checksumming, downgrade to it. */
5049 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
5050 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5051 else if (mask & NETIF_F_ALL_CSUM) {
5052 /* If one device supports v4/v6 checksumming, set for all. */
5053 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5054 !(all & NETIF_F_GEN_CSUM)) {
5055 all &= ~NETIF_F_ALL_CSUM;
5056 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5059 /* If one device supports hw checksumming, set for all. */
5060 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5061 all &= ~NETIF_F_ALL_CSUM;
5062 all |= NETIF_F_HW_CSUM;
5066 one |= NETIF_F_ALL_CSUM;
5068 one |= all & NETIF_F_ONE_FOR_ALL;
5069 all &= one | NETIF_F_LLTX | NETIF_F_GSO;
5070 all |= one & mask & NETIF_F_ONE_FOR_ALL;
5074 EXPORT_SYMBOL(netdev_increment_features);
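/*
 * Example (illustrative only, compiled out): a bonding-style master
 * recomputing its feature set by folding in each slave, which is the
 * intended use of netdev_increment_features(). The slave bookkeeping
 * structure is hypothetical.
 */
#if 0
static void example_compute_features(struct net_device *master,
				     struct list_head *slaves)
{
	unsigned long features = master->features & ~NETIF_F_ONE_FOR_ALL;
	struct example_slave *s;

	list_for_each_entry(s, slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features,
						     NETIF_F_ONE_FOR_ALL);
	master->features = features;
}
#endif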
5076 static struct hlist_head *netdev_create_hash(void)
5079 struct hlist_head *hash;
5081 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5083 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5084 INIT_HLIST_HEAD(&hash[i]);
5089 /* Initialize per network namespace state */
5090 static int __net_init netdev_init(struct net *net)
5092 INIT_LIST_HEAD(&net->dev_base_head);
5094 net->dev_name_head = netdev_create_hash();
5095 if (net->dev_name_head == NULL)
5098 net->dev_index_head = netdev_create_hash();
5099 if (net->dev_index_head == NULL)
5105 kfree(net->dev_name_head);
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;
	parent = dev->dev.parent;
	if (!parent)
		return buffer;
	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
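/*
 * Illustrative sketch (not part of the original file): a typical caller
 * passes a stack buffer and logs the result; an empty string comes back
 * when no parent device or driver is bound. "report_tx_timeout" is a
 * hypothetical function name; compiled out via #if 0.
 */
#if 0
static void report_tx_timeout(struct net_device *dev)
{
	char drivername[64];

	printk(KERN_WARNING "%s (%s): transmit timed out\n",
	       dev->name,
	       netdev_drivername(dev, drivername, sizeof(drivername)));
}
#endif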
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}
static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev;
	/* Push all migratable network devices back to the
	 * initial network namespace.
	 */
	rtnl_lock();
restart:
	for_each_netdev(net, dev) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Delete virtual devices */
		if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
			dev->rtnl_link_ops->dellink(dev);
			goto restart;
		}

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
				__func__, dev->name, err);
			BUG();
		}
		goto restart;
	}
	rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
};
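/*
 * Illustrative sketch (not part of the original file): any subsystem
 * can hook namespace setup/teardown the same way these two tables do,
 * by registering a struct pernet_operations. All names below are
 * hypothetical; compiled out via #if 0.
 */
#if 0
static int __net_init example_net_init(struct net *net)
{
	/* Allocate per-namespace state here; return -ENOMEM on failure. */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* Free whatever example_net_init() allocated. */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

/* From the subsystem's own init code:
 *	register_pernet_subsys(&example_net_ops);
 */
#endif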
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *queue;

		queue = &per_cpu(softnet_data, i);
		skb_queue_head_init(&queue->input_pkt_queue);
		queue->completion_queue = NULL;
		INIT_LIST_HEAD(&queue->poll_list);

		queue->backlog.poll = process_backlog;
		queue->backlog.weight = weight_p;
		queue->backlog.gro_list = NULL;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, maintain this invariant by keeping the
	 * loopback device the first device on the list of network
	 * devices -- the first device to appear and the last network
	 * device to disappear.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

EXPORT_SYMBOL(dev_load);

EXPORT_PER_CPU_SYMBOL(softnet_data);