/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

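/*
 * Illustrative sketch (not part of the original file): the reader side of
 * the locking scheme described above.  A pure reader can walk the device
 * list under rcu_read_lock() alone; the hypothetical helper below counts
 * running devices in a namespace this way.
 */
#if 0	/* example only */
static int example_count_running(struct net *net)
{
	struct net_device *dev;
	int count = 0;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)	/* safe against concurrent writers */
		if (dev->flags & IFF_UP)
			count++;
	rcu_read_unlock();
	return count;
}
#endif
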
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

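/*
 * Illustrative sketch (not part of the original file): registering and
 * removing a handler for a single EtherType with dev_add_pack() and
 * dev_remove_pack().  The names example_rcv/example_ptype and the
 * 0x88b5 local-experimental EtherType are hypothetical.
 */
#if 0	/* example only */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* a real handler would parse skb->data here */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_ptype __read_mostly = {
	.type = htons(0x88b5),	/* hashed into ptype_base, not ptype_all */
	.func = example_rcv,
};

/* dev_add_pack(&example_ptype) on init,
 * dev_remove_pack(&example_ptype) on exit. */
#endif
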
/*******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

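/*
 * Illustrative note (not part of the original file): with the parser above,
 * a kernel command line such as
 *
 *	netdev=5,0x340,0xd0000,0xd4000,eth0
 *
 * fills map.irq, map.base_addr, map.mem_start and map.mem_end from the
 * leading integers, and the remaining string ("eth0") becomes the entry
 * name passed to netdev_boot_setup_add().  The specific values here are
 * made up for the example.
 */
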
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

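/*
 * Illustrative sketch (not part of the original file): the refcounted
 * lookup pattern the comment above describes.  "eth0" is an arbitrary
 * example name.
 */
#if 0	/* example only */
static void example_lookup(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "eth0");

	if (!dev)
		return;
	/* ... use dev; the held reference keeps it from being freed ... */
	dev_put(dev);	/* every successful dev_get_by_name() needs this */
}
#endif
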
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

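/*
 * Illustrative sketch (not part of the original file): a driver typically
 * calls dev_alloc_name() with a "%d" format before registration so each
 * instance gets the next free unit number.  "example%d" is a made-up
 * format string.
 */
#if 0	/* example only */
static int example_name_device(struct net_device *dev)
{
	int unit = dev_alloc_name(dev, "example%d");

	if (unit < 0)		/* -EINVAL, -ENOMEM or -ENFILE from above */
		return unit;
	/* dev->name is now e.g. "example0"; unit holds the number chosen */
	return 0;
}
#endif
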
static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname, 1);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 * Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier so that it gets a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

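/*
 * Illustrative sketch (not part of the original file): a minimal notifier
 * block for the chain above.  example_netdev_event is a hypothetical name.
 */
#if 0	/* example only */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		printk(KERN_DEBUG "%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		/* drop any per-device state before the device stops */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};
/* register_netdevice_notifier(&example_nb); existing devices are replayed
 * as NETDEV_REGISTER/NETDEV_UP events, as described above. */
#endif
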
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!(dev->flags & IFF_UP) ||
		     (skb->len > (dev->mtu + dev->hard_header_len)))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

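/*
 * Illustrative sketch (not part of the original file): how a pair device
 * (in the style of veth) might use dev_forward_skb() from its transmit
 * path.  example_priv and its peer field are hypothetical.
 */
#if 0	/* example only */
struct example_priv {
	struct net_device *peer;	/* other end of the pair */
};

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	/* hands the skb to the peer's receive path; frees it on failure */
	dev_forward_skb(priv->peer, skb);
	return NETDEV_TX_OK;
}
#endif
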
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp_set(skb);
#else
	net_timestamp_set(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	unsigned int real_num = dev->real_num_tx_queues;

	if (unlikely(txq > dev->num_tx_queues))
		;
	else if (txq > real_num)
		dev->real_num_tx_queues = txq;
	else if (txq < real_num) {
		dev->real_num_tx_queues = txq;
		qdisc_reset_all_tx_gt(dev, txq);
	}
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

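/*
 * Illustrative sketch (not part of the original file): a multiqueue driver
 * shrinking its active TX queue set, e.g. after a channel reconfiguration.
 * The function name and the count of 4 are made up.
 */
#if 0	/* example only */
static void example_shrink_tx(struct net_device *dev)
{
	/* dev was allocated with more queues; only 4 will now be used.
	 * Queues >= 4 have their qdiscs reset by the helper above. */
	netif_set_real_num_tx_queues(dev, 4);
}
#endif
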
#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

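/*
 * Illustrative note (not part of the original file): the mapping that
 * dev_can_checksum() implements.  A device advertising only NETIF_F_IP_CSUM
 * can checksum ETH_P_IP packets; NETIF_F_GEN_CSUM covers any protocol; and
 * for 802.1Q frames the check is repeated against dev->vlan_features using
 * the encapsulated EtherType.  So, for example, a driver setting
 *
 *	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
 *	dev->vlan_features = dev->features;
 *
 * would pass for IPv4 both with and without a VLAN tag, but not for IPv6.
 */
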
8a83a00b
AB
1708/**
1709 * skb_dev_set -- assign a new device to a buffer
1710 * @skb: buffer for the new device
1711 * @dev: network device
1712 *
1713 * If an skb is owned by a device already, we have to reset
1714 * all data private to the namespace a device belongs to
1715 * before assigning it a new device.
1716 */
1717#ifdef CONFIG_NET_NS
1718void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1719{
1720 skb_dst_drop(skb);
1721 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1722 secpath_reset(skb);
1723 nf_reset(skb);
1724 skb_init_secmark(skb);
1725 skb->mark = 0;
1726 skb->priority = 0;
1727 skb->nf_trace = 0;
1728 skb->ipvs_property = 0;
1729#ifdef CONFIG_NET_SCHED
1730 skb->tc_index = 0;
1731#endif
1732 }
1733 skb->dev = dev;
1734}
1735EXPORT_SYMBOL(skb_set_dev);
1736#endif /* CONFIG_NET_NS */
1737
1da177e4
LT
1738/*
1739 * Invalidate hardware checksum when packet is to be mangled, and
1740 * complete checksum manually on outgoing path.
1741 */
84fa7933 1742int skb_checksum_help(struct sk_buff *skb)
1da177e4 1743{
d3bc23e7 1744 __wsum csum;
663ead3b 1745 int ret = 0, offset;
1da177e4 1746
84fa7933 1747 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
1748 goto out_set_summed;
1749
1750 if (unlikely(skb_shinfo(skb)->gso_size)) {
a430a43d
HX
1751 /* Let GSO fix up the checksum. */
1752 goto out_set_summed;
1da177e4
LT
1753 }
1754
a030847e
HX
1755 offset = skb->csum_start - skb_headroom(skb);
1756 BUG_ON(offset >= skb_headlen(skb));
1757 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1758
1759 offset += skb->csum_offset;
1760 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1761
1762 if (skb_cloned(skb) &&
1763 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
1764 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1765 if (ret)
1766 goto out;
1767 }
1768
a030847e 1769 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
a430a43d 1770out_set_summed:
1da177e4 1771 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 1772out:
1da177e4
LT
1773 return ret;
1774}
d1b19dff 1775EXPORT_SYMBOL(skb_checksum_help);
1da177e4 1776
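/*
 * A sketch (not part of dev.c): dev_hard_start_xmit() below normally
 * performs this fallback for drivers, but a driver whose hardware
 * cannot checksum a particular packet can call skb_checksum_help()
 * itself.  foo_hw_can_csum() is a hypothetical capability test.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !foo_hw_can_csum(skb) &&		/* hypothetical capability test */
	    skb_checksum_help(skb)) {		/* finish the checksum in software */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/* ... hand the skb to hardware ... */
	return NETDEV_TX_OK;
}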
f6a78bfc
HX
1777/**
1778 * skb_gso_segment - Perform segmentation on skb.
1779 * @skb: buffer to segment
576a30eb 1780 * @features: features for the output path (see dev->features)
f6a78bfc
HX
1781 *
1782 * This function segments the given skb and returns a list of segments.
576a30eb
HX
1783 *
1784 * It may return NULL if the skb requires no segmentation. This is
1785 * only possible when GSO is used for verifying header integrity.
f6a78bfc 1786 */
576a30eb 1787struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
f6a78bfc
HX
1788{
1789 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1790 struct packet_type *ptype;
252e3346 1791 __be16 type = skb->protocol;
a430a43d 1792 int err;
f6a78bfc 1793
459a98ed 1794 skb_reset_mac_header(skb);
b0e380b1 1795 skb->mac_len = skb->network_header - skb->mac_header;
f6a78bfc
HX
1796 __skb_pull(skb, skb->mac_len);
1797
67fd1a73
HX
1798 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1799 struct net_device *dev = skb->dev;
1800 struct ethtool_drvinfo info = {};
1801
1802 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1803 dev->ethtool_ops->get_drvinfo(dev, &info);
1804
1805 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1806 "ip_summed=%d",
1807 info.driver, dev ? dev->features : 0L,
1808 skb->sk ? skb->sk->sk_route_caps : 0L,
1809 skb->len, skb->data_len, skb->ip_summed);
1810
a430a43d
HX
1811 if (skb_header_cloned(skb) &&
1812 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1813 return ERR_PTR(err);
1814 }
1815
f6a78bfc 1816 rcu_read_lock();
82d8a867
PE
1817 list_for_each_entry_rcu(ptype,
1818 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
f6a78bfc 1819 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
84fa7933 1820 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
a430a43d
HX
1821 err = ptype->gso_send_check(skb);
1822 segs = ERR_PTR(err);
1823 if (err || skb_gso_ok(skb, features))
1824 break;
d56f90a7
ACM
1825 __skb_push(skb, (skb->data -
1826 skb_network_header(skb)));
a430a43d 1827 }
576a30eb 1828 segs = ptype->gso_segment(skb, features);
f6a78bfc
HX
1829 break;
1830 }
1831 }
1832 rcu_read_unlock();
1833
98e399f8 1834 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 1835
f6a78bfc
HX
1836 return segs;
1837}
f6a78bfc
HX
1838EXPORT_SYMBOL(skb_gso_segment);
1839
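/*
 * A sketch (not part of dev.c) of consuming the list returned by
 * skb_gso_segment(), in the style of dev_gso_segment() below.
 * foo_send_one() is a hypothetical per-segment transmit.
 */
static int foo_segment_and_send(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = skb_gso_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)				/* header verification only */
		return foo_send_one(skb);

	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		foo_send_one(nskb);
	}
	kfree_skb(skb);				/* original is no longer needed */
	return 0;
}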
fb286bb2
HX
1840/* Take action when hardware reception checksum errors are detected. */
1841#ifdef CONFIG_BUG
1842void netdev_rx_csum_fault(struct net_device *dev)
1843{
1844 if (net_ratelimit()) {
4ec93edb 1845 printk(KERN_ERR "%s: hw csum failure.\n",
246a4212 1846 dev ? dev->name : "<unknown>");
fb286bb2
HX
1847 dump_stack();
1848 }
1849}
1850EXPORT_SYMBOL(netdev_rx_csum_fault);
1851#endif
1852
1da177e4
LT
 1853 /* Actually, we should eliminate this check as soon as we know that:
 1854 * 1. IOMMU is present and allows mapping all of memory.
1855 * 2. No high memory really exists on this machine.
1856 */
1857
9092c658 1858static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 1859{
3d3a8533 1860#ifdef CONFIG_HIGHMEM
1da177e4 1861 int i;
5acbbd42
FT
1862 if (!(dev->features & NETIF_F_HIGHDMA)) {
1863 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1864 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1865 return 1;
1866 }
1da177e4 1867
5acbbd42
FT
1868 if (PCI_DMA_BUS_IS_PHYS) {
1869 struct device *pdev = dev->dev.parent;
1da177e4 1870
9092c658
ED
1871 if (!pdev)
1872 return 0;
5acbbd42
FT
1873 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1874 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1875 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1876 return 1;
1877 }
1878 }
3d3a8533 1879#endif
1da177e4
LT
1880 return 0;
1881}
1da177e4 1882
f6a78bfc
HX
1883struct dev_gso_cb {
1884 void (*destructor)(struct sk_buff *skb);
1885};
1886
1887#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1888
1889static void dev_gso_skb_destructor(struct sk_buff *skb)
1890{
1891 struct dev_gso_cb *cb;
1892
1893 do {
1894 struct sk_buff *nskb = skb->next;
1895
1896 skb->next = nskb->next;
1897 nskb->next = NULL;
1898 kfree_skb(nskb);
1899 } while (skb->next);
1900
1901 cb = DEV_GSO_CB(skb);
1902 if (cb->destructor)
1903 cb->destructor(skb);
1904}
1905
1906/**
1907 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1908 * @skb: buffer to segment
1909 *
1910 * This function segments the given skb and stores the list of segments
1911 * in skb->next.
1912 */
1913static int dev_gso_segment(struct sk_buff *skb)
1914{
1915 struct net_device *dev = skb->dev;
1916 struct sk_buff *segs;
576a30eb
HX
1917 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1918 NETIF_F_SG : 0);
1919
1920 segs = skb_gso_segment(skb, features);
1921
1922 /* Verifying header integrity only. */
1923 if (!segs)
1924 return 0;
f6a78bfc 1925
801678c5 1926 if (IS_ERR(segs))
f6a78bfc
HX
1927 return PTR_ERR(segs);
1928
1929 skb->next = segs;
1930 DEV_GSO_CB(skb)->destructor = skb->destructor;
1931 skb->destructor = dev_gso_skb_destructor;
1932
1933 return 0;
1934}
1935
fc6055a5
ED
1936/*
1937 * Try to orphan skb early, right before transmission by the device.
2244d07b
OH
1938 * We cannot orphan skb if tx timestamp is requested or the sk-reference
 1939 * is needed at driver level for other reasons, e.g. see net/can/raw.c
fc6055a5
ED
1940 */
1941static inline void skb_orphan_try(struct sk_buff *skb)
1942{
87fd308c
ED
1943 struct sock *sk = skb->sk;
1944
2244d07b 1945 if (sk && !skb_shinfo(skb)->tx_flags) {
87fd308c
ED
 1946 /* skb_tx_hash() won't be able to get the sk.
 1947 * We copy sk_hash into skb->rxhash.
1948 */
1949 if (!skb->rxhash)
1950 skb->rxhash = sk->sk_hash;
fc6055a5 1951 skb_orphan(skb);
87fd308c 1952 }
fc6055a5
ED
1953}
1954
6afff0ca
JF
1955/*
1956 * Returns true if either:
1957 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
1958 * 2. skb is fragmented and the device does not support SG, or if
1959 * at least one of fragments is in highmem and device does not
1960 * support DMA from it.
1961 */
1962static inline int skb_needs_linearize(struct sk_buff *skb,
1963 struct net_device *dev)
1964{
1965 return skb_is_nonlinear(skb) &&
21dc3301 1966 ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
6afff0ca
JF
1967 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
1968 illegal_highdma(dev, skb))));
1969}
1970
fd2ea0a7
DM
1971int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1972 struct netdev_queue *txq)
f6a78bfc 1973{
00829823 1974 const struct net_device_ops *ops = dev->netdev_ops;
572a9d7b 1975 int rc = NETDEV_TX_OK;
00829823 1976
f6a78bfc 1977 if (likely(!skb->next)) {
9be9a6b9 1978 if (!list_empty(&ptype_all))
f6a78bfc
HX
1979 dev_queue_xmit_nit(skb, dev);
1980
93f154b5
ED
1981 /*
 1982 * If the device doesn't need skb->dst, release it right now while
 1983 * it's hot in this cpu's cache.
1984 */
adf30907
ED
1985 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1986 skb_dst_drop(skb);
1987
fc6055a5 1988 skb_orphan_try(skb);
9ccb8975
DM
1989
1990 if (netif_needs_gso(dev, skb)) {
1991 if (unlikely(dev_gso_segment(skb)))
1992 goto out_kfree_skb;
1993 if (skb->next)
1994 goto gso;
6afff0ca
JF
1995 } else {
1996 if (skb_needs_linearize(skb, dev) &&
1997 __skb_linearize(skb))
1998 goto out_kfree_skb;
1999
2000 /* If packet is not checksummed and device does not
2001 * support checksumming for this protocol, complete
2002 * checksumming here.
2003 */
2004 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2005 skb_set_transport_header(skb, skb->csum_start -
2006 skb_headroom(skb));
2007 if (!dev_can_checksum(dev, skb) &&
2008 skb_checksum_help(skb))
2009 goto out_kfree_skb;
2010 }
9ccb8975
DM
2011 }
2012
ac45f602 2013 rc = ops->ndo_start_xmit(skb, dev);
ec634fe3 2014 if (rc == NETDEV_TX_OK)
08baf561 2015 txq_trans_update(txq);
ac45f602 2016 return rc;
f6a78bfc
HX
2017 }
2018
576a30eb 2019gso:
f6a78bfc
HX
2020 do {
2021 struct sk_buff *nskb = skb->next;
f6a78bfc
HX
2022
2023 skb->next = nskb->next;
2024 nskb->next = NULL;
068a2de5
KK
2025
2026 /*
 2027 * If the device doesn't need nskb->dst, release it right now while
 2028 * it's hot in this cpu's cache.
2029 */
2030 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2031 skb_dst_drop(nskb);
2032
00829823 2033 rc = ops->ndo_start_xmit(nskb, dev);
ec634fe3 2034 if (unlikely(rc != NETDEV_TX_OK)) {
572a9d7b
PM
2035 if (rc & ~NETDEV_TX_MASK)
2036 goto out_kfree_gso_skb;
f54d9e8d 2037 nskb->next = skb->next;
f6a78bfc
HX
2038 skb->next = nskb;
2039 return rc;
2040 }
08baf561 2041 txq_trans_update(txq);
fd2ea0a7 2042 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
f54d9e8d 2043 return NETDEV_TX_BUSY;
f6a78bfc 2044 } while (skb->next);
4ec93edb 2045
572a9d7b
PM
2046out_kfree_gso_skb:
2047 if (likely(skb->next == NULL))
2048 skb->destructor = DEV_GSO_CB(skb)->destructor;
f6a78bfc
HX
2049out_kfree_skb:
2050 kfree_skb(skb);
572a9d7b 2051 return rc;
f6a78bfc
HX
2052}
2053
0a9627f2 2054static u32 hashrnd __read_mostly;
b6b2fed1 2055
9247744e 2056u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
8f0f2223 2057{
7019298a 2058 u32 hash;
b6b2fed1 2059
513de11b
DM
2060 if (skb_rx_queue_recorded(skb)) {
2061 hash = skb_get_rx_queue(skb);
d1b19dff 2062 while (unlikely(hash >= dev->real_num_tx_queues))
513de11b
DM
2063 hash -= dev->real_num_tx_queues;
2064 return hash;
2065 }
ec581f6a
ED
2066
2067 if (skb->sk && skb->sk->sk_hash)
7019298a 2068 hash = skb->sk->sk_hash;
ec581f6a 2069 else
87fd308c 2070 hash = (__force u16) skb->protocol ^ skb->rxhash;
0a9627f2 2071 hash = jhash_1word(hash, hashrnd);
b6b2fed1
DM
2072
2073 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
8f0f2223 2074}
9247744e 2075EXPORT_SYMBOL(skb_tx_hash);
8f0f2223 2076
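/*
 * Note: the return statement above maps a uniform 32-bit hash onto
 * [0, real_num_tx_queues) without a divide: a 32-bit value times n is
 * a 64-bit product strictly below n * 2^32, so its top 32 bits fall in
 * [0, n).  The trick in isolation:
 */
static inline u16 scale_hash_to_queue(u32 hash, u16 nr_queues)
{
	return (u16) (((u64) hash * nr_queues) >> 32);
}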
ed04642f
ED
2077static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2078{
2079 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2080 if (net_ratelimit()) {
7a161ea9
ED
2081 pr_warning("%s selects TX queue %d, but "
2082 "real number of TX queues is %d\n",
2083 dev->name, queue_index, dev->real_num_tx_queues);
ed04642f
ED
2084 }
2085 return 0;
2086 }
2087 return queue_index;
2088}
2089
e8a0464c
DM
2090static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2091 struct sk_buff *skb)
2092{
b0f77d0e 2093 int queue_index;
deabc772 2094 const struct net_device_ops *ops = dev->netdev_ops;
a4ee3ce3 2095
deabc772
HS
2096 if (ops->ndo_select_queue) {
2097 queue_index = ops->ndo_select_queue(dev, skb);
2098 queue_index = dev_cap_txqueue(dev, queue_index);
2099 } else {
2100 struct sock *sk = skb->sk;
2101 queue_index = sk_tx_queue_get(sk);
2102 if (queue_index < 0) {
a4ee3ce3 2103
a4ee3ce3
KK
2104 queue_index = 0;
2105 if (dev->real_num_tx_queues > 1)
2106 queue_index = skb_tx_hash(dev, skb);
fd2ea0a7 2107
8728c544 2108 if (sk) {
87eb3670 2109 struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
8728c544
ED
2110
2111 if (dst && skb_dst(skb) == dst)
2112 sk_tx_queue_set(sk, queue_index);
2113 }
a4ee3ce3
KK
2114 }
2115 }
eae792b7 2116
fd2ea0a7
DM
2117 skb_set_queue_mapping(skb, queue_index);
2118 return netdev_get_tx_queue(dev, queue_index);
e8a0464c
DM
2119}
2120
bbd8a0d3
KK
2121static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2122 struct net_device *dev,
2123 struct netdev_queue *txq)
2124{
2125 spinlock_t *root_lock = qdisc_lock(q);
79640a4c 2126 bool contended = qdisc_is_running(q);
bbd8a0d3
KK
2127 int rc;
2128
79640a4c
ED
2129 /*
2130 * Heuristic to force contended enqueues to serialize on a
2131 * separate lock before trying to get qdisc main lock.
2132 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2133 * and dequeue packets faster.
2134 */
2135 if (unlikely(contended))
2136 spin_lock(&q->busylock);
2137
bbd8a0d3
KK
2138 spin_lock(root_lock);
2139 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2140 kfree_skb(skb);
2141 rc = NET_XMIT_DROP;
2142 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 2143 qdisc_run_begin(q)) {
bbd8a0d3
KK
2144 /*
2145 * This is a work-conserving queue; there are no old skbs
2146 * waiting to be sent out; and the qdisc is not running -
2147 * xmit the skb directly.
2148 */
7fee226a
ED
2149 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2150 skb_dst_force(skb);
bbd8a0d3 2151 __qdisc_update_bstats(q, skb->len);
79640a4c
ED
2152 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2153 if (unlikely(contended)) {
2154 spin_unlock(&q->busylock);
2155 contended = false;
2156 }
bbd8a0d3 2157 __qdisc_run(q);
79640a4c 2158 } else
bc135b23 2159 qdisc_run_end(q);
bbd8a0d3
KK
2160
2161 rc = NET_XMIT_SUCCESS;
2162 } else {
7fee226a 2163 skb_dst_force(skb);
bbd8a0d3 2164 rc = qdisc_enqueue_root(skb, q);
79640a4c
ED
2165 if (qdisc_run_begin(q)) {
2166 if (unlikely(contended)) {
2167 spin_unlock(&q->busylock);
2168 contended = false;
2169 }
2170 __qdisc_run(q);
2171 }
bbd8a0d3
KK
2172 }
2173 spin_unlock(root_lock);
79640a4c
ED
2174 if (unlikely(contended))
2175 spin_unlock(&q->busylock);
bbd8a0d3
KK
2176 return rc;
2177}
2178
745e20f1
ED
2179static DEFINE_PER_CPU(int, xmit_recursion);
2180#define RECURSION_LIMIT 3
2181
d29f749e
DJ
2182/**
2183 * dev_queue_xmit - transmit a buffer
2184 * @skb: buffer to transmit
2185 *
2186 * Queue a buffer for transmission to a network device. The caller must
2187 * have set the device and priority and built the buffer before calling
2188 * this function. The function can be called from an interrupt.
2189 *
2190 * A negative errno code is returned on a failure. A success does not
2191 * guarantee the frame will be transmitted as it may be dropped due
2192 * to congestion or traffic shaping.
2193 *
2194 * -----------------------------------------------------------------------------------
2195 * I notice this method can also return errors from the queue disciplines,
2196 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2197 * be positive.
2198 *
2199 * Regardless of the return value, the skb is consumed, so it is currently
2200 * difficult to retry a send to this method. (You can bump the ref count
2201 * before sending to hold a reference for retry if you are careful.)
2202 *
2203 * When calling this method, interrupts MUST be enabled. This is because
2204 * the BH enable code must have IRQs enabled so that it will not deadlock.
2205 * --BLG
2206 */
1da177e4
LT
2207int dev_queue_xmit(struct sk_buff *skb)
2208{
2209 struct net_device *dev = skb->dev;
dc2b4847 2210 struct netdev_queue *txq;
1da177e4
LT
2211 struct Qdisc *q;
2212 int rc = -ENOMEM;
2213
4ec93edb
YH
2214 /* Disable soft irqs for various locks below. Also
2215 * stops preemption for RCU.
1da177e4 2216 */
4ec93edb 2217 rcu_read_lock_bh();
1da177e4 2218
eae792b7 2219 txq = dev_pick_tx(dev, skb);
a898def2 2220 q = rcu_dereference_bh(txq->qdisc);
37437bb2 2221
1da177e4 2222#ifdef CONFIG_NET_CLS_ACT
d1b19dff 2223 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1da177e4
LT
2224#endif
2225 if (q->enqueue) {
bbd8a0d3 2226 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 2227 goto out;
1da177e4
LT
2228 }
2229
2230 /* The device has no queue. Common case for software devices:
 2231 loopback, all sorts of tunnels...
2232
932ff279
HX
2233 Really, it is unlikely that netif_tx_lock protection is necessary
 2234 here. (E.g. loopback and IP tunnels are clean, ignoring statistics
1da177e4
LT
2235 counters.)
 2236 However, it is possible that they rely on the protection
 2237 we provide here.
2238
 2239 Check this and take the lock. It is not prone to deadlocks.
 2240 Or shoot the noqueue qdisc instead; that is even simpler 8)
2241 */
2242 if (dev->flags & IFF_UP) {
2243 int cpu = smp_processor_id(); /* ok because BHs are off */
2244
c773e847 2245 if (txq->xmit_lock_owner != cpu) {
1da177e4 2246
745e20f1
ED
2247 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2248 goto recursion_alert;
2249
c773e847 2250 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 2251
fd2ea0a7 2252 if (!netif_tx_queue_stopped(txq)) {
745e20f1 2253 __this_cpu_inc(xmit_recursion);
572a9d7b 2254 rc = dev_hard_start_xmit(skb, dev, txq);
745e20f1 2255 __this_cpu_dec(xmit_recursion);
572a9d7b 2256 if (dev_xmit_complete(rc)) {
c773e847 2257 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
2258 goto out;
2259 }
2260 }
c773e847 2261 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
2262 if (net_ratelimit())
2263 printk(KERN_CRIT "Virtual device %s asks to "
2264 "queue packet!\n", dev->name);
2265 } else {
2266 /* Recursion is detected! It is possible,
745e20f1
ED
2267 * unfortunately
2268 */
2269recursion_alert:
1da177e4
LT
2270 if (net_ratelimit())
2271 printk(KERN_CRIT "Dead loop on virtual device "
2272 "%s, fix it urgently!\n", dev->name);
2273 }
2274 }
2275
2276 rc = -ENETDOWN;
d4828d85 2277 rcu_read_unlock_bh();
1da177e4 2278
1da177e4
LT
2279 kfree_skb(skb);
2280 return rc;
2281out:
d4828d85 2282 rcu_read_unlock_bh();
1da177e4
LT
2283 return rc;
2284}
d1b19dff 2285EXPORT_SYMBOL(dev_queue_xmit);
1da177e4
LT
2286
2287
2288/*=======================================================================
2289 Receiver routines
2290 =======================================================================*/
2291
6b2bedc3 2292int netdev_max_backlog __read_mostly = 1000;
3b098e2d 2293int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3
SH
2294int netdev_budget __read_mostly = 300;
2295int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4 2296
eecfd7c4
ED
2297/* Called with irq disabled */
2298static inline void ____napi_schedule(struct softnet_data *sd,
2299 struct napi_struct *napi)
2300{
2301 list_add_tail(&napi->poll_list, &sd->poll_list);
2302 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2303}
2304
0a9627f2 2305/*
bfb564e7
KK
2306 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2307 * and src/dst port numbers. Returns a non-zero hash number on success
2308 * and 0 on failure.
0a9627f2 2309 */
bfb564e7 2310__u32 __skb_get_rxhash(struct sk_buff *skb)
0a9627f2 2311{
12fcdefb 2312 int nhoff, hash = 0, poff;
0a9627f2
TH
2313 struct ipv6hdr *ip6;
2314 struct iphdr *ip;
0a9627f2 2315 u8 ip_proto;
8c52d509
CG
2316 u32 addr1, addr2, ihl;
2317 union {
2318 u32 v32;
2319 u16 v16[2];
2320 } ports;
0a9627f2 2321
bfb564e7 2322 nhoff = skb_network_offset(skb);
0a9627f2
TH
2323
2324 switch (skb->protocol) {
2325 case __constant_htons(ETH_P_IP):
bfb564e7 2326 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
0a9627f2
TH
2327 goto done;
2328
1003489e 2329 ip = (struct iphdr *) (skb->data + nhoff);
dbe5775b
CG
2330 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2331 ip_proto = 0;
2332 else
2333 ip_proto = ip->protocol;
b249dcb8
ED
2334 addr1 = (__force u32) ip->saddr;
2335 addr2 = (__force u32) ip->daddr;
0a9627f2
TH
2336 ihl = ip->ihl;
2337 break;
2338 case __constant_htons(ETH_P_IPV6):
bfb564e7 2339 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
0a9627f2
TH
2340 goto done;
2341
1003489e 2342 ip6 = (struct ipv6hdr *) (skb->data + nhoff);
0a9627f2 2343 ip_proto = ip6->nexthdr;
b249dcb8
ED
2344 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2345 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
0a9627f2
TH
2346 ihl = (40 >> 2);
2347 break;
2348 default:
2349 goto done;
2350 }
bfb564e7 2351
12fcdefb
CG
2352 ports.v32 = 0;
2353 poff = proto_ports_offset(ip_proto);
2354 if (poff >= 0) {
2355 nhoff += ihl * 4 + poff;
2356 if (pskb_may_pull(skb, nhoff + 4)) {
2357 ports.v32 = * (__force u32 *) (skb->data + nhoff);
8c52d509
CG
2358 if (ports.v16[1] < ports.v16[0])
2359 swap(ports.v16[0], ports.v16[1]);
b249dcb8 2360 }
0a9627f2
TH
2361 }
2362
b249dcb8
ED
2363 /* get a consistent hash (same value on both flow directions) */
2364 if (addr2 < addr1)
2365 swap(addr1, addr2);
0a9627f2 2366
bfb564e7
KK
2367 hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2368 if (!hash)
2369 hash = 1;
2370
2371done:
2372 return hash;
2373}
2374EXPORT_SYMBOL(__skb_get_rxhash);
2375
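/*
 * Note: the swap() calls above canonicalize the tuple so that both
 * directions of a flow produce the same hash.  The same idea in
 * isolation, assuming "ports" was already canonicalized like ports.v16
 * above:
 */
static u32 symmetric_flow_hash(u32 saddr, u32 daddr, u32 ports)
{
	if (daddr < saddr)			/* order the addresses... */
		swap(saddr, daddr);
	return jhash_3words(saddr, daddr, ports, hashrnd) ?: 1;	/* ...then hash; avoid 0 */
}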
2376#ifdef CONFIG_RPS
2377
2378/* One global table that all flow-based protocols share. */
2379struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2380EXPORT_SYMBOL(rps_sock_flow_table);
2381
2382/*
2383 * get_rps_cpu is called from netif_receive_skb and returns the target
2384 * CPU from the RPS map of the receiving queue for a given skb.
2385 * rcu_read_lock must be held on entry.
2386 */
2387static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2388 struct rps_dev_flow **rflowp)
2389{
2390 struct netdev_rx_queue *rxqueue;
6febfca9 2391 struct rps_map *map = NULL;
bfb564e7
KK
2392 struct rps_dev_flow_table *flow_table;
2393 struct rps_sock_flow_table *sock_flow_table;
2394 int cpu = -1;
2395 u16 tcpu;
2396
2397 if (skb_rx_queue_recorded(skb)) {
2398 u16 index = skb_get_rx_queue(skb);
62fe0b40
BH
2399 if (unlikely(index >= dev->real_num_rx_queues)) {
2400 WARN_ONCE(dev->real_num_rx_queues > 1,
2401 "%s received packet on queue %u, but number "
2402 "of RX queues is %u\n",
2403 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
2404 goto done;
2405 }
2406 rxqueue = dev->_rx + index;
2407 } else
2408 rxqueue = dev->_rx;
2409
6febfca9
CG
2410 if (rxqueue->rps_map) {
2411 map = rcu_dereference(rxqueue->rps_map);
2412 if (map && map->len == 1) {
2413 tcpu = map->cpus[0];
2414 if (cpu_online(tcpu))
2415 cpu = tcpu;
2416 goto done;
2417 }
2418 } else if (!rxqueue->rps_flow_table) {
bfb564e7 2419 goto done;
6febfca9 2420 }
bfb564e7 2421
2d47b459 2422 skb_reset_network_header(skb);
bfb564e7
KK
2423 if (!skb_get_rxhash(skb))
2424 goto done;
2425
fec5e652
TH
2426 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2427 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2428 if (flow_table && sock_flow_table) {
2429 u16 next_cpu;
2430 struct rps_dev_flow *rflow;
2431
2432 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2433 tcpu = rflow->cpu;
2434
2435 next_cpu = sock_flow_table->ents[skb->rxhash &
2436 sock_flow_table->mask];
2437
2438 /*
2439 * If the desired CPU (where last recvmsg was done) is
2440 * different from current CPU (one in the rx-queue flow
2441 * table entry), switch if one of the following holds:
2442 * - Current CPU is unset (equal to RPS_NO_CPU).
2443 * - Current CPU is offline.
2444 * - The current CPU's queue tail has advanced beyond the
2445 * last packet that was enqueued using this table entry.
2446 * This guarantees that all previous packets for the flow
2447 * have been dequeued, thus preserving in order delivery.
2448 */
2449 if (unlikely(tcpu != next_cpu) &&
2450 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2451 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2452 rflow->last_qtail)) >= 0)) {
2453 tcpu = rflow->cpu = next_cpu;
2454 if (tcpu != RPS_NO_CPU)
2455 rflow->last_qtail = per_cpu(softnet_data,
2456 tcpu).input_queue_head;
2457 }
2458 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2459 *rflowp = rflow;
2460 cpu = tcpu;
2461 goto done;
2462 }
2463 }
2464
0a9627f2 2465 if (map) {
fec5e652 2466 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
0a9627f2
TH
2467
2468 if (cpu_online(tcpu)) {
2469 cpu = tcpu;
2470 goto done;
2471 }
2472 }
2473
2474done:
0a9627f2
TH
2475 return cpu;
2476}
2477
0a9627f2 2478/* Called from hardirq (IPI) context */
e36fa2f7 2479static void rps_trigger_softirq(void *data)
0a9627f2 2480{
e36fa2f7
ED
2481 struct softnet_data *sd = data;
2482
eecfd7c4 2483 ____napi_schedule(sd, &sd->backlog);
dee42870 2484 sd->received_rps++;
0a9627f2 2485}
e36fa2f7 2486
fec5e652 2487#endif /* CONFIG_RPS */
0a9627f2 2488
e36fa2f7
ED
2489/*
 2490 * Check if this softnet_data structure belongs to another cpu.
2491 * If yes, queue it to our IPI list and return 1
2492 * If no, return 0
2493 */
2494static int rps_ipi_queued(struct softnet_data *sd)
2495{
2496#ifdef CONFIG_RPS
2497 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2498
2499 if (sd != mysd) {
2500 sd->rps_ipi_next = mysd->rps_ipi_list;
2501 mysd->rps_ipi_list = sd;
2502
2503 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2504 return 1;
2505 }
2506#endif /* CONFIG_RPS */
2507 return 0;
2508}
2509
0a9627f2
TH
2510/*
2511 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2512 * queue (may be a remote CPU queue).
2513 */
fec5e652
TH
2514static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2515 unsigned int *qtail)
0a9627f2 2516{
e36fa2f7 2517 struct softnet_data *sd;
0a9627f2
TH
2518 unsigned long flags;
2519
e36fa2f7 2520 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
2521
2522 local_irq_save(flags);
0a9627f2 2523
e36fa2f7 2524 rps_lock(sd);
6e7676c1
CG
2525 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2526 if (skb_queue_len(&sd->input_pkt_queue)) {
0a9627f2 2527enqueue:
e36fa2f7 2528 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 2529 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 2530 rps_unlock(sd);
152102c7 2531 local_irq_restore(flags);
0a9627f2
TH
2532 return NET_RX_SUCCESS;
2533 }
2534
ebda37c2
ED
2535 /* Schedule NAPI for backlog device
2536 * We can use non atomic operation since we own the queue lock
2537 */
2538 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 2539 if (!rps_ipi_queued(sd))
eecfd7c4 2540 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
2541 }
2542 goto enqueue;
2543 }
2544
dee42870 2545 sd->dropped++;
e36fa2f7 2546 rps_unlock(sd);
0a9627f2 2547
0a9627f2
TH
2548 local_irq_restore(flags);
2549
caf586e5 2550 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
2551 kfree_skb(skb);
2552 return NET_RX_DROP;
2553}
1da177e4 2554
1da177e4
LT
2555/**
2556 * netif_rx - post buffer to the network code
2557 * @skb: buffer to post
2558 *
2559 * This function receives a packet from a device driver and queues it for
2560 * the upper (protocol) levels to process. It always succeeds. The buffer
2561 * may be dropped during processing for congestion control or by the
2562 * protocol layers.
2563 *
2564 * return values:
2565 * NET_RX_SUCCESS (no congestion)
1da177e4
LT
2566 * NET_RX_DROP (packet was dropped)
2567 *
2568 */
2569
2570int netif_rx(struct sk_buff *skb)
2571{
b0e28f1e 2572 int ret;
1da177e4
LT
2573
2574 /* if netpoll wants it, pretend we never saw it */
2575 if (netpoll_rx(skb))
2576 return NET_RX_DROP;
2577
3b098e2d
ED
2578 if (netdev_tstamp_prequeue)
2579 net_timestamp_check(skb);
1da177e4 2580
df334545 2581#ifdef CONFIG_RPS
b0e28f1e 2582 {
fec5e652 2583 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
2584 int cpu;
2585
cece1945 2586 preempt_disable();
b0e28f1e 2587 rcu_read_lock();
fec5e652
TH
2588
2589 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
2590 if (cpu < 0)
2591 cpu = smp_processor_id();
fec5e652
TH
2592
2593 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2594
b0e28f1e 2595 rcu_read_unlock();
cece1945 2596 preempt_enable();
b0e28f1e 2597 }
1e94d72f 2598#else
fec5e652
TH
2599 {
2600 unsigned int qtail;
2601 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2602 put_cpu();
2603 }
1e94d72f 2604#endif
b0e28f1e 2605 return ret;
1da177e4 2606}
d1b19dff 2607EXPORT_SYMBOL(netif_rx);
1da177e4
LT
2608
2609int netif_rx_ni(struct sk_buff *skb)
2610{
2611 int err;
2612
2613 preempt_disable();
2614 err = netif_rx(skb);
2615 if (local_softirq_pending())
2616 do_softirq();
2617 preempt_enable();
2618
2619 return err;
2620}
1da177e4
LT
2621EXPORT_SYMBOL(netif_rx_ni);
2622
1da177e4
LT
2623static void net_tx_action(struct softirq_action *h)
2624{
2625 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2626
2627 if (sd->completion_queue) {
2628 struct sk_buff *clist;
2629
2630 local_irq_disable();
2631 clist = sd->completion_queue;
2632 sd->completion_queue = NULL;
2633 local_irq_enable();
2634
2635 while (clist) {
2636 struct sk_buff *skb = clist;
2637 clist = clist->next;
2638
547b792c 2639 WARN_ON(atomic_read(&skb->users));
1da177e4
LT
2640 __kfree_skb(skb);
2641 }
2642 }
2643
2644 if (sd->output_queue) {
37437bb2 2645 struct Qdisc *head;
1da177e4
LT
2646
2647 local_irq_disable();
2648 head = sd->output_queue;
2649 sd->output_queue = NULL;
a9cbd588 2650 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
2651 local_irq_enable();
2652
2653 while (head) {
37437bb2
DM
2654 struct Qdisc *q = head;
2655 spinlock_t *root_lock;
2656
1da177e4
LT
2657 head = head->next_sched;
2658
5fb66229 2659 root_lock = qdisc_lock(q);
37437bb2 2660 if (spin_trylock(root_lock)) {
def82a1d
JP
2661 smp_mb__before_clear_bit();
2662 clear_bit(__QDISC_STATE_SCHED,
2663 &q->state);
37437bb2
DM
2664 qdisc_run(q);
2665 spin_unlock(root_lock);
1da177e4 2666 } else {
195648bb 2667 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 2668 &q->state)) {
195648bb 2669 __netif_reschedule(q);
e8a83e10
JP
2670 } else {
2671 smp_mb__before_clear_bit();
2672 clear_bit(__QDISC_STATE_SCHED,
2673 &q->state);
2674 }
1da177e4
LT
2675 }
2676 }
2677 }
2678}
2679
6f05f629
SH
2680static inline int deliver_skb(struct sk_buff *skb,
2681 struct packet_type *pt_prev,
2682 struct net_device *orig_dev)
1da177e4
LT
2683{
2684 atomic_inc(&skb->users);
f2ccd8fa 2685 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4
LT
2686}
2687
ab95bfe0
JP
2688#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
2689 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
da678292
MM
2690/* This hook is defined here for ATM LANE */
2691int (*br_fdb_test_addr_hook)(struct net_device *dev,
2692 unsigned char *addr) __read_mostly;
4fb019a0 2693EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 2694#endif
1da177e4 2695
1da177e4
LT
2696#ifdef CONFIG_NET_CLS_ACT
 2697/* TODO: Maybe we should just force sch_ingress to be compiled in
 2698 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
 2699 * instructions (a compare and two extra stores) whenever we have
 2700 * CONFIG_NET_CLS_ACT on but the ingress qdisc is not in use.
4ec93edb 2701 * NOTE: This doesn't remove any functionality; if you don't have
1da177e4
LT
 2702 * the ingress scheduler, you just can't add policies on ingress.
2703 *
2704 */
24824a09 2705static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
1da177e4 2706{
1da177e4 2707 struct net_device *dev = skb->dev;
f697c3e8 2708 u32 ttl = G_TC_RTTL(skb->tc_verd);
555353cf
DM
2709 int result = TC_ACT_OK;
2710 struct Qdisc *q;
4ec93edb 2711
de384830
SH
2712 if (unlikely(MAX_RED_LOOP < ttl++)) {
2713 if (net_ratelimit())
2714 pr_warning( "Redir loop detected Dropping packet (%d->%d)\n",
2715 skb->skb_iif, dev->ifindex);
f697c3e8
HX
2716 return TC_ACT_SHOT;
2717 }
1da177e4 2718
f697c3e8
HX
2719 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2720 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
1da177e4 2721
83874000 2722 q = rxq->qdisc;
8d50b53d 2723 if (q != &noop_qdisc) {
83874000 2724 spin_lock(qdisc_lock(q));
a9312ae8
DM
2725 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2726 result = qdisc_enqueue_root(skb, q);
83874000
DM
2727 spin_unlock(qdisc_lock(q));
2728 }
f697c3e8
HX
2729
2730 return result;
2731}
86e65da9 2732
f697c3e8
HX
2733static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2734 struct packet_type **pt_prev,
2735 int *ret, struct net_device *orig_dev)
2736{
24824a09
ED
2737 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
2738
2739 if (!rxq || rxq->qdisc == &noop_qdisc)
f697c3e8 2740 goto out;
1da177e4 2741
f697c3e8
HX
2742 if (*pt_prev) {
2743 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2744 *pt_prev = NULL;
1da177e4
LT
2745 }
2746
24824a09 2747 switch (ing_filter(skb, rxq)) {
f697c3e8
HX
2748 case TC_ACT_SHOT:
2749 case TC_ACT_STOLEN:
2750 kfree_skb(skb);
2751 return NULL;
2752 }
2753
2754out:
2755 skb->tc_verd = 0;
2756 return skb;
1da177e4
LT
2757}
2758#endif
2759
bc1d0411
PM
2760/*
2761 * netif_nit_deliver - deliver received packets to network taps
2762 * @skb: buffer
2763 *
2764 * This function is used to deliver incoming packets to network
2765 * taps. It should be used when the normal netif_receive_skb path
2766 * is bypassed, for example because of VLAN acceleration.
2767 */
2768void netif_nit_deliver(struct sk_buff *skb)
2769{
2770 struct packet_type *ptype;
2771
2772 if (list_empty(&ptype_all))
2773 return;
2774
2775 skb_reset_network_header(skb);
2776 skb_reset_transport_header(skb);
2777 skb->mac_len = skb->network_header - skb->mac_header;
2778
2779 rcu_read_lock();
2780 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2781 if (!ptype->dev || ptype->dev == skb->dev)
2782 deliver_skb(skb, ptype, skb->dev);
2783 }
2784 rcu_read_unlock();
2785}
2786
ab95bfe0
JP
2787/**
2788 * netdev_rx_handler_register - register receive handler
2789 * @dev: device to register a handler for
2790 * @rx_handler: receive handler to register
93e2c32b 2791 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0
JP
2792 *
 2793 * Register a receive handler for a device. This handler will then be
2794 * called from __netif_receive_skb. A negative errno code is returned
2795 * on a failure.
2796 *
2797 * The caller must hold the rtnl_mutex.
2798 */
2799int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
2800 rx_handler_func_t *rx_handler,
2801 void *rx_handler_data)
ab95bfe0
JP
2802{
2803 ASSERT_RTNL();
2804
2805 if (dev->rx_handler)
2806 return -EBUSY;
2807
93e2c32b 2808 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
2809 rcu_assign_pointer(dev->rx_handler, rx_handler);
2810
2811 return 0;
2812}
2813EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
2814
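/*
 * A sketch (not part of dev.c) of registering a receive handler the
 * way a bridge or macvlan port would.  foo_handle_frame() is
 * hypothetical; returning NULL tells __netif_receive_skb() that the
 * skb was consumed.
 */
static struct sk_buff *foo_handle_frame(struct sk_buff *skb)
{
	/* either consume skb and return NULL, or hand it back */
	return skb;
}

static int foo_port_attach(struct net_device *dev, void *port_priv)
{
	int err;

	rtnl_lock();				/* rtnl_mutex must be held */
	err = netdev_rx_handler_register(dev, foo_handle_frame, port_priv);
	rtnl_unlock();
	return err;
}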
2815/**
2816 * netdev_rx_handler_unregister - unregister receive handler
2817 * @dev: device to unregister a handler from
2818 *
 2819 * Unregister a receive handler from a device.
2820 *
2821 * The caller must hold the rtnl_mutex.
2822 */
2823void netdev_rx_handler_unregister(struct net_device *dev)
2824{
2825
2826 ASSERT_RTNL();
2827 rcu_assign_pointer(dev->rx_handler, NULL);
93e2c32b 2828 rcu_assign_pointer(dev->rx_handler_data, NULL);
ab95bfe0
JP
2829}
2830EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
2831
acbbc071
ED
2832static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2833 struct net_device *master)
2834{
2835 if (skb->pkt_type == PACKET_HOST) {
2836 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2837
2838 memcpy(dest, master->dev_addr, ETH_ALEN);
2839 }
2840}
2841
2842/* On bonding slaves other than the currently active slave, suppress
2843 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2844 * ARP on active-backup slaves with arp_validate enabled.
2845 */
2846int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2847{
2848 struct net_device *dev = skb->dev;
2849
2850 if (master->priv_flags & IFF_MASTER_ARPMON)
2851 dev->last_rx = jiffies;
2852
f350a0a8
JP
2853 if ((master->priv_flags & IFF_MASTER_ALB) &&
2854 (master->priv_flags & IFF_BRIDGE_PORT)) {
acbbc071
ED
2855 /* Do address unmangle. The local destination address
2856 * will be always the one master has. Provides the right
2857 * functionality in a bridge.
2858 */
2859 skb_bond_set_mac_by_master(skb, master);
2860 }
2861
2862 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2863 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2864 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2865 return 0;
2866
2867 if (master->priv_flags & IFF_MASTER_ALB) {
2868 if (skb->pkt_type != PACKET_BROADCAST &&
2869 skb->pkt_type != PACKET_MULTICAST)
2870 return 0;
2871 }
2872 if (master->priv_flags & IFF_MASTER_8023AD &&
2873 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2874 return 0;
2875
2876 return 1;
2877 }
2878 return 0;
2879}
2880EXPORT_SYMBOL(__skb_bond_should_drop);
2881
10f744d2 2882static int __netif_receive_skb(struct sk_buff *skb)
1da177e4
LT
2883{
2884 struct packet_type *ptype, *pt_prev;
ab95bfe0 2885 rx_handler_func_t *rx_handler;
f2ccd8fa 2886 struct net_device *orig_dev;
0641e4fb 2887 struct net_device *master;
0d7a3681 2888 struct net_device *null_or_orig;
2df4a0fa 2889 struct net_device *orig_or_bond;
1da177e4 2890 int ret = NET_RX_DROP;
252e3346 2891 __be16 type;
1da177e4 2892
3b098e2d
ED
2893 if (!netdev_tstamp_prequeue)
2894 net_timestamp_check(skb);
81bbb3d4 2895
05532121
CG
2896 if (vlan_tx_tag_present(skb))
2897 vlan_hwaccel_do_receive(skb);
9b22ea56 2898
1da177e4 2899 /* if we've gotten here through NAPI, check netpoll */
bea3348e 2900 if (netpoll_receive_skb(skb))
1da177e4
LT
2901 return NET_RX_DROP;
2902
8964be4a
ED
2903 if (!skb->skb_iif)
2904 skb->skb_iif = skb->dev->ifindex;
86e65da9 2905
597a264b
JF
2906 /*
2907 * bonding note: skbs received on inactive slaves should only
2908 * be delivered to pkt handlers that are exact matches. Also
2909 * the deliver_no_wcard flag will be set. If packet handlers
 2910 * are sensitive to duplicate packets, these skbs will need to
2911 * be dropped at the handler. The vlan accel path may have
2912 * already set the deliver_no_wcard flag.
2913 */
0d7a3681 2914 null_or_orig = NULL;
cc9bd5ce 2915 orig_dev = skb->dev;
0641e4fb 2916 master = ACCESS_ONCE(orig_dev->master);
597a264b
JF
2917 if (skb->deliver_no_wcard)
2918 null_or_orig = orig_dev;
2919 else if (master) {
2920 if (skb_bond_should_drop(skb, master)) {
2921 skb->deliver_no_wcard = 1;
0d7a3681 2922 null_or_orig = orig_dev; /* deliver only exact match */
597a264b 2923 } else
0641e4fb 2924 skb->dev = master;
cc9bd5ce 2925 }
8f903c70 2926
27f39c73 2927 __this_cpu_inc(softnet_data.processed);
c1d2bbe1 2928 skb_reset_network_header(skb);
badff6d0 2929 skb_reset_transport_header(skb);
b0e380b1 2930 skb->mac_len = skb->network_header - skb->mac_header;
1da177e4
LT
2931
2932 pt_prev = NULL;
2933
2934 rcu_read_lock();
2935
2936#ifdef CONFIG_NET_CLS_ACT
2937 if (skb->tc_verd & TC_NCLS) {
2938 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2939 goto ncls;
2940 }
2941#endif
2942
2943 list_for_each_entry_rcu(ptype, &ptype_all, list) {
f982307f
JE
2944 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2945 ptype->dev == orig_dev) {
4ec93edb 2946 if (pt_prev)
f2ccd8fa 2947 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
2948 pt_prev = ptype;
2949 }
2950 }
2951
2952#ifdef CONFIG_NET_CLS_ACT
f697c3e8
HX
2953 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2954 if (!skb)
1da177e4 2955 goto out;
1da177e4
LT
2956ncls:
2957#endif
2958
ab95bfe0
JP
2959 /* Handle special case of bridge or macvlan */
2960 rx_handler = rcu_dereference(skb->dev->rx_handler);
2961 if (rx_handler) {
2962 if (pt_prev) {
2963 ret = deliver_skb(skb, pt_prev, orig_dev);
2964 pt_prev = NULL;
2965 }
2966 skb = rx_handler(skb);
2967 if (!skb)
2968 goto out;
2969 }
1da177e4 2970
1f3c8804
AG
2971 /*
2972 * Make sure frames received on VLAN interfaces stacked on
2973 * bonding interfaces still make their way to any base bonding
2974 * device that may have registered for a specific ptype. The
2975 * handler may have to adjust skb->dev and orig_dev.
1f3c8804 2976 */
2df4a0fa 2977 orig_or_bond = orig_dev;
1f3c8804
AG
2978 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2979 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2df4a0fa 2980 orig_or_bond = vlan_dev_real_dev(skb->dev);
1f3c8804
AG
2981 }
2982
1da177e4 2983 type = skb->protocol;
82d8a867
PE
2984 list_for_each_entry_rcu(ptype,
2985 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1f3c8804 2986 if (ptype->type == type && (ptype->dev == null_or_orig ||
ca8d9ea3 2987 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2df4a0fa 2988 ptype->dev == orig_or_bond)) {
4ec93edb 2989 if (pt_prev)
f2ccd8fa 2990 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
2991 pt_prev = ptype;
2992 }
2993 }
2994
2995 if (pt_prev) {
f2ccd8fa 2996 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 2997 } else {
caf586e5 2998 atomic_long_inc(&skb->dev->rx_dropped);
1da177e4
LT
2999 kfree_skb(skb);
 3000 /* Jamal, now you will not be able to escape explaining
 3001 * to me how you were going to use this. :-)
3002 */
3003 ret = NET_RX_DROP;
3004 }
3005
3006out:
3007 rcu_read_unlock();
3008 return ret;
3009}
0a9627f2
TH
3010
3011/**
3012 * netif_receive_skb - process receive buffer from network
3013 * @skb: buffer to process
3014 *
3015 * netif_receive_skb() is the main receive data processing function.
3016 * It always succeeds. The buffer may be dropped during processing
3017 * for congestion control or by the protocol layers.
3018 *
3019 * This function may only be called from softirq context and interrupts
3020 * should be enabled.
3021 *
3022 * Return values (usually ignored):
3023 * NET_RX_SUCCESS: no congestion
3024 * NET_RX_DROP: packet was dropped
3025 */
3026int netif_receive_skb(struct sk_buff *skb)
3027{
3b098e2d
ED
3028 if (netdev_tstamp_prequeue)
3029 net_timestamp_check(skb);
3030
c1f19b51
RC
3031 if (skb_defer_rx_timestamp(skb))
3032 return NET_RX_SUCCESS;
3033
df334545 3034#ifdef CONFIG_RPS
3b098e2d
ED
3035 {
3036 struct rps_dev_flow voidflow, *rflow = &voidflow;
3037 int cpu, ret;
fec5e652 3038
3b098e2d
ED
3039 rcu_read_lock();
3040
3041 cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 3042
3b098e2d
ED
3043 if (cpu >= 0) {
3044 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3045 rcu_read_unlock();
3046 } else {
3047 rcu_read_unlock();
3048 ret = __netif_receive_skb(skb);
3049 }
0a9627f2 3050
3b098e2d 3051 return ret;
fec5e652 3052 }
1e94d72f
TH
3053#else
3054 return __netif_receive_skb(skb);
3055#endif
0a9627f2 3056}
d1b19dff 3057EXPORT_SYMBOL(netif_receive_skb);
1da177e4 3058
88751275
ED
3059/* Network device is going away, flush any packets still pending
3060 * Called with irqs disabled.
3061 */
152102c7 3062static void flush_backlog(void *arg)
6e583ce5 3063{
152102c7 3064 struct net_device *dev = arg;
e36fa2f7 3065 struct softnet_data *sd = &__get_cpu_var(softnet_data);
6e583ce5
SH
3066 struct sk_buff *skb, *tmp;
3067
e36fa2f7 3068 rps_lock(sd);
6e7676c1 3069 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6e583ce5 3070 if (skb->dev == dev) {
e36fa2f7 3071 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 3072 kfree_skb(skb);
76cc8b13 3073 input_queue_head_incr(sd);
6e583ce5 3074 }
6e7676c1 3075 }
e36fa2f7 3076 rps_unlock(sd);
6e7676c1
CG
3077
3078 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3079 if (skb->dev == dev) {
3080 __skb_unlink(skb, &sd->process_queue);
3081 kfree_skb(skb);
76cc8b13 3082 input_queue_head_incr(sd);
6e7676c1
CG
3083 }
3084 }
6e583ce5
SH
3085}
3086
d565b0a1
HX
3087static int napi_gro_complete(struct sk_buff *skb)
3088{
3089 struct packet_type *ptype;
3090 __be16 type = skb->protocol;
3091 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3092 int err = -ENOENT;
3093
fc59f9a3
HX
3094 if (NAPI_GRO_CB(skb)->count == 1) {
3095 skb_shinfo(skb)->gso_size = 0;
d565b0a1 3096 goto out;
fc59f9a3 3097 }
d565b0a1
HX
3098
3099 rcu_read_lock();
3100 list_for_each_entry_rcu(ptype, head, list) {
3101 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3102 continue;
3103
3104 err = ptype->gro_complete(skb);
3105 break;
3106 }
3107 rcu_read_unlock();
3108
3109 if (err) {
3110 WARN_ON(&ptype->list == head);
3111 kfree_skb(skb);
3112 return NET_RX_SUCCESS;
3113 }
3114
3115out:
d565b0a1
HX
3116 return netif_receive_skb(skb);
3117}
3118
86cac58b 3119inline void napi_gro_flush(struct napi_struct *napi)
d565b0a1
HX
3120{
3121 struct sk_buff *skb, *next;
3122
3123 for (skb = napi->gro_list; skb; skb = next) {
3124 next = skb->next;
3125 skb->next = NULL;
3126 napi_gro_complete(skb);
3127 }
3128
4ae5544f 3129 napi->gro_count = 0;
d565b0a1
HX
3130 napi->gro_list = NULL;
3131}
86cac58b 3132EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 3133
5b252f0c 3134enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
3135{
3136 struct sk_buff **pp = NULL;
3137 struct packet_type *ptype;
3138 __be16 type = skb->protocol;
3139 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
0da2afd5 3140 int same_flow;
d565b0a1 3141 int mac_len;
5b252f0c 3142 enum gro_result ret;
d565b0a1 3143
ce9e76c8 3144 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
d565b0a1
HX
3145 goto normal;
3146
21dc3301 3147 if (skb_is_gso(skb) || skb_has_frag_list(skb))
f17f5c91
HX
3148 goto normal;
3149
d565b0a1
HX
3150 rcu_read_lock();
3151 list_for_each_entry_rcu(ptype, head, list) {
d565b0a1
HX
3152 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3153 continue;
3154
86911732 3155 skb_set_network_header(skb, skb_gro_offset(skb));
d565b0a1
HX
3156 mac_len = skb->network_header - skb->mac_header;
3157 skb->mac_len = mac_len;
3158 NAPI_GRO_CB(skb)->same_flow = 0;
3159 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 3160 NAPI_GRO_CB(skb)->free = 0;
d565b0a1 3161
d565b0a1
HX
3162 pp = ptype->gro_receive(&napi->gro_list, skb);
3163 break;
3164 }
3165 rcu_read_unlock();
3166
3167 if (&ptype->list == head)
3168 goto normal;
3169
0da2afd5 3170 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 3171 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 3172
d565b0a1
HX
3173 if (pp) {
3174 struct sk_buff *nskb = *pp;
3175
3176 *pp = nskb->next;
3177 nskb->next = NULL;
3178 napi_gro_complete(nskb);
4ae5544f 3179 napi->gro_count--;
d565b0a1
HX
3180 }
3181
0da2afd5 3182 if (same_flow)
d565b0a1
HX
3183 goto ok;
3184
4ae5544f 3185 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
d565b0a1 3186 goto normal;
d565b0a1 3187
4ae5544f 3188 napi->gro_count++;
d565b0a1 3189 NAPI_GRO_CB(skb)->count = 1;
86911732 3190 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
3191 skb->next = napi->gro_list;
3192 napi->gro_list = skb;
5d0d9be8 3193 ret = GRO_HELD;
d565b0a1 3194
ad0f9904 3195pull:
cb18978c
HX
3196 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3197 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3198
3199 BUG_ON(skb->end - skb->tail < grow);
3200
3201 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3202
3203 skb->tail += grow;
3204 skb->data_len -= grow;
3205
3206 skb_shinfo(skb)->frags[0].page_offset += grow;
3207 skb_shinfo(skb)->frags[0].size -= grow;
3208
3209 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3210 put_page(skb_shinfo(skb)->frags[0].page);
3211 memmove(skb_shinfo(skb)->frags,
3212 skb_shinfo(skb)->frags + 1,
e5093aec 3213 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
cb18978c 3214 }
ad0f9904
HX
3215 }
3216
d565b0a1 3217ok:
5d0d9be8 3218 return ret;
d565b0a1
HX
3219
3220normal:
ad0f9904
HX
3221 ret = GRO_NORMAL;
3222 goto pull;
5d38a079 3223}
96e93eab
HX
3224EXPORT_SYMBOL(dev_gro_receive);
3225
40d0802b 3226static inline gro_result_t
5b252f0c 3227__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
96e93eab
HX
3228{
3229 struct sk_buff *p;
3230
3231 for (p = napi->gro_list; p; p = p->next) {
40d0802b
ED
3232 unsigned long diffs;
3233
3234 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3235 diffs |= compare_ether_header(skb_mac_header(p),
f64f9e71 3236 skb_gro_mac_header(skb));
40d0802b 3237 NAPI_GRO_CB(p)->same_flow = !diffs;
96e93eab
HX
3238 NAPI_GRO_CB(p)->flush = 0;
3239 }
3240
3241 return dev_gro_receive(napi, skb);
3242}
5d38a079 3243
c7c4b3b6 3244gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 3245{
5d0d9be8
HX
3246 switch (ret) {
3247 case GRO_NORMAL:
c7c4b3b6
BH
3248 if (netif_receive_skb(skb))
3249 ret = GRO_DROP;
3250 break;
5d38a079 3251
5d0d9be8 3252 case GRO_DROP:
5d0d9be8 3253 case GRO_MERGED_FREE:
5d38a079
HX
3254 kfree_skb(skb);
3255 break;
5b252f0c
BH
3256
3257 case GRO_HELD:
3258 case GRO_MERGED:
3259 break;
5d38a079
HX
3260 }
3261
c7c4b3b6 3262 return ret;
5d0d9be8
HX
3263}
3264EXPORT_SYMBOL(napi_skb_finish);
3265
78a478d0
HX
3266void skb_gro_reset_offset(struct sk_buff *skb)
3267{
3268 NAPI_GRO_CB(skb)->data_offset = 0;
3269 NAPI_GRO_CB(skb)->frag0 = NULL;
7489594c 3270 NAPI_GRO_CB(skb)->frag0_len = 0;
78a478d0 3271
78d3fd0b 3272 if (skb->mac_header == skb->tail &&
7489594c 3273 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
78a478d0
HX
3274 NAPI_GRO_CB(skb)->frag0 =
3275 page_address(skb_shinfo(skb)->frags[0].page) +
3276 skb_shinfo(skb)->frags[0].page_offset;
7489594c
HX
3277 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3278 }
78a478d0
HX
3279}
3280EXPORT_SYMBOL(skb_gro_reset_offset);
3281
c7c4b3b6 3282gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 3283{
86911732
HX
3284 skb_gro_reset_offset(skb);
3285
5d0d9be8 3286 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
d565b0a1
HX
3287}
3288EXPORT_SYMBOL(napi_gro_receive);
3289
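/*
 * Note: a GRO-capable driver simply substitutes napi_gro_receive() for
 * netif_receive_skb() in its poll loop, e.g.
 *
 *	skb->protocol = eth_type_trans(skb, dev);
 *	napi_gro_receive(napi, skb);
 *
 * and the core merges or flushes held skbs as shown above.
 */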
96e93eab
HX
3290void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3291{
96e93eab
HX
3292 __skb_pull(skb, skb_headlen(skb));
3293 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3294
3295 napi->skb = skb;
3296}
3297EXPORT_SYMBOL(napi_reuse_skb);
3298
76620aaf 3299struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 3300{
5d38a079 3301 struct sk_buff *skb = napi->skb;
5d38a079
HX
3302
3303 if (!skb) {
89d71a66
ED
3304 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3305 if (skb)
3306 napi->skb = skb;
80595d59 3307 }
96e93eab
HX
3308 return skb;
3309}
76620aaf 3310EXPORT_SYMBOL(napi_get_frags);
96e93eab 3311
c7c4b3b6
BH
3312gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3313 gro_result_t ret)
96e93eab 3314{
5d0d9be8
HX
3315 switch (ret) {
3316 case GRO_NORMAL:
86911732 3317 case GRO_HELD:
e76b69cc 3318 skb->protocol = eth_type_trans(skb, skb->dev);
86911732 3319
c7c4b3b6
BH
3320 if (ret == GRO_HELD)
3321 skb_gro_pull(skb, -ETH_HLEN);
3322 else if (netif_receive_skb(skb))
3323 ret = GRO_DROP;
86911732 3324 break;
5d38a079 3325
5d0d9be8 3326 case GRO_DROP:
5d0d9be8
HX
3327 case GRO_MERGED_FREE:
3328 napi_reuse_skb(napi, skb);
3329 break;
5b252f0c
BH
3330
3331 case GRO_MERGED:
3332 break;
5d0d9be8 3333 }
5d38a079 3334
c7c4b3b6 3335 return ret;
5d38a079 3336}
5d0d9be8
HX
3337EXPORT_SYMBOL(napi_frags_finish);
3338
76620aaf
HX
3339struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3340{
3341 struct sk_buff *skb = napi->skb;
3342 struct ethhdr *eth;
a5b1cf28
HX
3343 unsigned int hlen;
3344 unsigned int off;
76620aaf
HX
3345
3346 napi->skb = NULL;
3347
3348 skb_reset_mac_header(skb);
3349 skb_gro_reset_offset(skb);
3350
a5b1cf28
HX
3351 off = skb_gro_offset(skb);
3352 hlen = off + sizeof(*eth);
3353 eth = skb_gro_header_fast(skb, off);
3354 if (skb_gro_header_hard(skb, hlen)) {
3355 eth = skb_gro_header_slow(skb, hlen, off);
3356 if (unlikely(!eth)) {
3357 napi_reuse_skb(napi, skb);
3358 skb = NULL;
3359 goto out;
3360 }
76620aaf
HX
3361 }
3362
3363 skb_gro_pull(skb, sizeof(*eth));
3364
3365 /*
3366 * This works because the only protocols we care about don't require
3367 * special handling. We'll fix it up properly at the end.
3368 */
3369 skb->protocol = eth->h_proto;
3370
3371out:
3372 return skb;
3373}
3374EXPORT_SYMBOL(napi_frags_skb);
3375
c7c4b3b6 3376gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 3377{
76620aaf 3378 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
3379
3380 if (!skb)
c7c4b3b6 3381 return GRO_DROP;
5d0d9be8
HX
3382
3383 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3384}
5d38a079
HX
3385EXPORT_SYMBOL(napi_gro_frags);
3386
e326bed2
ED
3387/*
3388 * net_rps_action sends any pending IPI's for rps.
3389 * Note: called with local irq disabled, but exits with local irq enabled.
3390 */
3391static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3392{
3393#ifdef CONFIG_RPS
3394 struct softnet_data *remsd = sd->rps_ipi_list;
3395
3396 if (remsd) {
3397 sd->rps_ipi_list = NULL;
3398
3399 local_irq_enable();
3400
3401 /* Send pending IPI's to kick RPS processing on remote cpus. */
3402 while (remsd) {
3403 struct softnet_data *next = remsd->rps_ipi_next;
3404
3405 if (cpu_online(remsd->cpu))
3406 __smp_call_function_single(remsd->cpu,
3407 &remsd->csd, 0);
3408 remsd = next;
3409 }
3410 } else
3411#endif
3412 local_irq_enable();
3413}
3414
bea3348e 3415static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
3416{
3417 int work = 0;
eecfd7c4 3418 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 3419
e326bed2
ED
3420#ifdef CONFIG_RPS
 3421 /* Check if we have pending IPIs; it's better to send them now
 3422 * than to wait for net_rx_action() to end.
3423 */
3424 if (sd->rps_ipi_list) {
3425 local_irq_disable();
3426 net_rps_action_and_irq_enable(sd);
3427 }
3428#endif
bea3348e 3429 napi->weight = weight_p;
6e7676c1
CG
3430 local_irq_disable();
3431 while (work < quota) {
1da177e4 3432 struct sk_buff *skb;
6e7676c1
CG
3433 unsigned int qlen;
3434
3435 while ((skb = __skb_dequeue(&sd->process_queue))) {
3436 local_irq_enable();
3437 __netif_receive_skb(skb);
6e7676c1 3438 local_irq_disable();
76cc8b13
TH
3439 input_queue_head_incr(sd);
3440 if (++work >= quota) {
3441 local_irq_enable();
3442 return work;
3443 }
6e7676c1 3444 }
1da177e4 3445
e36fa2f7 3446 rps_lock(sd);
6e7676c1 3447 qlen = skb_queue_len(&sd->input_pkt_queue);
76cc8b13 3448 if (qlen)
6e7676c1
CG
3449 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3450 &sd->process_queue);
76cc8b13 3451
6e7676c1 3452 if (qlen < quota - work) {
eecfd7c4
ED
3453 /*
3454 * Inline a custom version of __napi_complete().
 3455 * Only the current cpu owns and manipulates this napi,
 3456 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
 3457 * so we can use a plain write instead of clear_bit()
 3458 * and we don't need an smp_mb() memory barrier.
3459 */
3460 list_del(&napi->poll_list);
3461 napi->state = 0;
3462
6e7676c1 3463 quota = work + qlen;
bea3348e 3464 }
e36fa2f7 3465 rps_unlock(sd);
6e7676c1
CG
3466 }
3467 local_irq_enable();
1da177e4 3468
bea3348e
SH
3469 return work;
3470}
1da177e4 3471
bea3348e
SH
3472/**
3473 * __napi_schedule - schedule for receive
c4ea43c5 3474 * @n: entry to schedule
bea3348e
SH
3475 *
3476 * The entry's receive function will be scheduled to run
3477 */
b5606c2d 3478void __napi_schedule(struct napi_struct *n)
bea3348e
SH
3479{
3480 unsigned long flags;
1da177e4 3481
bea3348e 3482 local_irq_save(flags);
eecfd7c4 3483 ____napi_schedule(&__get_cpu_var(softnet_data), n);
bea3348e 3484 local_irq_restore(flags);
1da177e4 3485}
bea3348e
SH
3486EXPORT_SYMBOL(__napi_schedule);
3487
d565b0a1
HX
3488void __napi_complete(struct napi_struct *n)
3489{
3490 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3491 BUG_ON(n->gro_list);
3492
3493 list_del(&n->poll_list);
3494 smp_mb__before_clear_bit();
3495 clear_bit(NAPI_STATE_SCHED, &n->state);
3496}
3497EXPORT_SYMBOL(__napi_complete);
3498
3499void napi_complete(struct napi_struct *n)
3500{
3501 unsigned long flags;
3502
3503 /*
3504 * don't let napi dequeue from the cpu poll list
3505 * just in case its running on a different cpu
3506 */
3507 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3508 return;
3509
3510 napi_gro_flush(n);
3511 local_irq_save(flags);
3512 __napi_complete(n);
3513 local_irq_restore(flags);
3514}
3515EXPORT_SYMBOL(napi_complete);
3516
3517void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3518 int (*poll)(struct napi_struct *, int), int weight)
3519{
3520 INIT_LIST_HEAD(&napi->poll_list);
4ae5544f 3521 napi->gro_count = 0;
d565b0a1 3522 napi->gro_list = NULL;
5d38a079 3523 napi->skb = NULL;
d565b0a1
HX
3524 napi->poll = poll;
3525 napi->weight = weight;
3526 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 3527 napi->dev = dev;
5d38a079 3528#ifdef CONFIG_NETPOLL
d565b0a1
HX
3529 spin_lock_init(&napi->poll_lock);
3530 napi->poll_owner = -1;
3531#endif
3532 set_bit(NAPI_STATE_SCHED, &napi->state);
3533}
3534EXPORT_SYMBOL(netif_napi_add);
3535
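/*
 * A sketch (not part of dev.c) of the canonical NAPI pattern built on
 * netif_napi_add()/napi_complete().  struct foo_priv, foo_rx_one() and
 * foo_irq_enable() are hypothetical.
 */
struct foo_priv {				/* hypothetical driver private data */
	struct napi_struct napi;
	/* ... */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work = 0;

	/* foo_rx_one() would pass each skb to netif_receive_skb() */
	while (work < budget && foo_rx_one(priv))
		work++;

	if (work < budget) {			/* ring drained: leave polled mode */
		napi_complete(napi);
		foo_irq_enable(priv);		/* hypothetical: rearm RX interrupt */
	}
	return work;
}

/* at probe time:  netif_napi_add(dev, &priv->napi, foo_poll, 64);
 * in the irq handler:  disable RX irqs, then napi_schedule(&priv->napi);
 */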
3536void netif_napi_del(struct napi_struct *napi)
3537{
3538 struct sk_buff *skb, *next;
3539
d7b06636 3540 list_del_init(&napi->dev_list);
76620aaf 3541 napi_free_frags(napi);
d565b0a1
HX
3542
3543 for (skb = napi->gro_list; skb; skb = next) {
3544 next = skb->next;
3545 skb->next = NULL;
3546 kfree_skb(skb);
3547 }
3548
3549 napi->gro_list = NULL;
4ae5544f 3550 napi->gro_count = 0;
d565b0a1
HX
3551}
3552EXPORT_SYMBOL(netif_napi_del);
3553
1da177e4
LT
3554static void net_rx_action(struct softirq_action *h)
3555{
e326bed2 3556 struct softnet_data *sd = &__get_cpu_var(softnet_data);
24f8b238 3557 unsigned long time_limit = jiffies + 2;
51b0bded 3558 int budget = netdev_budget;
53fb95d3
MM
3559 void *have;
3560
1da177e4
LT
3561 local_irq_disable();
3562
e326bed2 3563 while (!list_empty(&sd->poll_list)) {
bea3348e
SH
3564 struct napi_struct *n;
3565 int work, weight;
1da177e4 3566
bea3348e 3567 /* If the softirq window is exhausted then punt.
24f8b238
SH
 3568 * Allow this to run for 2 jiffies, which allows
3569 * an average latency of 1.5/HZ.
bea3348e 3570 */
24f8b238 3571 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
1da177e4
LT
3572 goto softnet_break;
3573
3574 local_irq_enable();
3575
bea3348e
SH
3576 /* Even though interrupts have been re-enabled, this
3577 * access is safe because interrupts can only add new
3578 * entries to the tail of this list, and only ->poll()
3579 * calls can remove this head entry from the list.
3580 */
e326bed2 3581 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
1da177e4 3582
bea3348e
SH
3583 have = netpoll_poll_lock(n);
3584
3585 weight = n->weight;
3586
0a7606c1
DM
3587 /* This NAPI_STATE_SCHED test is for avoiding a race
3588 * with netpoll's poll_napi(). Only the entity which
3589 * obtains the lock and sees NAPI_STATE_SCHED set will
3590 * actually make the ->poll() call. Therefore we avoid
3591 * accidentally calling ->poll() when NAPI is not scheduled.
3592 */
3593 work = 0;
4ea7e386 3594 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
0a7606c1 3595 work = n->poll(n, weight);
4ea7e386
NH
3596 trace_napi_poll(n);
3597 }
bea3348e
SH
3598
3599 WARN_ON_ONCE(work > weight);
3600
3601 budget -= work;
3602
3603 local_irq_disable();
3604
3605 /* Drivers must not modify the NAPI state if they
3606 * consume the entire weight. In such cases this code
3607 * still "owns" the NAPI instance and therefore can
3608 * move the instance around on the list at-will.
3609 */
fed17f30 3610 if (unlikely(work == weight)) {
ff780cd8
HX
3611 if (unlikely(napi_disable_pending(n))) {
3612 local_irq_enable();
3613 napi_complete(n);
3614 local_irq_disable();
3615 } else
e326bed2 3616 list_move_tail(&n->poll_list, &sd->poll_list);
fed17f30 3617 }
bea3348e
SH
3618
3619 netpoll_poll_unlock(have);
1da177e4
LT
3620 }
3621out:
e326bed2 3622 net_rps_action_and_irq_enable(sd);
0a9627f2 3623
db217334
CL
3624#ifdef CONFIG_NET_DMA
3625 /*
3626 * There may not be any more sk_buffs coming right now, so push
3627 * any pending DMA copies to hardware
3628 */
2ba05622 3629 dma_issue_pending_all();
db217334 3630#endif
bea3348e 3631
1da177e4
LT
3632 return;
3633
3634softnet_break:
dee42870 3635 sd->time_squeeze++;
1da177e4
LT
3636 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3637 goto out;
3638}
3639
d1b19dff 3640static gifconf_func_t *gifconf_list[NPROTO];
1da177e4
LT
3641
3642/**
3643 * register_gifconf - register a SIOCGIF handler
3644 * @family: Address family
3645 * @gifconf: Function handler
3646 *
3647 * Register protocol dependent address dumping routines. The handler
3648 * that is passed must not be freed or reused until it has been replaced
3649 * by another handler.
3650 */
d1b19dff 3651int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
1da177e4
LT
3652{
3653 if (family >= NPROTO)
3654 return -EINVAL;
3655 gifconf_list[family] = gifconf;
3656 return 0;
3657}
d1b19dff 3658EXPORT_SYMBOL(register_gifconf);
1da177e4
LT
3659
3660
3661/*
3662 * Map an interface index to its name (SIOCGIFNAME)
3663 */
3664
3665/*
3666 * We need this ioctl for efficient implementation of the
3667 * if_indextoname() function required by the IPv6 API. Without
3668 * it, we would have to search all the interfaces to find a
3669 * match. --pb
3670 */
3671
881d966b 3672static int dev_ifname(struct net *net, struct ifreq __user *arg)
1da177e4
LT
3673{
3674 struct net_device *dev;
3675 struct ifreq ifr;
3676
3677 /*
3678 * Fetch the caller's info block.
3679 */
3680
3681 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3682 return -EFAULT;
3683
fb699dfd
ED
3684 rcu_read_lock();
3685 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
1da177e4 3686 if (!dev) {
fb699dfd 3687 rcu_read_unlock();
1da177e4
LT
3688 return -ENODEV;
3689 }
3690
3691 strcpy(ifr.ifr_name, dev->name);
fb699dfd 3692 rcu_read_unlock();
1da177e4
LT
3693
3694 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3695 return -EFAULT;
3696 return 0;
3697}
3698
3699/*
3700 * Perform a SIOCGIFCONF call. This structure will change
3701 * size eventually, and there is nothing I can do about it.
3702 * Thus we will need a 'compatibility mode'.
3703 */
3704
881d966b 3705static int dev_ifconf(struct net *net, char __user *arg)
1da177e4
LT
3706{
3707 struct ifconf ifc;
3708 struct net_device *dev;
3709 char __user *pos;
3710 int len;
3711 int total;
3712 int i;
3713
3714 /*
3715 * Fetch the caller's info block.
3716 */
3717
3718 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3719 return -EFAULT;
3720
3721 pos = ifc.ifc_buf;
3722 len = ifc.ifc_len;
3723
3724 /*
3725 * Loop over the interfaces, and write an info block for each.
3726 */
3727
3728 total = 0;
881d966b 3729 for_each_netdev(net, dev) {
1da177e4
LT
3730 for (i = 0; i < NPROTO; i++) {
3731 if (gifconf_list[i]) {
3732 int done;
3733 if (!pos)
3734 done = gifconf_list[i](dev, NULL, 0);
3735 else
3736 done = gifconf_list[i](dev, pos + total,
3737 len - total);
3738 if (done < 0)
3739 return -EFAULT;
3740 total += done;
3741 }
3742 }
4ec93edb 3743 }
1da177e4
LT
3744
3745 /*
3746 * All done. Write the updated control block back to the caller.
3747 */
3748 ifc.ifc_len = total;
3749
3750 /*
3751 * Both BSD and Solaris return 0 here, so we do too.
3752 */
3753 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3754}
3755
3756#ifdef CONFIG_PROC_FS
3757/*
3758 * This is invoked by the /proc filesystem handler to display a device
3759 * in detail.
3760 */
7562f876 3761void *dev_seq_start(struct seq_file *seq, loff_t *pos)
c6d14c84 3762 __acquires(RCU)
1da177e4 3763{
e372c414 3764 struct net *net = seq_file_net(seq);
7562f876 3765 loff_t off;
1da177e4 3766 struct net_device *dev;
1da177e4 3767
c6d14c84 3768 rcu_read_lock();
7562f876
PE
3769 if (!*pos)
3770 return SEQ_START_TOKEN;
1da177e4 3771
7562f876 3772 off = 1;
c6d14c84 3773 for_each_netdev_rcu(net, dev)
7562f876
PE
3774 if (off++ == *pos)
3775 return dev;
1da177e4 3776
7562f876 3777 return NULL;
1da177e4
LT
3778}
3779
3780void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3781{
c6d14c84
ED
3782 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3783 first_net_device(seq_file_net(seq)) :
3784 next_net_device((struct net_device *)v);
3785
1da177e4 3786 ++*pos;
c6d14c84 3787 return rcu_dereference(dev);
1da177e4
LT
3788}
3789
3790void dev_seq_stop(struct seq_file *seq, void *v)
c6d14c84 3791 __releases(RCU)
1da177e4 3792{
c6d14c84 3793 rcu_read_unlock();
1da177e4
LT
3794}
3795
3796static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3797{
28172739
ED
3798 struct rtnl_link_stats64 temp;
3799 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
1da177e4 3800
be1f3c2c
BH
3801 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
3802 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
5a1b5898
RR
3803 dev->name, stats->rx_bytes, stats->rx_packets,
3804 stats->rx_errors,
3805 stats->rx_dropped + stats->rx_missed_errors,
3806 stats->rx_fifo_errors,
3807 stats->rx_length_errors + stats->rx_over_errors +
3808 stats->rx_crc_errors + stats->rx_frame_errors,
3809 stats->rx_compressed, stats->multicast,
3810 stats->tx_bytes, stats->tx_packets,
3811 stats->tx_errors, stats->tx_dropped,
3812 stats->tx_fifo_errors, stats->collisions,
3813 stats->tx_carrier_errors +
3814 stats->tx_aborted_errors +
3815 stats->tx_window_errors +
3816 stats->tx_heartbeat_errors,
3817 stats->tx_compressed);
1da177e4
LT
3818}
3819
3820/*
3821 * Called from the PROCfs module. This now uses the new arbitrary sized
3822 * /proc/net interface to create /proc/net/dev
3823 */
3824static int dev_seq_show(struct seq_file *seq, void *v)
3825{
3826 if (v == SEQ_START_TOKEN)
3827 seq_puts(seq, "Inter-| Receive "
3828 " | Transmit\n"
3829 " face |bytes packets errs drop fifo frame "
3830 "compressed multicast|bytes packets errs "
3831 "drop fifo colls carrier compressed\n");
3832 else
3833 dev_seq_printf_stats(seq, v);
3834 return 0;
3835}
3836
dee42870 3837static struct softnet_data *softnet_get_online(loff_t *pos)
1da177e4 3838{
dee42870 3839 struct softnet_data *sd = NULL;
1da177e4 3840
0c0b0aca 3841 while (*pos < nr_cpu_ids)
4ec93edb 3842 if (cpu_online(*pos)) {
dee42870 3843 sd = &per_cpu(softnet_data, *pos);
1da177e4
LT
3844 break;
3845 } else
3846 ++*pos;
dee42870 3847 return sd;
1da177e4
LT
3848}
3849
3850static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3851{
3852 return softnet_get_online(pos);
3853}
3854
3855static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3856{
3857 ++*pos;
3858 return softnet_get_online(pos);
3859}
3860
3861static void softnet_seq_stop(struct seq_file *seq, void *v)
3862{
3863}
3864
3865static int softnet_seq_show(struct seq_file *seq, void *v)
3866{
dee42870 3867 struct softnet_data *sd = v;
1da177e4 3868
0a9627f2 3869 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
dee42870 3870 sd->processed, sd->dropped, sd->time_squeeze, 0,
c1ebcdb8 3871 0, 0, 0, 0, /* was fastroute */
dee42870 3872 sd->cpu_collision, sd->received_rps);
1da177e4
LT
3873 return 0;
3874}
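
/* Editor's note, a sketch rather than part of dev.c: each line of
 * /proc/net/softnet_stat produced above describes one online CPU as ten
 * %08x words, in order:
 *
 *	processed dropped time_squeeze 0 0 0 0 0 cpu_collision received_rps
 *
 * The five literal zeros are placeholders kept for format compatibility
 * (several of them once reported the removed fastroute counters).
 */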
3875
f690808e 3876static const struct seq_operations dev_seq_ops = {
1da177e4
LT
3877 .start = dev_seq_start,
3878 .next = dev_seq_next,
3879 .stop = dev_seq_stop,
3880 .show = dev_seq_show,
3881};
3882
3883static int dev_seq_open(struct inode *inode, struct file *file)
3884{
e372c414
DL
3885 return seq_open_net(inode, file, &dev_seq_ops,
3886 sizeof(struct seq_net_private));
1da177e4
LT
3887}
3888
9a32144e 3889static const struct file_operations dev_seq_fops = {
1da177e4
LT
3890 .owner = THIS_MODULE,
3891 .open = dev_seq_open,
3892 .read = seq_read,
3893 .llseek = seq_lseek,
e372c414 3894 .release = seq_release_net,
1da177e4
LT
3895};
3896
f690808e 3897static const struct seq_operations softnet_seq_ops = {
1da177e4
LT
3898 .start = softnet_seq_start,
3899 .next = softnet_seq_next,
3900 .stop = softnet_seq_stop,
3901 .show = softnet_seq_show,
3902};
3903
3904static int softnet_seq_open(struct inode *inode, struct file *file)
3905{
3906 return seq_open(file, &softnet_seq_ops);
3907}
3908
9a32144e 3909static const struct file_operations softnet_seq_fops = {
1da177e4
LT
3910 .owner = THIS_MODULE,
3911 .open = softnet_seq_open,
3912 .read = seq_read,
3913 .llseek = seq_lseek,
3914 .release = seq_release,
3915};
3916
0e1256ff
SH
3917static void *ptype_get_idx(loff_t pos)
3918{
3919 struct packet_type *pt = NULL;
3920 loff_t i = 0;
3921 int t;
3922
3923 list_for_each_entry_rcu(pt, &ptype_all, list) {
3924 if (i == pos)
3925 return pt;
3926 ++i;
3927 }
3928
82d8a867 3929 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
0e1256ff
SH
3930 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3931 if (i == pos)
3932 return pt;
3933 ++i;
3934 }
3935 }
3936 return NULL;
3937}
3938
3939static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
72348a42 3940 __acquires(RCU)
0e1256ff
SH
3941{
3942 rcu_read_lock();
3943 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3944}
3945
3946static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3947{
3948 struct packet_type *pt;
3949 struct list_head *nxt;
3950 int hash;
3951
3952 ++*pos;
3953 if (v == SEQ_START_TOKEN)
3954 return ptype_get_idx(0);
3955
3956 pt = v;
3957 nxt = pt->list.next;
3958 if (pt->type == htons(ETH_P_ALL)) {
3959 if (nxt != &ptype_all)
3960 goto found;
3961 hash = 0;
3962 nxt = ptype_base[0].next;
3963 } else
82d8a867 3964 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
0e1256ff
SH
3965
3966 while (nxt == &ptype_base[hash]) {
82d8a867 3967 if (++hash >= PTYPE_HASH_SIZE)
0e1256ff
SH
3968 return NULL;
3969 nxt = ptype_base[hash].next;
3970 }
3971found:
3972 return list_entry(nxt, struct packet_type, list);
3973}
3974
3975static void ptype_seq_stop(struct seq_file *seq, void *v)
72348a42 3976 __releases(RCU)
0e1256ff
SH
3977{
3978 rcu_read_unlock();
3979}
3980
0e1256ff
SH
3981static int ptype_seq_show(struct seq_file *seq, void *v)
3982{
3983 struct packet_type *pt = v;
3984
3985 if (v == SEQ_START_TOKEN)
3986 seq_puts(seq, "Type Device Function\n");
c346dca1 3987 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
0e1256ff
SH
3988 if (pt->type == htons(ETH_P_ALL))
3989 seq_puts(seq, "ALL ");
3990 else
3991 seq_printf(seq, "%04x", ntohs(pt->type));
3992
908cd2da
AD
3993 seq_printf(seq, " %-8s %pF\n",
3994 pt->dev ? pt->dev->name : "", pt->func);
0e1256ff
SH
3995 }
3996
3997 return 0;
3998}
3999
4000static const struct seq_operations ptype_seq_ops = {
4001 .start = ptype_seq_start,
4002 .next = ptype_seq_next,
4003 .stop = ptype_seq_stop,
4004 .show = ptype_seq_show,
4005};
4006
4007static int ptype_seq_open(struct inode *inode, struct file *file)
4008{
2feb27db
PE
4009 return seq_open_net(inode, file, &ptype_seq_ops,
4010 sizeof(struct seq_net_private));
0e1256ff
SH
4011}
4012
4013static const struct file_operations ptype_seq_fops = {
4014 .owner = THIS_MODULE,
4015 .open = ptype_seq_open,
4016 .read = seq_read,
4017 .llseek = seq_lseek,
2feb27db 4018 .release = seq_release_net,
0e1256ff
SH
4019};
4020
4021
4665079c 4022static int __net_init dev_proc_net_init(struct net *net)
1da177e4
LT
4023{
4024 int rc = -ENOMEM;
4025
881d966b 4026 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
1da177e4 4027 goto out;
881d966b 4028 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
1da177e4 4029 goto out_dev;
881d966b 4030 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
457c4cbc 4031 goto out_softnet;
0e1256ff 4032
881d966b 4033 if (wext_proc_init(net))
457c4cbc 4034 goto out_ptype;
1da177e4
LT
4035 rc = 0;
4036out:
4037 return rc;
457c4cbc 4038out_ptype:
881d966b 4039 proc_net_remove(net, "ptype");
1da177e4 4040out_softnet:
881d966b 4041 proc_net_remove(net, "softnet_stat");
1da177e4 4042out_dev:
881d966b 4043 proc_net_remove(net, "dev");
1da177e4
LT
4044 goto out;
4045}
881d966b 4046
4665079c 4047static void __net_exit dev_proc_net_exit(struct net *net)
881d966b
EB
4048{
4049 wext_proc_exit(net);
4050
4051 proc_net_remove(net, "ptype");
4052 proc_net_remove(net, "softnet_stat");
4053 proc_net_remove(net, "dev");
4054}
4055
022cbae6 4056static struct pernet_operations __net_initdata dev_proc_ops = {
881d966b
EB
4057 .init = dev_proc_net_init,
4058 .exit = dev_proc_net_exit,
4059};
4060
4061static int __init dev_proc_init(void)
4062{
4063 return register_pernet_subsys(&dev_proc_ops);
4064}
1da177e4
LT
4065#else
4066#define dev_proc_init() 0
4067#endif /* CONFIG_PROC_FS */
4068
4069
4070/**
4071 * netdev_set_master - set up master/slave pair
4072 * @slave: slave device
4073 * @master: new master device
4074 *
4075 * Changes the master device of the slave. Pass %NULL to break the
4076 * bonding. The caller must hold the RTNL semaphore. On a failure
4077 * a negative errno code is returned. On success the reference counts
4078 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
4079 * function returns zero.
4080 */
4081int netdev_set_master(struct net_device *slave, struct net_device *master)
4082{
4083 struct net_device *old = slave->master;
4084
4085 ASSERT_RTNL();
4086
4087 if (master) {
4088 if (old)
4089 return -EBUSY;
4090 dev_hold(master);
4091 }
4092
4093 slave->master = master;
4ec93edb 4094
283f2fe8
ED
4095 if (old) {
4096 synchronize_net();
1da177e4 4097 dev_put(old);
283f2fe8 4098 }
1da177e4
LT
4099 if (master)
4100 slave->flags |= IFF_SLAVE;
4101 else
4102 slave->flags &= ~IFF_SLAVE;
4103
4104 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4105 return 0;
4106}
d1b19dff 4107EXPORT_SYMBOL(netdev_set_master);
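
/* Editor's sketch, not part of dev.c: a typical enslave/release sequence
 * around netdev_set_master(), roughly how a bonding-style driver would
 * use it. example_enslave() is a hypothetical name; both devices must be
 * valid and the caller must hold the RTNL semaphore.
 */
static int example_enslave(struct net_device *bond_dev,
			   struct net_device *slave_dev)
{
	int err;

	ASSERT_RTNL();
	err = netdev_set_master(slave_dev, bond_dev);
	if (err)
		return err;	/* -EBUSY if slave_dev already has a master */
	/* ... driver-specific slave setup would follow here ... */
	return 0;
}

/* Releasing is the mirror image: netdev_set_master(slave_dev, NULL); */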
1da177e4 4108
b6c40d68
PM
4109static void dev_change_rx_flags(struct net_device *dev, int flags)
4110{
d314774c
SH
4111 const struct net_device_ops *ops = dev->netdev_ops;
4112
4113 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4114 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
4115}
4116
dad9b335 4117static int __dev_set_promiscuity(struct net_device *dev, int inc)
1da177e4
LT
4118{
4119 unsigned short old_flags = dev->flags;
8192b0c4
DH
4120 uid_t uid;
4121 gid_t gid;
1da177e4 4122
24023451
PM
4123 ASSERT_RTNL();
4124
dad9b335
WC
4125 dev->flags |= IFF_PROMISC;
4126 dev->promiscuity += inc;
4127 if (dev->promiscuity == 0) {
4128 /*
4129 * Avoid overflow.
4130 * If inc causes overflow, leave promisc untouched and return an error.
4131 */
4132 if (inc < 0)
4133 dev->flags &= ~IFF_PROMISC;
4134 else {
4135 dev->promiscuity -= inc;
4136 printk(KERN_WARNING "%s: promiscuity counter overflowed, "
4137 "set promiscuity failed; the promiscuity feature "
4138 "of the device may be broken.\n", dev->name);
4139 return -EOVERFLOW;
4140 }
4141 }
52609c0b 4142 if (dev->flags != old_flags) {
1da177e4
LT
4143 printk(KERN_INFO "device %s %s promiscuous mode\n",
4144 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4ec93edb 4145 "left");
8192b0c4
DH
4146 if (audit_enabled) {
4147 current_uid_gid(&uid, &gid);
7759db82
KHK
4148 audit_log(current->audit_context, GFP_ATOMIC,
4149 AUDIT_ANOM_PROMISCUOUS,
4150 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4151 dev->name, (dev->flags & IFF_PROMISC),
4152 (old_flags & IFF_PROMISC),
4153 audit_get_loginuid(current),
8192b0c4 4154 uid, gid,
7759db82 4155 audit_get_sessionid(current));
8192b0c4 4156 }
24023451 4157
b6c40d68 4158 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 4159 }
dad9b335 4160 return 0;
1da177e4
LT
4161}
4162
4417da66
PM
4163/**
4164 * dev_set_promiscuity - update promiscuity count on a device
4165 * @dev: device
4166 * @inc: modifier
4167 *
4168 * Add or remove promiscuity from a device. While the count in the device
4169 * remains above zero the interface remains promiscuous. Once it hits zero
4170 * the device reverts back to normal filtering operation. A negative inc
4171 * value is used to drop promiscuity on the device.
dad9b335 4172 * Return 0 if successful or a negative errno code on error.
4417da66 4173 */
dad9b335 4174int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66
PM
4175{
4176 unsigned short old_flags = dev->flags;
dad9b335 4177 int err;
4417da66 4178
dad9b335 4179 err = __dev_set_promiscuity(dev, inc);
4b5a698e 4180 if (err < 0)
dad9b335 4181 return err;
4417da66
PM
4182 if (dev->flags != old_flags)
4183 dev_set_rx_mode(dev);
dad9b335 4184 return err;
4417da66 4185}
d1b19dff 4186EXPORT_SYMBOL(dev_set_promiscuity);
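
/* Editor's sketch, not part of dev.c: balancing the promiscuity counter
 * from a hypothetical packet-capture user. Promiscuity is a counter, not
 * a flag; every +1 must eventually be matched by a -1 or the device stays
 * promiscuous. Both calls require the RTNL semaphore.
 */
static int capture_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* may fail with -EOVERFLOW */
}

static void capture_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);		/* drop our reference */
}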
4417da66 4187
1da177e4
LT
4188/**
4189 * dev_set_allmulti - update allmulti count on a device
4190 * @dev: device
4191 * @inc: modifier
4192 *
4193 * Add or remove reception of all multicast frames to a device. While the
4194 * count in the device remains above zero the interface remains listening
4195 * to all multicast frames. Once it hits zero the device reverts back to normal
4196 * filtering operation. A negative @inc value is used to drop the counter
4197 * when releasing a resource needing all multicasts.
dad9b335 4198 * Return 0 if successful or a negative errno code on error.
1da177e4
LT
4199 */
4200
dad9b335 4201int dev_set_allmulti(struct net_device *dev, int inc)
1da177e4
LT
4202{
4203 unsigned short old_flags = dev->flags;
4204
24023451
PM
4205 ASSERT_RTNL();
4206
1da177e4 4207 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
4208 dev->allmulti += inc;
4209 if (dev->allmulti == 0) {
4210 /*
4211 * Avoid overflow.
4212 * If inc causes overflow, untouch allmulti and return error.
4213 */
4214 if (inc < 0)
4215 dev->flags &= ~IFF_ALLMULTI;
4216 else {
4217 dev->allmulti -= inc;
4218 printk(KERN_WARNING "%s: allmulti counter overflowed, "
4219 "set allmulti failed; the allmulti feature of "
4220 "the device may be broken.\n", dev->name);
4221 return -EOVERFLOW;
4222 }
4223 }
24023451 4224 if (dev->flags ^ old_flags) {
b6c40d68 4225 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 4226 dev_set_rx_mode(dev);
24023451 4227 }
dad9b335 4228 return 0;
4417da66 4229}
d1b19dff 4230EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
4231
4232/*
4233 * Upload unicast and multicast address lists to device and
4234 * configure RX filtering. When the device doesn't support unicast
53ccaae1 4235 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
4236 * are present.
4237 */
4238void __dev_set_rx_mode(struct net_device *dev)
4239{
d314774c
SH
4240 const struct net_device_ops *ops = dev->netdev_ops;
4241
4417da66
PM
4242 /* dev_open will call this function so the list will stay sane. */
4243 if (!(dev->flags&IFF_UP))
4244 return;
4245
4246 if (!netif_device_present(dev))
40b77c94 4247 return;
4417da66 4248
d314774c
SH
4249 if (ops->ndo_set_rx_mode)
4250 ops->ndo_set_rx_mode(dev);
4417da66
PM
4251 else {
4252 /* Unicast address changes may only happen under the rtnl,
4253 * therefore calling __dev_set_promiscuity here is safe.
4254 */
32e7bfc4 4255 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4417da66
PM
4256 __dev_set_promiscuity(dev, 1);
4257 dev->uc_promisc = 1;
32e7bfc4 4258 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4417da66
PM
4259 __dev_set_promiscuity(dev, -1);
4260 dev->uc_promisc = 0;
4261 }
4262
d314774c
SH
4263 if (ops->ndo_set_multicast_list)
4264 ops->ndo_set_multicast_list(dev);
4417da66
PM
4265 }
4266}
4267
4268void dev_set_rx_mode(struct net_device *dev)
4269{
b9e40857 4270 netif_addr_lock_bh(dev);
4417da66 4271 __dev_set_rx_mode(dev);
b9e40857 4272 netif_addr_unlock_bh(dev);
1da177e4
LT
4273}
4274
f0db275a
SH
4275/**
4276 * dev_get_flags - get flags reported to userspace
4277 * @dev: device
4278 *
4279 * Get the combination of flag bits exported through APIs to userspace.
4280 */
1da177e4
LT
4281unsigned dev_get_flags(const struct net_device *dev)
4282{
4283 unsigned flags;
4284
4285 flags = (dev->flags & ~(IFF_PROMISC |
4286 IFF_ALLMULTI |
b00055aa
SR
4287 IFF_RUNNING |
4288 IFF_LOWER_UP |
4289 IFF_DORMANT)) |
1da177e4
LT
4290 (dev->gflags & (IFF_PROMISC |
4291 IFF_ALLMULTI));
4292
b00055aa
SR
4293 if (netif_running(dev)) {
4294 if (netif_oper_up(dev))
4295 flags |= IFF_RUNNING;
4296 if (netif_carrier_ok(dev))
4297 flags |= IFF_LOWER_UP;
4298 if (netif_dormant(dev))
4299 flags |= IFF_DORMANT;
4300 }
1da177e4
LT
4301
4302 return flags;
4303}
d1b19dff 4304EXPORT_SYMBOL(dev_get_flags);
1da177e4 4305
bd380811 4306int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 4307{
1da177e4 4308 int old_flags = dev->flags;
bd380811 4309 int ret;
1da177e4 4310
24023451
PM
4311 ASSERT_RTNL();
4312
1da177e4
LT
4313 /*
4314 * Set the flags on our device.
4315 */
4316
4317 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4318 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4319 IFF_AUTOMEDIA)) |
4320 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4321 IFF_ALLMULTI));
4322
4323 /*
4324 * Load in the correct multicast list now the flags have changed.
4325 */
4326
b6c40d68
PM
4327 if ((old_flags ^ flags) & IFF_MULTICAST)
4328 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 4329
4417da66 4330 dev_set_rx_mode(dev);
1da177e4
LT
4331
4332 /*
4333 * Have we downed the interface? We handle IFF_UP ourselves
4334 * according to user attempts to set it, rather than blindly
4335 * setting it.
4336 */
4337
4338 ret = 0;
4339 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
bd380811 4340 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4
LT
4341
4342 if (!ret)
4417da66 4343 dev_set_rx_mode(dev);
1da177e4
LT
4344 }
4345
1da177e4 4346 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff
ED
4347 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4348
1da177e4
LT
4349 dev->gflags ^= IFF_PROMISC;
4350 dev_set_promiscuity(dev, inc);
4351 }
4352
4353 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4354 is important. Some (broken) drivers set IFF_PROMISC when
4355 IFF_ALLMULTI is requested, without asking us and without reporting it.
4356 */
4357 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
4358 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4359
1da177e4
LT
4360 dev->gflags ^= IFF_ALLMULTI;
4361 dev_set_allmulti(dev, inc);
4362 }
4363
bd380811
PM
4364 return ret;
4365}
4366
4367void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4368{
4369 unsigned int changes = dev->flags ^ old_flags;
4370
4371 if (changes & IFF_UP) {
4372 if (dev->flags & IFF_UP)
4373 call_netdevice_notifiers(NETDEV_UP, dev);
4374 else
4375 call_netdevice_notifiers(NETDEV_DOWN, dev);
4376 }
4377
4378 if (dev->flags & IFF_UP &&
4379 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4380 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4381}
4382
4383/**
4384 * dev_change_flags - change device settings
4385 * @dev: device
4386 * @flags: device state flags
4387 *
4388 * Change settings on device based state flags. The flags are
4389 * in the userspace exported format.
4390 */
4391int dev_change_flags(struct net_device *dev, unsigned flags)
4392{
4393 int ret, changes;
4394 int old_flags = dev->flags;
4395
4396 ret = __dev_change_flags(dev, flags);
4397 if (ret < 0)
4398 return ret;
4399
4400 changes = old_flags ^ dev->flags;
7c355f53
TG
4401 if (changes)
4402 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
1da177e4 4403
bd380811 4404 __dev_notify_flags(dev, old_flags);
1da177e4
LT
4405 return ret;
4406}
d1b19dff 4407EXPORT_SYMBOL(dev_change_flags);
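
/* Editor's sketch, not part of dev.c: bringing an interface up through
 * the flags interface, mirroring what a SIOCSIFFLAGS ioctl does. The
 * flags are in the userspace-exported format that dev_get_flags()
 * returns; example_if_up() is a hypothetical name and the caller must
 * hold the RTNL semaphore.
 */
static int example_if_up(struct net_device *dev)
{
	unsigned flags = dev_get_flags(dev);

	return dev_change_flags(dev, flags | IFF_UP);
}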
1da177e4 4408
f0db275a
SH
4409/**
4410 * dev_set_mtu - Change maximum transfer unit
4411 * @dev: device
4412 * @new_mtu: new transfer unit
4413 *
4414 * Change the maximum transfer size of the network device.
4415 */
1da177e4
LT
4416int dev_set_mtu(struct net_device *dev, int new_mtu)
4417{
d314774c 4418 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4419 int err;
4420
4421 if (new_mtu == dev->mtu)
4422 return 0;
4423
4424 /* MTU must not be negative. */
4425 if (new_mtu < 0)
4426 return -EINVAL;
4427
4428 if (!netif_device_present(dev))
4429 return -ENODEV;
4430
4431 err = 0;
d314774c
SH
4432 if (ops->ndo_change_mtu)
4433 err = ops->ndo_change_mtu(dev, new_mtu);
1da177e4
LT
4434 else
4435 dev->mtu = new_mtu;
d314774c 4436
1da177e4 4437 if (!err && dev->flags & IFF_UP)
056925ab 4438 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
1da177e4
LT
4439 return err;
4440}
d1b19dff 4441EXPORT_SYMBOL(dev_set_mtu);
1da177e4 4442
f0db275a
SH
4443/**
4444 * dev_set_mac_address - Change Media Access Control Address
4445 * @dev: device
4446 * @sa: new address
4447 *
4448 * Change the hardware (MAC) address of the device
4449 */
1da177e4
LT
4450int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4451{
d314774c 4452 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4453 int err;
4454
d314774c 4455 if (!ops->ndo_set_mac_address)
1da177e4
LT
4456 return -EOPNOTSUPP;
4457 if (sa->sa_family != dev->type)
4458 return -EINVAL;
4459 if (!netif_device_present(dev))
4460 return -ENODEV;
d314774c 4461 err = ops->ndo_set_mac_address(dev, sa);
1da177e4 4462 if (!err)
056925ab 4463 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
4464 return err;
4465}
d1b19dff 4466EXPORT_SYMBOL(dev_set_mac_address);
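
/* Editor's sketch, not part of dev.c: feeding dev_set_mac_address() a
 * properly formed sockaddr. sa_family must equal dev->type (e.g.
 * ARPHRD_ETHER) or the call fails with -EINVAL. example_set_mac() is a
 * hypothetical name; new_mac points at ETH_ALEN bytes and the caller
 * holds the RTNL semaphore.
 */
static int example_set_mac(struct net_device *dev, const u8 *new_mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, new_mac, ETH_ALEN);
	return dev_set_mac_address(dev, &sa);
}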
1da177e4
LT
4467
4468/*
3710becf 4469 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
1da177e4 4470 */
14e3e079 4471static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
1da177e4
LT
4472{
4473 int err;
3710becf 4474 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
1da177e4
LT
4475
4476 if (!dev)
4477 return -ENODEV;
4478
4479 switch (cmd) {
d1b19dff
ED
4480 case SIOCGIFFLAGS: /* Get interface flags */
4481 ifr->ifr_flags = (short) dev_get_flags(dev);
4482 return 0;
1da177e4 4483
d1b19dff
ED
4484 case SIOCGIFMETRIC: /* Get the metric on the interface
4485 (currently unused) */
4486 ifr->ifr_metric = 0;
4487 return 0;
1da177e4 4488
d1b19dff
ED
4489 case SIOCGIFMTU: /* Get the MTU of a device */
4490 ifr->ifr_mtu = dev->mtu;
4491 return 0;
1da177e4 4492
d1b19dff
ED
4493 case SIOCGIFHWADDR:
4494 if (!dev->addr_len)
4495 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4496 else
4497 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4498 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4499 ifr->ifr_hwaddr.sa_family = dev->type;
4500 return 0;
1da177e4 4501
d1b19dff
ED
4502 case SIOCGIFSLAVE:
4503 err = -EINVAL;
4504 break;
14e3e079 4505
d1b19dff
ED
4506 case SIOCGIFMAP:
4507 ifr->ifr_map.mem_start = dev->mem_start;
4508 ifr->ifr_map.mem_end = dev->mem_end;
4509 ifr->ifr_map.base_addr = dev->base_addr;
4510 ifr->ifr_map.irq = dev->irq;
4511 ifr->ifr_map.dma = dev->dma;
4512 ifr->ifr_map.port = dev->if_port;
4513 return 0;
14e3e079 4514
d1b19dff
ED
4515 case SIOCGIFINDEX:
4516 ifr->ifr_ifindex = dev->ifindex;
4517 return 0;
14e3e079 4518
d1b19dff
ED
4519 case SIOCGIFTXQLEN:
4520 ifr->ifr_qlen = dev->tx_queue_len;
4521 return 0;
14e3e079 4522
d1b19dff
ED
4523 default:
4524 /* dev_ioctl() should ensure this case
4525 * is never reached
4526 */
4527 WARN_ON(1);
4528 err = -EINVAL;
4529 break;
14e3e079
JG
4530
4531 }
4532 return err;
4533}
4534
4535/*
4536 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4537 */
4538static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4539{
4540 int err;
4541 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5f2f6da7 4542 const struct net_device_ops *ops;
14e3e079
JG
4543
4544 if (!dev)
4545 return -ENODEV;
4546
5f2f6da7
JP
4547 ops = dev->netdev_ops;
4548
14e3e079 4549 switch (cmd) {
d1b19dff
ED
4550 case SIOCSIFFLAGS: /* Set interface flags */
4551 return dev_change_flags(dev, ifr->ifr_flags);
14e3e079 4552
d1b19dff
ED
4553 case SIOCSIFMETRIC: /* Set the metric on the interface
4554 (currently unused) */
4555 return -EOPNOTSUPP;
14e3e079 4556
d1b19dff
ED
4557 case SIOCSIFMTU: /* Set the MTU of a device */
4558 return dev_set_mtu(dev, ifr->ifr_mtu);
1da177e4 4559
d1b19dff
ED
4560 case SIOCSIFHWADDR:
4561 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
1da177e4 4562
d1b19dff
ED
4563 case SIOCSIFHWBROADCAST:
4564 if (ifr->ifr_hwaddr.sa_family != dev->type)
4565 return -EINVAL;
4566 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4567 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4568 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4569 return 0;
1da177e4 4570
d1b19dff
ED
4571 case SIOCSIFMAP:
4572 if (ops->ndo_set_config) {
1da177e4
LT
4573 if (!netif_device_present(dev))
4574 return -ENODEV;
d1b19dff
ED
4575 return ops->ndo_set_config(dev, &ifr->ifr_map);
4576 }
4577 return -EOPNOTSUPP;
1da177e4 4578
d1b19dff
ED
4579 case SIOCADDMULTI:
4580 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4581 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4582 return -EINVAL;
4583 if (!netif_device_present(dev))
4584 return -ENODEV;
22bedad3 4585 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
d1b19dff
ED
4586
4587 case SIOCDELMULTI:
4588 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4589 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4590 return -EINVAL;
4591 if (!netif_device_present(dev))
4592 return -ENODEV;
22bedad3 4593 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
1da177e4 4594
d1b19dff
ED
4595 case SIOCSIFTXQLEN:
4596 if (ifr->ifr_qlen < 0)
4597 return -EINVAL;
4598 dev->tx_queue_len = ifr->ifr_qlen;
4599 return 0;
1da177e4 4600
d1b19dff
ED
4601 case SIOCSIFNAME:
4602 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4603 return dev_change_name(dev, ifr->ifr_newname);
1da177e4 4604
d1b19dff
ED
4605 /*
4606 * Unknown or private ioctl
4607 */
4608 default:
4609 if ((cmd >= SIOCDEVPRIVATE &&
4610 cmd <= SIOCDEVPRIVATE + 15) ||
4611 cmd == SIOCBONDENSLAVE ||
4612 cmd == SIOCBONDRELEASE ||
4613 cmd == SIOCBONDSETHWADDR ||
4614 cmd == SIOCBONDSLAVEINFOQUERY ||
4615 cmd == SIOCBONDINFOQUERY ||
4616 cmd == SIOCBONDCHANGEACTIVE ||
4617 cmd == SIOCGMIIPHY ||
4618 cmd == SIOCGMIIREG ||
4619 cmd == SIOCSMIIREG ||
4620 cmd == SIOCBRADDIF ||
4621 cmd == SIOCBRDELIF ||
4622 cmd == SIOCSHWTSTAMP ||
4623 cmd == SIOCWANDEV) {
4624 err = -EOPNOTSUPP;
4625 if (ops->ndo_do_ioctl) {
4626 if (netif_device_present(dev))
4627 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4628 else
4629 err = -ENODEV;
4630 }
4631 } else
4632 err = -EINVAL;
1da177e4
LT
4633
4634 }
4635 return err;
4636}
4637
4638/*
4639 * This function handles all "interface"-type I/O control requests. The actual
4640 * 'doing' part of this is dev_ifsioc above.
4641 */
4642
4643/**
4644 * dev_ioctl - network device ioctl
c4ea43c5 4645 * @net: the applicable net namespace
1da177e4
LT
4646 * @cmd: command to issue
4647 * @arg: pointer to a struct ifreq in user space
4648 *
4649 * Issue ioctl functions to devices. This is normally called by the
4650 * user space syscall interfaces but can sometimes be useful for
4651 * other purposes. The return value is the return from the syscall if
4652 * positive or a negative errno code on error.
4653 */
4654
881d966b 4655int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1da177e4
LT
4656{
4657 struct ifreq ifr;
4658 int ret;
4659 char *colon;
4660
4661 /* One special case: SIOCGIFCONF takes ifconf argument
4662 and requires shared lock, because it sleeps writing
4663 to user space.
4664 */
4665
4666 if (cmd == SIOCGIFCONF) {
6756ae4b 4667 rtnl_lock();
881d966b 4668 ret = dev_ifconf(net, (char __user *) arg);
6756ae4b 4669 rtnl_unlock();
1da177e4
LT
4670 return ret;
4671 }
4672 if (cmd == SIOCGIFNAME)
881d966b 4673 return dev_ifname(net, (struct ifreq __user *)arg);
1da177e4
LT
4674
4675 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4676 return -EFAULT;
4677
4678 ifr.ifr_name[IFNAMSIZ-1] = 0;
4679
4680 colon = strchr(ifr.ifr_name, ':');
4681 if (colon)
4682 *colon = 0;
4683
4684 /*
4685 * See which interface the caller is talking about.
4686 */
4687
4688 switch (cmd) {
d1b19dff
ED
4689 /*
4690 * These ioctl calls:
4691 * - can be done by all.
4692 * - atomic and do not require locking.
4693 * - return a value
4694 */
4695 case SIOCGIFFLAGS:
4696 case SIOCGIFMETRIC:
4697 case SIOCGIFMTU:
4698 case SIOCGIFHWADDR:
4699 case SIOCGIFSLAVE:
4700 case SIOCGIFMAP:
4701 case SIOCGIFINDEX:
4702 case SIOCGIFTXQLEN:
4703 dev_load(net, ifr.ifr_name);
3710becf 4704 rcu_read_lock();
d1b19dff 4705 ret = dev_ifsioc_locked(net, &ifr, cmd);
3710becf 4706 rcu_read_unlock();
d1b19dff
ED
4707 if (!ret) {
4708 if (colon)
4709 *colon = ':';
4710 if (copy_to_user(arg, &ifr,
4711 sizeof(struct ifreq)))
4712 ret = -EFAULT;
4713 }
4714 return ret;
1da177e4 4715
d1b19dff
ED
4716 case SIOCETHTOOL:
4717 dev_load(net, ifr.ifr_name);
4718 rtnl_lock();
4719 ret = dev_ethtool(net, &ifr);
4720 rtnl_unlock();
4721 if (!ret) {
4722 if (colon)
4723 *colon = ':';
4724 if (copy_to_user(arg, &ifr,
4725 sizeof(struct ifreq)))
4726 ret = -EFAULT;
4727 }
4728 return ret;
1da177e4 4729
d1b19dff
ED
4730 /*
4731 * These ioctl calls:
4732 * - require superuser power.
4733 * - require strict serialization.
4734 * - return a value
4735 */
4736 case SIOCGMIIPHY:
4737 case SIOCGMIIREG:
4738 case SIOCSIFNAME:
4739 if (!capable(CAP_NET_ADMIN))
4740 return -EPERM;
4741 dev_load(net, ifr.ifr_name);
4742 rtnl_lock();
4743 ret = dev_ifsioc(net, &ifr, cmd);
4744 rtnl_unlock();
4745 if (!ret) {
4746 if (colon)
4747 *colon = ':';
4748 if (copy_to_user(arg, &ifr,
4749 sizeof(struct ifreq)))
4750 ret = -EFAULT;
4751 }
4752 return ret;
1da177e4 4753
d1b19dff
ED
4754 /*
4755 * These ioctl calls:
4756 * - require superuser power.
4757 * - require strict serialization.
4758 * - do not return a value
4759 */
4760 case SIOCSIFFLAGS:
4761 case SIOCSIFMETRIC:
4762 case SIOCSIFMTU:
4763 case SIOCSIFMAP:
4764 case SIOCSIFHWADDR:
4765 case SIOCSIFSLAVE:
4766 case SIOCADDMULTI:
4767 case SIOCDELMULTI:
4768 case SIOCSIFHWBROADCAST:
4769 case SIOCSIFTXQLEN:
4770 case SIOCSMIIREG:
4771 case SIOCBONDENSLAVE:
4772 case SIOCBONDRELEASE:
4773 case SIOCBONDSETHWADDR:
4774 case SIOCBONDCHANGEACTIVE:
4775 case SIOCBRADDIF:
4776 case SIOCBRDELIF:
4777 case SIOCSHWTSTAMP:
4778 if (!capable(CAP_NET_ADMIN))
4779 return -EPERM;
4780 /* fall through */
4781 case SIOCBONDSLAVEINFOQUERY:
4782 case SIOCBONDINFOQUERY:
4783 dev_load(net, ifr.ifr_name);
4784 rtnl_lock();
4785 ret = dev_ifsioc(net, &ifr, cmd);
4786 rtnl_unlock();
4787 return ret;
4788
4789 case SIOCGIFMEM:
4790 /* Get the per device memory space. We can add this but
4791 * currently do not support it */
4792 case SIOCSIFMEM:
4793 /* Set the per device memory buffer space.
4794 * Not applicable in our case */
4795 case SIOCSIFLINK:
4796 return -EINVAL;
4797
4798 /*
4799 * Unknown or private ioctl.
4800 */
4801 default:
4802 if (cmd == SIOCWANDEV ||
4803 (cmd >= SIOCDEVPRIVATE &&
4804 cmd <= SIOCDEVPRIVATE + 15)) {
881d966b 4805 dev_load(net, ifr.ifr_name);
1da177e4 4806 rtnl_lock();
881d966b 4807 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4 4808 rtnl_unlock();
d1b19dff
ED
4809 if (!ret && copy_to_user(arg, &ifr,
4810 sizeof(struct ifreq)))
4811 ret = -EFAULT;
1da177e4 4812 return ret;
d1b19dff
ED
4813 }
4814 /* Take care of Wireless Extensions */
4815 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4816 return wext_handle_ioctl(net, &ifr, cmd, arg);
4817 return -EINVAL;
1da177e4
LT
4818 }
4819}
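
/* Editor's sketch, not part of dev.c: the userspace side of the ioctl
 * dispatch above, reading an MTU with SIOCGIFMTU against the standard
 * socket API. Illustration only (hence the #if 0); error handling is
 * trimmed and "eth0" is an assumed interface name.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)	/* lands in dev_ifsioc_locked() */
		printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
	close(fd);
	return 0;
}
#endif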
4820
4821
4822/**
4823 * dev_new_index - allocate an ifindex
c4ea43c5 4824 * @net: the applicable net namespace
1da177e4
LT
4825 *
4826 * Returns a suitable unique value for a new device interface
4827 * number. The caller must hold the rtnl semaphore or the
4828 * dev_base_lock to be sure it remains unique.
4829 */
881d966b 4830static int dev_new_index(struct net *net)
1da177e4
LT
4831{
4832 static int ifindex;
4833 for (;;) {
4834 if (++ifindex <= 0)
4835 ifindex = 1;
881d966b 4836 if (!__dev_get_by_index(net, ifindex))
1da177e4
LT
4837 return ifindex;
4838 }
4839}
4840
1da177e4 4841/* Delayed registration/unregistration */
3b5b34fd 4842static LIST_HEAD(net_todo_list);
1da177e4 4843
6f05f629 4844static void net_set_todo(struct net_device *dev)
1da177e4 4845{
1da177e4 4846 list_add_tail(&dev->todo_list, &net_todo_list);
1da177e4
LT
4847}
4848
9b5e383c 4849static void rollback_registered_many(struct list_head *head)
93ee31f1 4850{
e93737b0 4851 struct net_device *dev, *tmp;
9b5e383c 4852
93ee31f1
DL
4853 BUG_ON(dev_boot_phase);
4854 ASSERT_RTNL();
4855
e93737b0 4856 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 4857 /* Some devices call this without having registered,
e93737b0
KK
4858 * for initialization unwind. Remove those
4859 * devices and proceed with the remaining.
9b5e383c
ED
4860 */
4861 if (dev->reg_state == NETREG_UNINITIALIZED) {
4862 pr_debug("unregister_netdevice: device %s/%p never "
4863 "was registered\n", dev->name, dev);
93ee31f1 4864
9b5e383c 4865 WARN_ON(1);
e93737b0
KK
4866 list_del(&dev->unreg_list);
4867 continue;
9b5e383c 4868 }
93ee31f1 4869
9b5e383c 4870 BUG_ON(dev->reg_state != NETREG_REGISTERED);
93ee31f1 4871
9b5e383c
ED
4872 /* If device is running, close it first. */
4873 dev_close(dev);
93ee31f1 4874
9b5e383c
ED
4875 /* And unlink it from device chain. */
4876 unlist_netdevice(dev);
93ee31f1 4877
9b5e383c
ED
4878 dev->reg_state = NETREG_UNREGISTERING;
4879 }
93ee31f1
DL
4880
4881 synchronize_net();
4882
9b5e383c
ED
4883 list_for_each_entry(dev, head, unreg_list) {
4884 /* Shutdown queueing discipline. */
4885 dev_shutdown(dev);
93ee31f1
DL
4886
4887
9b5e383c
ED
4888 /* Notify protocols that we are about to destroy
4889 this device. They should clean up all their state.
4890 */
4891 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 4892
a2835763
PM
4893 if (!dev->rtnl_link_ops ||
4894 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4895 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4896
9b5e383c
ED
4897 /*
4898 * Flush the unicast and multicast chains
4899 */
a748ee24 4900 dev_uc_flush(dev);
22bedad3 4901 dev_mc_flush(dev);
93ee31f1 4902
9b5e383c
ED
4903 if (dev->netdev_ops->ndo_uninit)
4904 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 4905
9b5e383c
ED
4906 /* Notifier chain MUST detach us from master device. */
4907 WARN_ON(dev->master);
93ee31f1 4908
9b5e383c
ED
4909 /* Remove entries from kobject tree */
4910 netdev_unregister_kobject(dev);
4911 }
93ee31f1 4912
a5ee1551 4913 /* Process any work delayed until the end of the batch */
e5e26d75 4914 dev = list_first_entry(head, struct net_device, unreg_list);
a5ee1551 4915 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
93ee31f1 4916
ef885afb 4917 rcu_barrier();
395264d5 4918
a5ee1551 4919 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
4920 dev_put(dev);
4921}
4922
4923static void rollback_registered(struct net_device *dev)
4924{
4925 LIST_HEAD(single);
4926
4927 list_add(&dev->unreg_list, &single);
4928 rollback_registered_many(&single);
93ee31f1
DL
4929}
4930
e8a0464c
DM
4931static void __netdev_init_queue_locks_one(struct net_device *dev,
4932 struct netdev_queue *dev_queue,
4933 void *_unused)
c773e847
DM
4934{
4935 spin_lock_init(&dev_queue->_xmit_lock);
cf508b12 4936 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
c773e847
DM
4937 dev_queue->xmit_lock_owner = -1;
4938}
4939
4940static void netdev_init_queue_locks(struct net_device *dev)
4941{
e8a0464c 4942 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
c773e847
DM
4943}
4944
b63365a2
HX
4945unsigned long netdev_fix_features(unsigned long features, const char *name)
4946{
4947 /* Fix illegal SG+CSUM combinations. */
4948 if ((features & NETIF_F_SG) &&
4949 !(features & NETIF_F_ALL_CSUM)) {
4950 if (name)
4951 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4952 "checksum feature.\n", name);
4953 features &= ~NETIF_F_SG;
4954 }
4955
4956 /* TSO requires that SG is present as well. */
4957 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4958 if (name)
4959 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4960 "SG feature.\n", name);
4961 features &= ~NETIF_F_TSO;
4962 }
4963
4964 if (features & NETIF_F_UFO) {
4965 if (!(features & NETIF_F_GEN_CSUM)) {
4966 if (name)
4967 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4968 "since no NETIF_F_HW_CSUM feature.\n",
4969 name);
4970 features &= ~NETIF_F_UFO;
4971 }
4972
4973 if (!(features & NETIF_F_SG)) {
4974 if (name)
4975 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4976 "since no NETIF_F_SG feature.\n", name);
4977 features &= ~NETIF_F_UFO;
4978 }
4979 }
4980
4981 return features;
4982}
4983EXPORT_SYMBOL(netdev_fix_features);
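
/* Editor's sketch, not part of dev.c: netdev_fix_features() applied to an
 * inconsistent feature request. Asking for SG and TSO without any
 * checksum offload yields 0: SG is dropped first (no NETIF_F_ALL_CSUM),
 * then TSO is dropped for lack of SG. example_fix() is a hypothetical
 * name.
 */
static unsigned long example_fix(void)
{
	unsigned long wanted = NETIF_F_SG | NETIF_F_TSO;

	return netdev_fix_features(wanted, "example0");	/* returns 0 */
}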
4984
fc4a7489
PM
4985/**
4986 * netif_stacked_transfer_operstate - transfer operstate
4987 * @rootdev: the root or lower level device to transfer state from
4988 * @dev: the device to transfer operstate to
4989 *
4990 * Transfer operational state from root to device. This is normally
4991 * called when a stacking relationship exists between the root
4992 * device and the device (a leaf device).
4993 */
4994void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4995 struct net_device *dev)
4996{
4997 if (rootdev->operstate == IF_OPER_DORMANT)
4998 netif_dormant_on(dev);
4999 else
5000 netif_dormant_off(dev);
5001
5002 if (netif_carrier_ok(rootdev)) {
5003 if (!netif_carrier_ok(dev))
5004 netif_carrier_on(dev);
5005 } else {
5006 if (netif_carrier_ok(dev))
5007 netif_carrier_off(dev);
5008 }
5009}
5010EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5011
1b4bf461
ED
5012static int netif_alloc_rx_queues(struct net_device *dev)
5013{
5014#ifdef CONFIG_RPS
5015 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 5016 struct netdev_rx_queue *rx;
1b4bf461 5017
bd25fa7b 5018 BUG_ON(count < 1);
1b4bf461 5019
bd25fa7b
TH
5020 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5021 if (!rx) {
5022 pr_err("netdev: Unable to allocate %u rx queues.\n", count);
5023 return -ENOMEM;
1b4bf461 5024 }
bd25fa7b
TH
5025 dev->_rx = rx;
5026
5027 /*
5028 * Set a pointer to first element in the array which holds the
5029 * reference count.
5030 */
5031 for (i = 0; i < count; i++)
5032 rx[i].first = rx;
1b4bf461
ED
5033#endif
5034 return 0;
5035}
5036
1da177e4
LT
5037/**
5038 * register_netdevice - register a network device
5039 * @dev: device to register
5040 *
5041 * Take a completed network device structure and add it to the kernel
5042 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5043 * chain. 0 is returned on success. A negative errno code is returned
5044 * on a failure to set up the device, or if the name is a duplicate.
5045 *
5046 * Callers must hold the rtnl semaphore. You may want
5047 * register_netdev() instead of this.
5048 *
5049 * BUGS:
5050 * The locking appears insufficient to guarantee two parallel registers
5051 * will not get the same name.
5052 */
5053
5054int register_netdevice(struct net_device *dev)
5055{
1da177e4 5056 int ret;
d314774c 5057 struct net *net = dev_net(dev);
1da177e4
LT
5058
5059 BUG_ON(dev_boot_phase);
5060 ASSERT_RTNL();
5061
b17a7c17
SH
5062 might_sleep();
5063
1da177e4
LT
5064 /* When net_device's are persistent, this will be fatal. */
5065 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 5066 BUG_ON(!net);
1da177e4 5067
f1f28aa3 5068 spin_lock_init(&dev->addr_list_lock);
cf508b12 5069 netdev_set_addr_lockdep_class(dev);
c773e847 5070 netdev_init_queue_locks(dev);
1da177e4 5071
1da177e4
LT
5072 dev->iflink = -1;
5073
1b4bf461
ED
5074 ret = netif_alloc_rx_queues(dev);
5075 if (ret)
5076 goto out;
0a9627f2 5077
1da177e4 5078 /* Init, if this function is available */
d314774c
SH
5079 if (dev->netdev_ops->ndo_init) {
5080 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
5081 if (ret) {
5082 if (ret > 0)
5083 ret = -EIO;
90833aa4 5084 goto out;
1da177e4
LT
5085 }
5086 }
4ec93edb 5087
8ce6cebc 5088 ret = dev_get_valid_name(dev, dev->name, 0);
d9031024 5089 if (ret)
7ce1b0ed 5090 goto err_uninit;
1da177e4 5091
881d966b 5092 dev->ifindex = dev_new_index(net);
1da177e4
LT
5093 if (dev->iflink == -1)
5094 dev->iflink = dev->ifindex;
5095
d212f87b
SH
5096 /* Fix illegal checksum combinations */
5097 if ((dev->features & NETIF_F_HW_CSUM) &&
5098 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5099 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5100 dev->name);
5101 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5102 }
5103
5104 if ((dev->features & NETIF_F_NO_CSUM) &&
5105 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5106 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5107 dev->name);
5108 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5109 }
5110
b63365a2 5111 dev->features = netdev_fix_features(dev->features, dev->name);
1da177e4 5112
e5a4a72d
LB
5113 /* Enable software GSO if SG is supported. */
5114 if (dev->features & NETIF_F_SG)
5115 dev->features |= NETIF_F_GSO;
5116
c5256c51
ED
5117 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5118 * vlan_dev_init() will do the dev->features check, so these features
5119 * are enabled only if supported by underlying device.
16c3ea78 5120 */
c5256c51 5121 dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
16c3ea78 5122
7ffbe3fd
JB
5123 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5124 ret = notifier_to_errno(ret);
5125 if (ret)
5126 goto err_uninit;
5127
8b41d188 5128 ret = netdev_register_kobject(dev);
b17a7c17 5129 if (ret)
7ce1b0ed 5130 goto err_uninit;
b17a7c17
SH
5131 dev->reg_state = NETREG_REGISTERED;
5132
1da177e4
LT
5133 /*
5134 * Default initial state at registry is that the
5135 * device is present.
5136 */
5137
5138 set_bit(__LINK_STATE_PRESENT, &dev->state);
5139
1da177e4 5140 dev_init_scheduler(dev);
1da177e4 5141 dev_hold(dev);
ce286d32 5142 list_netdevice(dev);
1da177e4
LT
5143
5144 /* Notify protocols that a new device appeared. */
056925ab 5145 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 5146 ret = notifier_to_errno(ret);
93ee31f1
DL
5147 if (ret) {
5148 rollback_registered(dev);
5149 dev->reg_state = NETREG_UNREGISTERED;
5150 }
d90a909e
EB
5151 /*
5152 * Prevent userspace races by waiting until the network
5153 * device is fully setup before sending notifications.
5154 */
a2835763
PM
5155 if (!dev->rtnl_link_ops ||
5156 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5157 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1da177e4
LT
5158
5159out:
5160 return ret;
7ce1b0ed
HX
5161
5162err_uninit:
d314774c
SH
5163 if (dev->netdev_ops->ndo_uninit)
5164 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 5165 goto out;
1da177e4 5166}
d1b19dff 5167EXPORT_SYMBOL(register_netdevice);
1da177e4 5168
937f1ba5
BH
5169/**
5170 * init_dummy_netdev - init a dummy network device for NAPI
5171 * @dev: device to init
5172 *
5173 * This takes a network device structure and initializes the minimum
5174 * number of fields so it can be used to schedule NAPI polls without
5175 * registering a full blown interface. This is to be used by drivers
5176 * that need to tie several hardware interfaces to a single NAPI
5177 * poll scheduler due to HW limitations.
5178 */
5179int init_dummy_netdev(struct net_device *dev)
5180{
5181 /* Clear everything. Note we don't initialize spinlocks
5182 * as they aren't supposed to be taken by any of the
5183 * NAPI code and this dummy netdev is supposed to be
5184 * only ever used for NAPI polls
5185 */
5186 memset(dev, 0, sizeof(struct net_device));
5187
5188 /* make sure we BUG if trying to hit standard
5189 * register/unregister code path
5190 */
5191 dev->reg_state = NETREG_DUMMY;
5192
937f1ba5
BH
5193 /* NAPI wants this */
5194 INIT_LIST_HEAD(&dev->napi_list);
5195
5196 /* a dummy interface is started by default */
5197 set_bit(__LINK_STATE_PRESENT, &dev->state);
5198 set_bit(__LINK_STATE_START, &dev->state);
5199
29b4433d
ED
5200 /* Note: we don't allocate pcpu_refcnt for dummy devices,
5201 * because users of this 'device' don't need to change
5202 * its refcount.
5203 */
5204
937f1ba5
BH
5205 return 0;
5206}
5207EXPORT_SYMBOL_GPL(init_dummy_netdev);
5208
5209
1da177e4
LT
5210/**
5211 * register_netdev - register a network device
5212 * @dev: device to register
5213 *
5214 * Take a completed network device structure and add it to the kernel
5215 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5216 * chain. 0 is returned on success. A negative errno code is returned
5217 * on a failure to set up the device, or if the name is a duplicate.
5218 *
38b4da38 5219 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
5220 * and expands the device name if you passed a format string to
5221 * alloc_netdev.
5222 */
5223int register_netdev(struct net_device *dev)
5224{
5225 int err;
5226
5227 rtnl_lock();
5228
5229 /*
5230 * If the name is a format string the caller wants us to do a
5231 * name allocation.
5232 */
5233 if (strchr(dev->name, '%')) {
5234 err = dev_alloc_name(dev, dev->name);
5235 if (err < 0)
5236 goto out;
5237 }
4ec93edb 5238
1da177e4
LT
5239 err = register_netdevice(dev);
5240out:
5241 rtnl_unlock();
5242 return err;
5243}
5244EXPORT_SYMBOL(register_netdev);
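
/* Editor's sketch, not part of dev.c: the canonical registration path a
 * single-queue Ethernet driver takes through the helpers above.
 * struct foo_priv stands in for hypothetical driver-private state;
 * ether_setup() is the stock setup callback for Ethernet-class devices.
 */
struct foo_priv {
	struct napi_struct napi;	/* hypothetical driver state */
};

static struct net_device *example_register(void)
{
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct foo_priv), "foo%d",
			      ether_setup, 1);
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {	/* takes rtnl, expands "foo%d" */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}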
5245
29b4433d
ED
5246int netdev_refcnt_read(const struct net_device *dev)
5247{
5248 int i, refcnt = 0;
5249
5250 for_each_possible_cpu(i)
5251 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5252 return refcnt;
5253}
5254EXPORT_SYMBOL(netdev_refcnt_read);
5255
1da177e4
LT
5256/*
5257 * netdev_wait_allrefs - wait until all references are gone.
5258 *
5259 * This is called when unregistering network devices.
5260 *
5261 * Any protocol or device that holds a reference should register
5262 * for netdevice notification, and cleanup and put back the
5263 * reference if they receive an UNREGISTER event.
5264 * We can get stuck here if buggy protocols don't correctly
4ec93edb 5265 * call dev_put.
1da177e4
LT
5266 */
5267static void netdev_wait_allrefs(struct net_device *dev)
5268{
5269 unsigned long rebroadcast_time, warning_time;
29b4433d 5270 int refcnt;
1da177e4 5271
e014debe
ED
5272 linkwatch_forget_dev(dev);
5273
1da177e4 5274 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
5275 refcnt = netdev_refcnt_read(dev);
5276
5277 while (refcnt != 0) {
1da177e4 5278 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 5279 rtnl_lock();
1da177e4
LT
5280
5281 /* Rebroadcast unregister notification */
056925ab 5282 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5283 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
395264d5 5284 * should have already handled it the first time */
1da177e4
LT
5285
5286 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5287 &dev->state)) {
5288 /* We must not have linkwatch events
5289 * pending on unregister. If this
5290 * happens, we simply run the queue
5291 * unscheduled, resulting in a noop
5292 * for this device.
5293 */
5294 linkwatch_run_queue();
5295 }
5296
6756ae4b 5297 __rtnl_unlock();
1da177e4
LT
5298
5299 rebroadcast_time = jiffies;
5300 }
5301
5302 msleep(250);
5303
29b4433d
ED
5304 refcnt = netdev_refcnt_read(dev);
5305
1da177e4
LT
5306 if (time_after(jiffies, warning_time + 10 * HZ)) {
5307 printk(KERN_EMERG "unregister_netdevice: "
5308 "waiting for %s to become free. Usage "
5309 "count = %d\n",
29b4433d 5310 dev->name, refcnt);
1da177e4
LT
5311 warning_time = jiffies;
5312 }
5313 }
5314}
5315
5316/* The sequence is:
5317 *
5318 * rtnl_lock();
5319 * ...
5320 * register_netdevice(x1);
5321 * register_netdevice(x2);
5322 * ...
5323 * unregister_netdevice(y1);
5324 * unregister_netdevice(y2);
5325 * ...
5326 * rtnl_unlock();
5327 * free_netdev(y1);
5328 * free_netdev(y2);
5329 *
58ec3b4d 5330 * We are invoked by rtnl_unlock().
1da177e4 5331 * This allows us to deal with problems:
b17a7c17 5332 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
5333 * without deadlocking with linkwatch via keventd.
5334 * 2) Since we run with the RTNL semaphore not held, we can sleep
5335 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
5336 *
5337 * We must not return until all unregister events added during
5338 * the interval the lock was held have been completed.
1da177e4 5339 */
1da177e4
LT
5340void netdev_run_todo(void)
5341{
626ab0e6 5342 struct list_head list;
1da177e4 5343
1da177e4 5344 /* Snapshot list, allow later requests */
626ab0e6 5345 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
5346
5347 __rtnl_unlock();
626ab0e6 5348
1da177e4
LT
5349 while (!list_empty(&list)) {
5350 struct net_device *dev
e5e26d75 5351 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
5352 list_del(&dev->todo_list);
5353
b17a7c17
SH
5354 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5355 printk(KERN_ERR "network todo '%s' but state %d\n",
5356 dev->name, dev->reg_state);
5357 dump_stack();
5358 continue;
5359 }
1da177e4 5360
b17a7c17 5361 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 5362
152102c7 5363 on_each_cpu(flush_backlog, dev, 1);
6e583ce5 5364
b17a7c17 5365 netdev_wait_allrefs(dev);
1da177e4 5366
b17a7c17 5367 /* paranoia */
29b4433d 5368 BUG_ON(netdev_refcnt_read(dev));
95ae6b22 5369 WARN_ON(rcu_dereference_raw(dev->ip_ptr));
547b792c
IJ
5370 WARN_ON(dev->ip6_ptr);
5371 WARN_ON(dev->dn_ptr);
1da177e4 5372
b17a7c17
SH
5373 if (dev->destructor)
5374 dev->destructor(dev);
9093bbb2
SH
5375
5376 /* Free network device */
5377 kobject_put(&dev->dev.kobj);
1da177e4 5378 }
1da177e4
LT
5379}
5380
d83345ad
ED
5381/**
5382 * dev_txq_stats_fold - fold tx_queues stats
5383 * @dev: device to get statistics from
3cfde79c 5384 * @stats: struct rtnl_link_stats64 to hold results
d83345ad
ED
5385 */
5386void dev_txq_stats_fold(const struct net_device *dev,
3cfde79c 5387 struct rtnl_link_stats64 *stats)
d83345ad 5388{
bd27290a 5389 u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
d83345ad
ED
5390 unsigned int i;
5391 struct netdev_queue *txq;
5392
5393 for (i = 0; i < dev->num_tx_queues; i++) {
5394 txq = netdev_get_tx_queue(dev, i);
bd27290a 5395 spin_lock_bh(&txq->_xmit_lock);
d83345ad
ED
5396 tx_bytes += txq->tx_bytes;
5397 tx_packets += txq->tx_packets;
5398 tx_dropped += txq->tx_dropped;
bd27290a 5399 spin_unlock_bh(&txq->_xmit_lock);
d83345ad
ED
5400 }
5401 if (tx_bytes || tx_packets || tx_dropped) {
5402 stats->tx_bytes = tx_bytes;
5403 stats->tx_packets = tx_packets;
5404 stats->tx_dropped = tx_dropped;
5405 }
5406}
5407EXPORT_SYMBOL(dev_txq_stats_fold);
5408
3cfde79c
BH
5409/* Convert net_device_stats to rtnl_link_stats64. They have the same
5410 * fields in the same order, with only the type differing.
5411 */
5412static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5413 const struct net_device_stats *netdev_stats)
5414{
5415#if BITS_PER_LONG == 64
5416 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5417 memcpy(stats64, netdev_stats, sizeof(*stats64));
5418#else
5419 size_t i, n = sizeof(*stats64) / sizeof(u64);
5420 const unsigned long *src = (const unsigned long *)netdev_stats;
5421 u64 *dst = (u64 *)stats64;
5422
5423 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5424 sizeof(*stats64) / sizeof(u64));
5425 for (i = 0; i < n; i++)
5426 dst[i] = src[i];
5427#endif
5428}
5429
eeda3fd6
SH
5430/**
5431 * dev_get_stats - get network device statistics
5432 * @dev: device to get statistics from
28172739 5433 * @storage: place to store stats
eeda3fd6 5434 *
d7753516
BH
5435 * Get network statistics from device. Return @storage.
5436 * The device driver may provide its own method by setting
5437 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5438 * otherwise the internal statistics structure is used.
eeda3fd6 5439 */
d7753516
BH
5440struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5441 struct rtnl_link_stats64 *storage)
7004bf25 5442{
eeda3fd6
SH
5443 const struct net_device_ops *ops = dev->netdev_ops;
5444
28172739
ED
5445 if (ops->ndo_get_stats64) {
5446 memset(storage, 0, sizeof(*storage));
caf586e5
ED
5447 ops->ndo_get_stats64(dev, storage);
5448 } else if (ops->ndo_get_stats) {
3cfde79c 5449 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
5450 } else {
5451 netdev_stats_to_stats64(storage, &dev->stats);
5452 dev_txq_stats_fold(dev, storage);
28172739 5453 }
caf586e5 5454 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
28172739 5455 return storage;
c45d286e 5456}
eeda3fd6 5457EXPORT_SYMBOL(dev_get_stats);
c45d286e 5458
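On the consumer side, dev_get_stats() fills the caller-provided storage and returns it, so an on-stack structure suffices; a brief sketch in the style of the sysfs statistics readers (the wrapper name is illustrative):

#include <linux/netdevice.h>

static u64 example_rx_bytes(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	return stats->rx_bytes;
}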
dc2b4847 5459static void netdev_init_one_queue(struct net_device *dev,
e8a0464c
DM
5460 struct netdev_queue *queue,
5461 void *_unused)
dc2b4847 5462{
dc2b4847
DM
5463 queue->dev = dev;
5464}
5465
bb949fbd
DM
5466static void netdev_init_queues(struct net_device *dev)
5467{
e8a0464c 5468 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
c3f26a26 5469 spin_lock_init(&dev->tx_global_lock);
bb949fbd
DM
5470}
5471
24824a09
ED
5472struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5473{
5474 struct netdev_queue *queue = dev_ingress_queue(dev);
5475
5476#ifdef CONFIG_NET_CLS_ACT
5477 if (queue)
5478 return queue;
5479 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5480 if (!queue)
5481 return NULL;
5482 netdev_init_one_queue(dev, queue, NULL);
5483 __netdev_init_queue_locks_one(dev, queue, NULL);
5484 queue->qdisc = &noop_qdisc;
5485 queue->qdisc_sleeping = &noop_qdisc;
5486 rcu_assign_pointer(dev->ingress_queue, queue);
5487#endif
5488 return queue;
5489}
5490
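The qdisc layer creates the ingress queue lazily, on first attach of an ingress qdisc; a hedged sketch of such a call site (names illustrative, error handling simplified):

#include <linux/netdevice.h>

/* Caller must hold the RTNL lock. */
static int example_attach_ingress(struct net_device *dev)
{
	struct netdev_queue *q = dev_ingress_queue_create(dev);

	if (!q)
		return -ENOMEM;	/* allocation failed, or CONFIG_NET_CLS_ACT is off */
	/* ... graft the ingress qdisc onto q ... */
	return 0;
}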
1da177e4 5491/**
f25f4e44 5492 * alloc_netdev_mq - allocate network device
1da177e4
LT
5493 * @sizeof_priv: size of private data to allocate space for
5494 * @name: device name format string
5495 * @setup: callback to initialize device
f25f4e44 5496 * @queue_count: the number of subqueues to allocate
1da177e4
LT
5497 *
5498 * Allocates a struct net_device with private data area for driver use
f25f4e44
PWJ
5499 * and performs basic initialization. Also allocates subqueue structs
5500 * for each queue on the device at the end of the netdevice.
1da177e4 5501 */
f25f4e44
PWJ
5502struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5503 void (*setup)(struct net_device *), unsigned int queue_count)
1da177e4 5504{
e8a0464c 5505 struct netdev_queue *tx;
1da177e4 5506 struct net_device *dev;
7943986c 5507 size_t alloc_size;
1ce8e7b5 5508 struct net_device *p;
1da177e4 5509
b6fe17d6
SH
5510 BUG_ON(strlen(name) >= sizeof(dev->name));
5511
55513fb4
TH
5512 if (queue_count < 1) {
5513 pr_err("alloc_netdev: Unable to allocate device "
5514 "with zero queues.\n");
5515 return NULL;
5516 }
5517
fd2ea0a7 5518 alloc_size = sizeof(struct net_device);
d1643d24
AD
5519 if (sizeof_priv) {
5520 /* ensure 32-byte alignment of private area */
1ce8e7b5 5521 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
5522 alloc_size += sizeof_priv;
5523 }
5524 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 5525 alloc_size += NETDEV_ALIGN - 1;
1da177e4 5526
31380de9 5527 p = kzalloc(alloc_size, GFP_KERNEL);
1da177e4 5528 if (!p) {
b6fe17d6 5529 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
1da177e4
LT
5530 return NULL;
5531 }
1da177e4 5532
7943986c 5533 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
e8a0464c
DM
5534 if (!tx) {
5535 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5536 "tx qdiscs.\n");
ab9c73cc 5537 goto free_p;
e8a0464c
DM
5538 }
5539
0a9627f2 5540
1ce8e7b5 5541 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 5542 dev->padded = (char *)dev - (char *)p;
ab9c73cc 5543
29b4433d
ED
5544 dev->pcpu_refcnt = alloc_percpu(int);
5545 if (!dev->pcpu_refcnt)
1b4bf461 5546 goto free_tx;
ab9c73cc 5547
29b4433d
ED
5548 if (dev_addr_init(dev))
5549 goto free_pcpu;
5550
22bedad3 5551 dev_mc_init(dev);
a748ee24 5552 dev_uc_init(dev);
ccffad25 5553
c346dca1 5554 dev_net_set(dev, &init_net);
1da177e4 5555
e8a0464c
DM
5556 dev->_tx = tx;
5557 dev->num_tx_queues = queue_count;
fd2ea0a7 5558 dev->real_num_tx_queues = queue_count;
e8a0464c 5559
df334545 5560#ifdef CONFIG_RPS
0a9627f2 5561 dev->num_rx_queues = queue_count;
62fe0b40 5562 dev->real_num_rx_queues = queue_count;
df334545 5563#endif
0a9627f2 5564
82cc1a7a 5565 dev->gso_max_size = GSO_MAX_SIZE;
1da177e4 5566
bb949fbd
DM
5567 netdev_init_queues(dev);
5568
15682bc4
PWJ
5569 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5570 dev->ethtool_ntuple_list.count = 0;
d565b0a1 5571 INIT_LIST_HEAD(&dev->napi_list);
9fdce099 5572 INIT_LIST_HEAD(&dev->unreg_list);
e014debe 5573 INIT_LIST_HEAD(&dev->link_watch_list);
93f154b5 5574 dev->priv_flags = IFF_XMIT_DST_RELEASE;
1da177e4
LT
5575 setup(dev);
5576 strcpy(dev->name, name);
5577 return dev;
ab9c73cc
JP
5578
5579free_tx:
5580 kfree(tx);
29b4433d
ED
5581free_pcpu:
5582 free_percpu(dev->pcpu_refcnt);
ab9c73cc
JP
5583free_p:
5584 kfree(p);
5585 return NULL;
1da177e4 5586}
f25f4e44 5587EXPORT_SYMBOL(alloc_netdev_mq);
1da177e4
LT
5588
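A usage sketch, with hypothetical driver names: allocate the device with its private area and queue count, then register it. Most Ethernet drivers use the alloc_etherdev_mq() convenience wrapper, which supplies ether_setup() automatically.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct example_priv {
	int dummy;		/* hypothetical driver state */
};

static struct net_device *example_create(void)
{
	struct net_device *ndev;

	/* Private area for struct example_priv, name template "ex%d",
	 * Ethernet defaults from ether_setup(), four TX queues. */
	ndev = alloc_netdev_mq(sizeof(struct example_priv), "ex%d",
			       ether_setup, 4);
	if (!ndev)
		return NULL;

	if (register_netdev(ndev)) {
		free_netdev(ndev);	/* legal while still unregistered */
		return NULL;
	}
	return ndev;
}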
5589/**
5590 * free_netdev - free network device
5591 * @dev: device
5592 *
4ec93edb
YH
5593 * This function does the last stage of destroying an allocated device
5594 * interface. The reference to the device object is released.
1da177e4
LT
5595 * If this is the last reference then it will be freed.
5596 */
5597void free_netdev(struct net_device *dev)
5598{
d565b0a1
HX
5599 struct napi_struct *p, *n;
5600
f3005d7f
DL
5601 release_net(dev_net(dev));
5602
e8a0464c
DM
5603 kfree(dev->_tx);
5604
24824a09
ED
5605 kfree(rcu_dereference_raw(dev->ingress_queue));
5606
f001fde5
JP
5607 /* Flush device addresses */
5608 dev_addr_flush(dev);
5609
15682bc4
PWJ
5610 /* Clear ethtool n-tuple list */
5611 ethtool_ntuple_flush(dev);
5612
d565b0a1
HX
5613 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5614 netif_napi_del(p);
5615
29b4433d
ED
5616 free_percpu(dev->pcpu_refcnt);
5617 dev->pcpu_refcnt = NULL;
5618
3041a069 5619 /* Compatibility with error handling in drivers */
1da177e4
LT
5620 if (dev->reg_state == NETREG_UNINITIALIZED) {
5621 kfree((char *)dev - dev->padded);
5622 return;
5623 }
5624
5625 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5626 dev->reg_state = NETREG_RELEASED;
5627
43cb76d9
GKH
5628 /* will free via device release */
5629 put_device(&dev->dev);
1da177e4 5630}
d1b19dff 5631EXPORT_SYMBOL(free_netdev);
4ec93edb 5632
f0db275a
SH
5633/**
5634 * synchronize_net - Synchronize with packet receive processing
5635 *
5636 * Wait for packets currently being received to be done.
5637 * Does not block later packets from starting.
5638 */
4ec93edb 5639void synchronize_net(void)
1da177e4
LT
5640{
5641 might_sleep();
fbd568a3 5642 synchronize_rcu();
1da177e4 5643}
d1b19dff 5644EXPORT_SYMBOL(synchronize_net);
1da177e4
LT
5645
5646/**
44a0873d 5647 * unregister_netdevice_queue - remove device from the kernel
1da177e4 5648 * @dev: device
44a0873d 5649 * @head: list
6ebfbc06 5650 *
1da177e4 5651 * This function shuts down a device interface and removes it
d59b54b1 5652 * from the kernel tables.
44a0873d 5653 * If @head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
5654 *
5655 * Callers must hold the rtnl semaphore. You may want
5656 * unregister_netdev() instead of this.
5657 */
5658
44a0873d 5659void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 5660{
a6620712
HX
5661 ASSERT_RTNL();
5662
44a0873d 5663 if (head) {
9fdce099 5664 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
5665 } else {
5666 rollback_registered(dev);
5667 /* Finish processing unregister after unlock */
5668 net_set_todo(dev);
5669 }
1da177e4 5670}
44a0873d 5671EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 5672
9b5e383c
ED
5673/**
5674 * unregister_netdevice_many - unregister many devices
5675 * @head: list of devices
9b5e383c
ED
5676 */
5677void unregister_netdevice_many(struct list_head *head)
5678{
5679 struct net_device *dev;
5680
5681 if (!list_empty(head)) {
5682 rollback_registered_many(head);
5683 list_for_each_entry(dev, head, unreg_list)
5684 net_set_todo(dev);
5685 }
5686}
63c8099d 5687EXPORT_SYMBOL(unregister_netdevice_many);
9b5e383c 5688
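An illustrative batch teardown built on these two helpers (the array and loop are hypothetical): queue every device on one list under a single RTNL hold, then let unregister_netdevice_many() run rollback_registered_many() once for the whole group.

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void example_destroy_group(struct net_device **devs, int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}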
1da177e4
LT
5689/**
5690 * unregister_netdev - remove device from the kernel
5691 * @dev: device
5692 *
5693 * This function shuts down a device interface and removes it
d59b54b1 5694 * from the kernel tables.
1da177e4
LT
5695 *
5696 * This is just a wrapper for unregister_netdevice that takes
5697 * the rtnl semaphore. In general you want to use this and not
5698 * unregister_netdevice.
5699 */
5700void unregister_netdev(struct net_device *dev)
5701{
5702 rtnl_lock();
5703 unregister_netdevice(dev);
5704 rtnl_unlock();
5705}
1da177e4
LT
5706EXPORT_SYMBOL(unregister_netdev);
5707
ce286d32
EB
5708/**
5709 * dev_change_net_namespace - move device to a different network namespace
5710 * @dev: device
5711 * @net: network namespace
5712 * @pat: If not NULL name pattern to try if the current device name
5713 * is already taken in the destination network namespace.
5714 *
5715 * This function shuts down a device interface and moves it
5716 * to a new network namespace. On success 0 is returned, on
5718 * failure a negative errno code is returned.
5718 *
5719 * Callers must hold the rtnl semaphore.
5720 */
5721
5722int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5723{
ce286d32
EB
5724 int err;
5725
5726 ASSERT_RTNL();
5727
5728 /* Don't allow namespace local devices to be moved. */
5729 err = -EINVAL;
5730 if (dev->features & NETIF_F_NETNS_LOCAL)
5731 goto out;
5732
5733 /* Ensure the device has been registered */
5734 err = -EINVAL;
5735 if (dev->reg_state != NETREG_REGISTERED)
5736 goto out;
5737
5738 /* Get out if there is nothing to do */
5739 err = 0;
878628fb 5740 if (net_eq(dev_net(dev), net))
ce286d32
EB
5741 goto out;
5742
5743 /* Pick the destination device name, and ensure
5744 * we can use it in the destination network namespace.
5745 */
5746 err = -EEXIST;
d9031024 5747 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
5748 /* We get here if we can't use the current device name */
5749 if (!pat)
5750 goto out;
8ce6cebc 5751 if (dev_get_valid_name(dev, pat, 1))
ce286d32
EB
5752 goto out;
5753 }
5754
5755 /*
5756 * And now a mini version of register_netdevice/unregister_netdevice.
5757 */
5758
5759 /* If device is running close it first. */
9b772652 5760 dev_close(dev);
ce286d32
EB
5761
5762 /* And unlink it from device chain */
5763 err = -ENODEV;
5764 unlist_netdevice(dev);
5765
5766 synchronize_net();
5767
5768 /* Shutdown queueing discipline. */
5769 dev_shutdown(dev);
5770
5771 /* Notify protocols that we are about to destroy
5772 this device. They should clean up all their state.
3b27e105
DL
5773
5774 Note that dev->reg_state stays at NETREG_REGISTERED.
5775 This is wanted because this way 8021q and macvlan know
5776 the device is just moving and can keep their slaves up.
ce286d32
EB
5777 */
5778 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5779 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
ce286d32
EB
5780
5781 /*
5782 * Flush the unicast and multicast chains
5783 */
a748ee24 5784 dev_uc_flush(dev);
22bedad3 5785 dev_mc_flush(dev);
ce286d32
EB
5786
5787 /* Actually switch the network namespace */
c346dca1 5788 dev_net_set(dev, net);
ce286d32 5789
ce286d32
EB
5790 /* If there is an ifindex conflict assign a new one */
5791 if (__dev_get_by_index(net, dev->ifindex)) {
5792 int iflink = (dev->iflink == dev->ifindex);
5793 dev->ifindex = dev_new_index(net);
5794 if (iflink)
5795 dev->iflink = dev->ifindex;
5796 }
5797
8b41d188 5798 /* Fixup kobjects */
a1b3f594 5799 err = device_rename(&dev->dev, dev->name);
8b41d188 5800 WARN_ON(err);
ce286d32
EB
5801
5802 /* Add the device back in the hashes */
5803 list_netdevice(dev);
5804
5805 /* Notify protocols that a new device appeared. */
5806 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5807
d90a909e
EB
5808 /*
5809 * Prevent userspace races by waiting until the network
5810 * device is fully setup before sending notifications.
5811 */
5812 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5813
ce286d32
EB
5814 synchronize_net();
5815 err = 0;
5816out:
5817 return err;
5818}
463d0183 5819EXPORT_SYMBOL_GPL(dev_change_net_namespace);
ce286d32 5820
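A caller-side sketch (how @net is obtained is elided): take RTNL around the call and supply a fallback name pattern for collisions in the destination namespace.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int example_move_dev(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	/* "eth%d" is only used if dev->name already exists in @net. */
	err = dev_change_net_namespace(dev, net, "eth%d");
	rtnl_unlock();
	return err;
}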
1da177e4
LT
5821static int dev_cpu_callback(struct notifier_block *nfb,
5822 unsigned long action,
5823 void *ocpu)
5824{
5825 struct sk_buff **list_skb;
1da177e4
LT
5826 struct sk_buff *skb;
5827 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5828 struct softnet_data *sd, *oldsd;
5829
8bb78442 5830 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
5831 return NOTIFY_OK;
5832
5833 local_irq_disable();
5834 cpu = smp_processor_id();
5835 sd = &per_cpu(softnet_data, cpu);
5836 oldsd = &per_cpu(softnet_data, oldcpu);
5837
5838 /* Find end of our completion_queue. */
5839 list_skb = &sd->completion_queue;
5840 while (*list_skb)
5841 list_skb = &(*list_skb)->next;
5842 /* Append completion queue from offline CPU. */
5843 *list_skb = oldsd->completion_queue;
5844 oldsd->completion_queue = NULL;
5845
1da177e4 5846 /* Append output queue from offline CPU. */
a9cbd588
CG
5847 if (oldsd->output_queue) {
5848 *sd->output_queue_tailp = oldsd->output_queue;
5849 sd->output_queue_tailp = oldsd->output_queue_tailp;
5850 oldsd->output_queue = NULL;
5851 oldsd->output_queue_tailp = &oldsd->output_queue;
5852 }
1da177e4
LT
5853
5854 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5855 local_irq_enable();
5856
5857 /* Process offline CPU's input_pkt_queue */
76cc8b13 5858 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
1da177e4 5859 netif_rx(skb);
76cc8b13 5860 input_queue_head_incr(oldsd);
fec5e652 5861 }
76cc8b13 5862 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6e7676c1 5863 netif_rx(skb);
76cc8b13
TH
5864 input_queue_head_incr(oldsd);
5865 }
1da177e4
LT
5866
5867 return NOTIFY_OK;
5868}
1da177e4
LT
5869
5870
7f353bf2 5871/**
b63365a2
HX
5872 * netdev_increment_features - increment feature set by one
5873 * @all: current feature set
5874 * @one: new feature set
5875 * @mask: mask feature set
7f353bf2
HX
5876 *
5877 * Computes a new feature set after adding a device with feature set
b63365a2
HX
5878 * @one to the master device with current feature set @all. Will not
5879 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 5880 */
b63365a2
HX
5881unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5882 unsigned long mask)
5883{
5884 /* If device needs checksumming, downgrade to it. */
d1b19dff 5885 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
b63365a2
HX
5886 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5887 else if (mask & NETIF_F_ALL_CSUM) {
5888 /* If one device supports v4/v6 checksumming, set for all. */
5889 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5890 !(all & NETIF_F_GEN_CSUM)) {
5891 all &= ~NETIF_F_ALL_CSUM;
5892 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5893 }
e2a6b852 5894
b63365a2
HX
5895 /* If one device supports hw checksumming, set for all. */
5896 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5897 all &= ~NETIF_F_ALL_CSUM;
5898 all |= NETIF_F_HW_CSUM;
5899 }
5900 }
7f353bf2 5901
b63365a2 5902 one |= NETIF_F_ALL_CSUM;
7f353bf2 5903
b63365a2 5904 one |= all & NETIF_F_ONE_FOR_ALL;
d9f5950f 5905 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
b63365a2 5906 all |= one & mask & NETIF_F_ONE_FOR_ALL;
7f353bf2
HX
5907
5908 return all;
5909}
b63365a2 5910EXPORT_SYMBOL(netdev_increment_features);
7f353bf2 5911
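For illustration, a fold loop in the style of the bonding and bridge drivers (the slave list and struct are hypothetical): start from the permissive @mask and let each slave narrow the result, with NETIF_F_ONE_FOR_ALL bits widening instead.

#include <linux/list.h>
#include <linux/netdevice.h>

struct example_slave {
	struct list_head list;
	struct net_device *dev;		/* underlying device */
};

static unsigned long example_fold_features(struct list_head *slaves,
					   unsigned long mask)
{
	struct example_slave *s;
	unsigned long all = mask;

	list_for_each_entry(s, slaves, list)
		all = netdev_increment_features(all, s->dev->features, mask);

	return all;
}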
30d97d35
PE
5912static struct hlist_head *netdev_create_hash(void)
5913{
5914 int i;
5915 struct hlist_head *hash;
5916
5917 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5918 if (hash != NULL)
5919 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5920 INIT_HLIST_HEAD(&hash[i]);
5921
5922 return hash;
5923}
5924
881d966b 5925/* Initialize per network namespace state */
4665079c 5926static int __net_init netdev_init(struct net *net)
881d966b 5927{
881d966b 5928 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 5929
30d97d35
PE
5930 net->dev_name_head = netdev_create_hash();
5931 if (net->dev_name_head == NULL)
5932 goto err_name;
881d966b 5933
30d97d35
PE
5934 net->dev_index_head = netdev_create_hash();
5935 if (net->dev_index_head == NULL)
5936 goto err_idx;
881d966b
EB
5937
5938 return 0;
30d97d35
PE
5939
5940err_idx:
5941 kfree(net->dev_name_head);
5942err_name:
5943 return -ENOMEM;
881d966b
EB
5944}
5945
f0db275a
SH
5946/**
5947 * netdev_drivername - network driver for the device
5948 * @dev: network device
5949 * @buffer: buffer for resulting name
5950 * @len: size of buffer
5951 *
5952 * Determine network driver for device.
5953 */
cf04a4c7 5954char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
6579e57b 5955{
cf04a4c7
SH
5956 const struct device_driver *driver;
5957 const struct device *parent;
6579e57b
AV
5958
5959 if (len <= 0 || !buffer)
5960 return buffer;
5961 buffer[0] = 0;
5962
5963 parent = dev->dev.parent;
5964
5965 if (!parent)
5966 return buffer;
5967
5968 driver = parent->driver;
5969 if (driver && driver->name)
5970 strlcpy(buffer, driver->name, len);
5971 return buffer;
5972}
5973
256df2f3
JP
5974static int __netdev_printk(const char *level, const struct net_device *dev,
5975 struct va_format *vaf)
5976{
5977 int r;
5978
5979 if (dev && dev->dev.parent)
5980 r = dev_printk(level, dev->dev.parent, "%s: %pV",
5981 netdev_name(dev), vaf);
5982 else if (dev)
5983 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
5984 else
5985 r = printk("%s(NULL net_device): %pV", level, vaf);
5986
5987 return r;
5988}
5989
5990int netdev_printk(const char *level, const struct net_device *dev,
5991 const char *format, ...)
5992{
5993 struct va_format vaf;
5994 va_list args;
5995 int r;
5996
5997 va_start(args, format);
5998
5999 vaf.fmt = format;
6000 vaf.va = &args;
6001
6002 r = __netdev_printk(level, dev, &vaf);
6003 va_end(args);
6004
6005 return r;
6006}
6007EXPORT_SYMBOL(netdev_printk);
6008
6009#define define_netdev_printk_level(func, level) \
6010int func(const struct net_device *dev, const char *fmt, ...) \
6011{ \
6012 int r; \
6013 struct va_format vaf; \
6014 va_list args; \
6015 \
6016 va_start(args, fmt); \
6017 \
6018 vaf.fmt = fmt; \
6019 vaf.va = &args; \
6020 \
6021 r = __netdev_printk(level, dev, &vaf); \
6022 va_end(args); \
6023 \
6024 return r; \
6025} \
6026EXPORT_SYMBOL(func);
6027
6028define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6029define_netdev_printk_level(netdev_alert, KERN_ALERT);
6030define_netdev_printk_level(netdev_crit, KERN_CRIT);
6031define_netdev_printk_level(netdev_err, KERN_ERR);
6032define_netdev_printk_level(netdev_warn, KERN_WARNING);
6033define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6034define_netdev_printk_level(netdev_info, KERN_INFO);
6035
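The generated helpers take a device plus a printf-style format and route through __netdev_printk(), so output is prefixed with the parent device and interface name when available. A brief usage sketch (qidx and speed are hypothetical caller-supplied values):

#include <linux/netdevice.h>

static void example_report(struct net_device *dev, int qidx, unsigned int speed)
{
	netdev_err(dev, "TX timeout on queue %d\n", qidx);
	netdev_info(dev, "link up, %u Mbps\n", speed);
}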
4665079c 6036static void __net_exit netdev_exit(struct net *net)
881d966b
EB
6037{
6038 kfree(net->dev_name_head);
6039 kfree(net->dev_index_head);
6040}
6041
022cbae6 6042static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
6043 .init = netdev_init,
6044 .exit = netdev_exit,
6045};
6046
4665079c 6047static void __net_exit default_device_exit(struct net *net)
ce286d32 6048{
e008b5fc 6049 struct net_device *dev, *aux;
ce286d32 6050 /*
e008b5fc 6051 * Push all migratable network devices back to the
ce286d32
EB
6052 * initial network namespace
6053 */
6054 rtnl_lock();
e008b5fc 6055 for_each_netdev_safe(net, dev, aux) {
ce286d32 6056 int err;
aca51397 6057 char fb_name[IFNAMSIZ];
ce286d32
EB
6058
6059 /* Ignore unmovable devices (e.g. loopback) */
6060 if (dev->features & NETIF_F_NETNS_LOCAL)
6061 continue;
6062
e008b5fc
EB
6063 /* Leave virtual devices for the generic cleanup */
6064 if (dev->rtnl_link_ops)
6065 continue;
d0c082ce 6066
ce286d32 6067 /* Push remaining network devices to init_net */
aca51397
PE
6068 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6069 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 6070 if (err) {
aca51397 6071 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
ce286d32 6072 __func__, dev->name, err);
aca51397 6073 BUG();
ce286d32
EB
6074 }
6075 }
6076 rtnl_unlock();
6077}
6078
04dc7f6b
EB
6079static void __net_exit default_device_exit_batch(struct list_head *net_list)
6080{
6081 /* At exit all network devices must be removed from a network
6082 * namespace. Do this in the reverse order of registration.
6083 * Do this across as many network namespaces as possible to
6084 * improve batching efficiency.
6085 */
6086 struct net_device *dev;
6087 struct net *net;
6088 LIST_HEAD(dev_kill_list);
6089
6090 rtnl_lock();
6091 list_for_each_entry(net, net_list, exit_list) {
6092 for_each_netdev_reverse(net, dev) {
6093 if (dev->rtnl_link_ops)
6094 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6095 else
6096 unregister_netdevice_queue(dev, &dev_kill_list);
6097 }
6098 }
6099 unregister_netdevice_many(&dev_kill_list);
6100 rtnl_unlock();
6101}
6102
022cbae6 6103static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 6104 .exit = default_device_exit,
04dc7f6b 6105 .exit_batch = default_device_exit_batch,
ce286d32
EB
6106};
6107
1da177e4
LT
6108/*
6109 * Initialize the DEV module. At boot time this walks the device list and
6110 * unhooks any devices that fail to initialise (normally hardware not
6111 * present) and leaves us with a valid list of present and active devices.
6112 *
6113 */
6114
6115/*
6116 * This is called single threaded during boot, so no need
6117 * to take the rtnl semaphore.
6118 */
6119static int __init net_dev_init(void)
6120{
6121 int i, rc = -ENOMEM;
6122
6123 BUG_ON(!dev_boot_phase);
6124
1da177e4
LT
6125 if (dev_proc_init())
6126 goto out;
6127
8b41d188 6128 if (netdev_kobject_init())
1da177e4
LT
6129 goto out;
6130
6131 INIT_LIST_HEAD(&ptype_all);
82d8a867 6132 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
6133 INIT_LIST_HEAD(&ptype_base[i]);
6134
881d966b
EB
6135 if (register_pernet_subsys(&netdev_net_ops))
6136 goto out;
1da177e4
LT
6137
6138 /*
6139 * Initialise the packet receive queues.
6140 */
6141
6f912042 6142 for_each_possible_cpu(i) {
e36fa2f7 6143 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 6144
dee42870 6145 memset(sd, 0, sizeof(*sd));
e36fa2f7 6146 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 6147 skb_queue_head_init(&sd->process_queue);
e36fa2f7
ED
6148 sd->completion_queue = NULL;
6149 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588
CG
6150 sd->output_queue = NULL;
6151 sd->output_queue_tailp = &sd->output_queue;
df334545 6152#ifdef CONFIG_RPS
e36fa2f7
ED
6153 sd->csd.func = rps_trigger_softirq;
6154 sd->csd.info = sd;
6155 sd->csd.flags = 0;
6156 sd->cpu = i;
1e94d72f 6157#endif
0a9627f2 6158
e36fa2f7
ED
6159 sd->backlog.poll = process_backlog;
6160 sd->backlog.weight = weight_p;
6161 sd->backlog.gro_list = NULL;
6162 sd->backlog.gro_count = 0;
1da177e4
LT
6163 }
6164
1da177e4
LT
6165 dev_boot_phase = 0;
6166
505d4f73
EB
6167 /* The loopback device is special: if any other network device
6168 * is present in a network namespace, the loopback device must
6169 * be present too. Since we now dynamically allocate and free the
6170 * loopback device, ensure this invariant is maintained by
6171 * keeping the loopback device as the first device on the
6172 * list of network devices, so that the loopback device
6173 * is the first device that appears and the last network device
6174 * that disappears.
6175 */
6176 if (register_pernet_device(&loopback_net_ops))
6177 goto out;
6178
6179 if (register_pernet_device(&default_device_ops))
6180 goto out;
6181
962cf36c
CM
6182 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6183 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
6184
6185 hotcpu_notifier(dev_cpu_callback, 0);
6186 dst_init();
6187 dev_mcast_init();
6188 rc = 0;
6189out:
6190 return rc;
6191}
6192
6193subsys_initcall(net_dev_init);
6194
e88721f8
KK
6195static int __init initialize_hashrnd(void)
6196{
0a9627f2 6197 get_random_bytes(&hashrnd, sizeof(hashrnd));
e88721f8
KK
6198 return 0;
6199}
6200
6201late_initcall_sync(initialize_hashrnd);
6202