/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

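/*
 * Usage sketch (hypothetical, not part of the original file): a pure
 * reader following the rules above takes dev_base_lock for reading only;
 * rcu_read_lock() works equally well for the RCU-protected lists.
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		pr_info("saw %s\n", dev->name);
 *	read_unlock(&dev_base_lock);
 */
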
static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not
 *	guarantee that all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

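/*
 * Usage sketch (hypothetical names, not from this file): a module tapping
 * all packets sets up a struct packet_type and registers it; "my_rcv" is
 * an assumed handler.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);  ... later: dev_remove_pack(&my_ptype);
 */
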
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

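/*
 * Kernel command line sketch (hypothetical values): up to four integers
 * (irq, base_addr, mem_start, mem_end) followed by a device name, e.g.
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * get_options() consumes the leading integers and leaves "eth0" in str,
 * which becomes the entry name via netdev_boot_setup_add().
 */
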
/*******************************************************************************

		      Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

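/*
 * Usage sketch (hypothetical): the _rcu variant takes no reference, so
 * the device may only be touched inside the read-side critical section.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		mtu = dev->mtu;
 *	rcu_read_unlock();
 */
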
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

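/*
 * Usage sketch (hypothetical; "do_something" is an assumed helper): the
 * plain variant holds a reference that the caller must drop with dev_put().
 *
 *	dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		do_something(dev);
 *		dev_put(dev);
 *	}
 */
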
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

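/*
 * Illustrative examples of what the checks above accept and reject:
 * "eth0" and "wlan-1" pass; "", ".", "..", "a/b", "has space" and any
 * name of IFNAMSIZ characters or more are rejected.
 */
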
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

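/*
 * Usage sketch (hypothetical): with "eth0" and "eth2" already registered,
 * dev_alloc_name(dev, "eth%d") picks the first free slot, writes "eth1"
 * into dev->name and returns 1. A plain name without '%' is used as-is,
 * provided no device of that name exists yet.
 */
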
static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname, 1);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

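/*
 * Usage sketch (hypothetical): dev_open() must run under the rtnl
 * semaphore, like the other state-changing entry points in this file.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */
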
static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 *	Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow it to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

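/*
 * Usage sketch (hypothetical handler name): a subsystem interested in
 * device events registers a notifier_block; NETDEV_REGISTER and NETDEV_UP
 * are replayed for already-present devices, as noted above.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 */
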
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (!(dev->flags & IFF_UP) ||
	    (skb->len > (dev->mtu + dev->hard_header_len))) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

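/*
 * Usage sketch (hypothetical; "my_get_peer" is an assumed helper): a pair
 * device in the style of veth can loop its transmit path into the peer's
 * receive path from ndo_start_xmit():
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);
 *		return NETDEV_TX_OK;
 *	}
 */
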
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp_set(skb);
#else
	net_timestamp_set(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	unsigned int real_num = dev->real_num_tx_queues;

	if (unlikely(txq > dev->num_tx_queues))
		;
	else if (txq > real_num)
		dev->real_num_tx_queues = txq;
	else if (txq < real_num) {
		dev->real_num_tx_queues = txq;
		qdisc_reset_all_tx_gt(dev, txq);
	}
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

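/*
 * Usage sketch (hypothetical): a multiqueue driver that negotiated fewer
 * queues than it advertised at alloc_etherdev_mq() time shrinks the
 * active set; stale skbs on the now-unused qdiscs are then flushed:
 *
 *	netif_set_real_num_tx_queues(dev, num_online_cpus());
 */
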
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/**
 * skb_dev_set -- assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

f6a78bfc
HX
1744/**
1745 * skb_gso_segment - Perform segmentation on skb.
1746 * @skb: buffer to segment
576a30eb 1747 * @features: features for the output path (see dev->features)
f6a78bfc
HX
1748 *
1749 * This function segments the given skb and returns a list of segments.
576a30eb
HX
1750 *
1751 * It may return NULL if the skb requires no segmentation. This is
1752 * only possible when GSO is used for verifying header integrity.
f6a78bfc 1753 */
576a30eb 1754struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
f6a78bfc
HX
1755{
1756 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1757 struct packet_type *ptype;
1758 __be16 type = skb->protocol;
1759 int err;
1760
1761 skb_reset_mac_header(skb);
1762 skb->mac_len = skb->network_header - skb->mac_header;
1763 __skb_pull(skb, skb->mac_len);
1764
1765 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1766 struct net_device *dev = skb->dev;
1767 struct ethtool_drvinfo info = {};
1768
1769 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1770 dev->ethtool_ops->get_drvinfo(dev, &info);
1771
1772 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1773 "ip_summed=%d",
1774 info.driver, dev ? dev->features : 0L,
1775 skb->sk ? skb->sk->sk_route_caps : 0L,
1776 skb->len, skb->data_len, skb->ip_summed);
1777
1778 if (skb_header_cloned(skb) &&
1779 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1780 return ERR_PTR(err);
1781 }
1782
1783 rcu_read_lock();
1784 list_for_each_entry_rcu(ptype,
1785 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1786 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1787 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1788 err = ptype->gso_send_check(skb);
1789 segs = ERR_PTR(err);
1790 if (err || skb_gso_ok(skb, features))
1791 break;
1792 __skb_push(skb, (skb->data -
1793 skb_network_header(skb)));
1794 }
1795 segs = ptype->gso_segment(skb, features);
1796 break;
1797 }
1798 }
1799 rcu_read_unlock();
1800
1801 __skb_push(skb, skb->data - skb_mac_header(skb));
1802
1803 return segs;
1804}
1805EXPORT_SYMBOL(skb_gso_segment);
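/*
 * Example (not from this file): a hedged sketch of direct use. Callers
 * walk the returned list via skb->next and transmit each segment, which
 * is what dev_gso_segment() below does through dev_hard_start_xmit().
 * xmit_one() is a hypothetical per-segment transmit helper.
 */
static int my_xmit_gso(struct sk_buff *skb, int features)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* header verification only */
		return xmit_one(skb);	/* hypothetical */

	consume_skb(skb);		/* original replaced by the list */
	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		xmit_one(nskb);		/* hypothetical */
	}
	return 0;
}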
1806
1807/* Take action when hardware reception checksum errors are detected. */
1808#ifdef CONFIG_BUG
1809void netdev_rx_csum_fault(struct net_device *dev)
1810{
1811 if (net_ratelimit()) {
1812 printk(KERN_ERR "%s: hw csum failure.\n",
1813 dev ? dev->name : "<unknown>");
1814 dump_stack();
1815 }
1816}
1817EXPORT_SYMBOL(netdev_rx_csum_fault);
1818#endif
1819
1820/* Actually, we should eliminate this check as soon as we know that:
1821 * 1. An IOMMU is present and can map all of memory.
1822 * 2. No high memory actually exists on this machine.
1823 */
1824
1825static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1826{
1827#ifdef CONFIG_HIGHMEM
1828 int i;
1829 if (!(dev->features & NETIF_F_HIGHDMA)) {
1830 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1831 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1832 return 1;
1833 }
1834
1835 if (PCI_DMA_BUS_IS_PHYS) {
1836 struct device *pdev = dev->dev.parent;
1837
1838 if (!pdev)
1839 return 0;
1840 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1841 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1842 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1843 return 1;
1844 }
1845 }
1846#endif
1847 return 0;
1848}
1849
1850struct dev_gso_cb {
1851 void (*destructor)(struct sk_buff *skb);
1852};
1853
1854#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1855
1856static void dev_gso_skb_destructor(struct sk_buff *skb)
1857{
1858 struct dev_gso_cb *cb;
1859
1860 do {
1861 struct sk_buff *nskb = skb->next;
1862
1863 skb->next = nskb->next;
1864 nskb->next = NULL;
1865 kfree_skb(nskb);
1866 } while (skb->next);
1867
1868 cb = DEV_GSO_CB(skb);
1869 if (cb->destructor)
1870 cb->destructor(skb);
1871}
1872
1873/**
1874 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1875 * @skb: buffer to segment
1876 *
1877 * This function segments the given skb and stores the list of segments
1878 * in skb->next.
1879 */
1880static int dev_gso_segment(struct sk_buff *skb)
1881{
1882 struct net_device *dev = skb->dev;
1883 struct sk_buff *segs;
1884 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1885 NETIF_F_SG : 0);
1886
1887 segs = skb_gso_segment(skb, features);
1888
1889 /* Verifying header integrity only. */
1890 if (!segs)
1891 return 0;
1892
1893 if (IS_ERR(segs))
1894 return PTR_ERR(segs);
1895
1896 skb->next = segs;
1897 DEV_GSO_CB(skb)->destructor = skb->destructor;
1898 skb->destructor = dev_gso_skb_destructor;
1899
1900 return 0;
1901}
1902
1903/*
1904 * Try to orphan skb early, right before transmission by the device.
1905 * We cannot orphan the skb if a tx timestamp is requested, or if the
1906 * sk reference is still needed at driver level; see e.g. net/can/raw.c
1907 */
1908static inline void skb_orphan_try(struct sk_buff *skb)
1909{
1910 struct sock *sk = skb->sk;
1911
1912 if (sk && !skb_shinfo(skb)->tx_flags) {
1913 /* skb_tx_hash() won't be able to get sk.
1914 * We copy sk_hash into skb->rxhash
1915 */
1916 if (!skb->rxhash)
1917 skb->rxhash = sk->sk_hash;
1918 skb_orphan(skb);
1919 }
1920}
1921
1922/*
1923 * Returns true if either:
1924 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
1925 * 2. skb is fragmented and the device does not support SG, or if
1926 * at least one of the fragments is in highmem and the device does not
1927 * support DMA from it.
1928 */
1929static inline int skb_needs_linearize(struct sk_buff *skb,
1930 struct net_device *dev)
1931{
1932 return skb_is_nonlinear(skb) &&
1933 ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
1934 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
1935 illegal_highdma(dev, skb))));
1936}
1937
1938int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1939 struct netdev_queue *txq)
1940{
1941 const struct net_device_ops *ops = dev->netdev_ops;
1942 int rc = NETDEV_TX_OK;
1943
1944 if (likely(!skb->next)) {
1945 if (!list_empty(&ptype_all))
1946 dev_queue_xmit_nit(skb, dev);
1947
1948 /*
1949 * If the device doesn't need skb->dst, release it right now while
1950 * it's hot in this cpu's cache
1951 */
1952 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1953 skb_dst_drop(skb);
1954
1955 skb_orphan_try(skb);
1956
1957 if (netif_needs_gso(dev, skb)) {
1958 if (unlikely(dev_gso_segment(skb)))
1959 goto out_kfree_skb;
1960 if (skb->next)
1961 goto gso;
1962 } else {
1963 if (skb_needs_linearize(skb, dev) &&
1964 __skb_linearize(skb))
1965 goto out_kfree_skb;
1966
1967 /* If packet is not checksummed and device does not
1968 * support checksumming for this protocol, complete
1969 * checksumming here.
1970 */
1971 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1972 skb_set_transport_header(skb, skb->csum_start -
1973 skb_headroom(skb));
1974 if (!dev_can_checksum(dev, skb) &&
1975 skb_checksum_help(skb))
1976 goto out_kfree_skb;
1977 }
1978 }
1979
1980 rc = ops->ndo_start_xmit(skb, dev);
1981 if (rc == NETDEV_TX_OK)
1982 txq_trans_update(txq);
1983 return rc;
1984 }
1985
1986gso:
1987 do {
1988 struct sk_buff *nskb = skb->next;
1989
1990 skb->next = nskb->next;
1991 nskb->next = NULL;
1992
1993 /*
1994 * If the device doesn't need nskb->dst, release it right now while
1995 * it's hot in this cpu's cache
1996 */
1997 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1998 skb_dst_drop(nskb);
1999
2000 rc = ops->ndo_start_xmit(nskb, dev);
2001 if (unlikely(rc != NETDEV_TX_OK)) {
2002 if (rc & ~NETDEV_TX_MASK)
2003 goto out_kfree_gso_skb;
2004 nskb->next = skb->next;
2005 skb->next = nskb;
2006 return rc;
2007 }
2008 txq_trans_update(txq);
2009 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
2010 return NETDEV_TX_BUSY;
2011 } while (skb->next);
2012
2013out_kfree_gso_skb:
2014 if (likely(skb->next == NULL))
2015 skb->destructor = DEV_GSO_CB(skb)->destructor;
2016out_kfree_skb:
2017 kfree_skb(skb);
2018 return rc;
2019}
2020
2021static u32 hashrnd __read_mostly;
2022
2023u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
2024{
2025 u32 hash;
2026
2027 if (skb_rx_queue_recorded(skb)) {
2028 hash = skb_get_rx_queue(skb);
2029 while (unlikely(hash >= dev->real_num_tx_queues))
2030 hash -= dev->real_num_tx_queues;
2031 return hash;
2032 }
2033
2034 if (skb->sk && skb->sk->sk_hash)
2035 hash = skb->sk->sk_hash;
2036 else
2037 hash = (__force u16) skb->protocol ^ skb->rxhash;
2038 hash = jhash_1word(hash, hashrnd);
2039
2040 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
2041}
2042EXPORT_SYMBOL(skb_tx_hash);
2043
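/*
 * Example (not from this file): the final line of skb_tx_hash() avoids
 * a modulo by scaling the 32-bit hash: (hash * n) >> 32 cuts the 2^32
 * hash space into n equal slices and returns the slice index. Worked
 * values with n == 8 queues:
 *
 *	hash 0x00000000 -> (0x000000000 >> 32) = 0
 *	hash 0x20000000 -> (0x100000000 >> 32) = 1
 *	hash 0xffffffff -> (0x7fffffff8 >> 32) = 7
 *
 * so each queue receives an equal share of hash values without a
 * division in the hot path.
 */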
2044static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2045{
2046 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2047 if (net_ratelimit()) {
2048 pr_warning("%s selects TX queue %d, but "
2049 "real number of TX queues is %d\n",
2050 dev->name, queue_index, dev->real_num_tx_queues);
2051 }
2052 return 0;
2053 }
2054 return queue_index;
2055}
2056
2057static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2058 struct sk_buff *skb)
2059{
2060 int queue_index;
2061 struct sock *sk = skb->sk;
2062
2063 queue_index = sk_tx_queue_get(sk);
2064 if (queue_index < 0) {
2065 const struct net_device_ops *ops = dev->netdev_ops;
2066
2067 if (ops->ndo_select_queue) {
2068 queue_index = ops->ndo_select_queue(dev, skb);
2069 queue_index = dev_cap_txqueue(dev, queue_index);
2070 } else {
2071 queue_index = 0;
2072 if (dev->real_num_tx_queues > 1)
2073 queue_index = skb_tx_hash(dev, skb);
2074
2075 if (sk) {
2076 struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
2077
2078 if (dst && skb_dst(skb) == dst)
2079 sk_tx_queue_set(sk, queue_index);
2080 }
2081 }
2082 }
2083
2084 skb_set_queue_mapping(skb, queue_index);
2085 return netdev_get_tx_queue(dev, queue_index);
2086}
2087
2088static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2089 struct net_device *dev,
2090 struct netdev_queue *txq)
2091{
2092 spinlock_t *root_lock = qdisc_lock(q);
2093 bool contended = qdisc_is_running(q);
2094 int rc;
2095
2096 /*
2097 * Heuristic to force contended enqueues to serialize on a
2098 * separate lock before trying to get qdisc main lock.
2099 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2100 * and dequeue packets faster.
2101 */
2102 if (unlikely(contended))
2103 spin_lock(&q->busylock);
2104
2105 spin_lock(root_lock);
2106 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2107 kfree_skb(skb);
2108 rc = NET_XMIT_DROP;
2109 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2110 qdisc_run_begin(q)) {
2111 /*
2112 * This is a work-conserving queue; there are no old skbs
2113 * waiting to be sent out; and the qdisc is not running -
2114 * xmit the skb directly.
2115 */
2116 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2117 skb_dst_force(skb);
2118 __qdisc_update_bstats(q, skb->len);
2119 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2120 if (unlikely(contended)) {
2121 spin_unlock(&q->busylock);
2122 contended = false;
2123 }
2124 __qdisc_run(q);
2125 } else
2126 qdisc_run_end(q);
2127
2128 rc = NET_XMIT_SUCCESS;
2129 } else {
2130 skb_dst_force(skb);
2131 rc = qdisc_enqueue_root(skb, q);
2132 if (qdisc_run_begin(q)) {
2133 if (unlikely(contended)) {
2134 spin_unlock(&q->busylock);
2135 contended = false;
2136 }
2137 __qdisc_run(q);
2138 }
2139 }
2140 spin_unlock(root_lock);
2141 if (unlikely(contended))
2142 spin_unlock(&q->busylock);
2143 return rc;
2144}
2145
2146/**
2147 * dev_queue_xmit - transmit a buffer
2148 * @skb: buffer to transmit
2149 *
2150 * Queue a buffer for transmission to a network device. The caller must
2151 * have set the device and priority and built the buffer before calling
2152 * this function. The function can be called from an interrupt.
2153 *
2154 * A negative errno code is returned on a failure. A success does not
2155 * guarantee the frame will be transmitted as it may be dropped due
2156 * to congestion or traffic shaping.
2157 *
2158 * -----------------------------------------------------------------------------------
2159 * I notice this method can also return errors from the queue disciplines,
2160 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2161 * be positive.
2162 *
2163 * Regardless of the return value, the skb is consumed, so it is currently
2164 * difficult to retry a send to this method. (You can bump the ref count
2165 * before sending to hold a reference for retry if you are careful.)
2166 *
2167 * When calling this method, interrupts MUST be enabled. This is because
2168 * the BH enable code must have IRQs enabled so that it will not deadlock.
2169 * --BLG
2170 */
2171int dev_queue_xmit(struct sk_buff *skb)
2172{
2173 struct net_device *dev = skb->dev;
2174 struct netdev_queue *txq;
2175 struct Qdisc *q;
2176 int rc = -ENOMEM;
2177
2178 /* Disable soft irqs for various locks below. Also
2179 * stops preemption for RCU.
2180 */
2181 rcu_read_lock_bh();
2182
2183 txq = dev_pick_tx(dev, skb);
2184 q = rcu_dereference_bh(txq->qdisc);
2185
2186#ifdef CONFIG_NET_CLS_ACT
2187 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2188#endif
2189 if (q->enqueue) {
2190 rc = __dev_xmit_skb(skb, q, dev, txq);
2191 goto out;
2192 }
2193
2194 /* The device has no queue. Common case for software devices:
2195 loopback, all sorts of tunnels...
2196
2197 Really, it is unlikely that netif_tx_lock protection is necessary
2198 here. (f.e. loopback and IP tunnels are clean ignoring statistics
2199 counters.)
2200 However, it is possible that they rely on the protection
2201 we provide here.
2202
2203 Check this and take the lock. It is not prone to deadlocks.
2204 Either way, taking the noqueue path is even simpler 8)
2205 */
2206 if (dev->flags & IFF_UP) {
2207 int cpu = smp_processor_id(); /* ok because BHs are off */
2208
2209 if (txq->xmit_lock_owner != cpu) {
2210
2211 HARD_TX_LOCK(dev, txq, cpu);
2212
2213 if (!netif_tx_queue_stopped(txq)) {
2214 rc = dev_hard_start_xmit(skb, dev, txq);
2215 if (dev_xmit_complete(rc)) {
2216 HARD_TX_UNLOCK(dev, txq);
2217 goto out;
2218 }
2219 }
2220 HARD_TX_UNLOCK(dev, txq);
2221 if (net_ratelimit())
2222 printk(KERN_CRIT "Virtual device %s asks to "
2223 "queue packet!\n", dev->name);
2224 } else {
2225 /* Recursion is detected! It is possible,
2226 * unfortunately */
2227 if (net_ratelimit())
2228 printk(KERN_CRIT "Dead loop on virtual device "
2229 "%s, fix it urgently!\n", dev->name);
2230 }
2231 }
2232
2233 rc = -ENETDOWN;
2234 rcu_read_unlock_bh();
2235
2236 kfree_skb(skb);
2237 return rc;
2238out:
2239 rcu_read_unlock_bh();
2240 return rc;
2241}
2242EXPORT_SYMBOL(dev_queue_xmit);
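/*
 * Example (not from this file): a hedged sketch of the minimal calling
 * sequence for a module that builds and queues its own frame. The
 * payload handling and protocol value are purely illustrative.
 */
static int my_xmit_demo(struct net_device *dev, const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* room for MAC header */
	memcpy(skb_put(skb, len), data, len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_802_3);		/* illustrative */
	if (dev_hard_header(skb, dev, ETH_P_802_3, dev->broadcast,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return dev_queue_xmit(skb);	/* consumes skb on success and error */
}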
2243
2244
2245/*=======================================================================
2246 Receiver routines
2247 =======================================================================*/
2248
2249int netdev_max_backlog __read_mostly = 1000;
2250int netdev_tstamp_prequeue __read_mostly = 1;
2251int netdev_budget __read_mostly = 300;
2252int weight_p __read_mostly = 64; /* old backlog weight */
2253
2254/* Called with irq disabled */
2255static inline void ____napi_schedule(struct softnet_data *sd,
2256 struct napi_struct *napi)
2257{
2258 list_add_tail(&napi->poll_list, &sd->poll_list);
2259 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2260}
2261
2262/*
2263 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2264 * and src/dst port numbers. Returns a non-zero hash number on success
2265 * and 0 on failure.
2266 */
2267__u32 __skb_get_rxhash(struct sk_buff *skb)
2268{
2269 int nhoff, hash = 0, poff;
2270 struct ipv6hdr *ip6;
2271 struct iphdr *ip;
2272 u8 ip_proto;
2273 u32 addr1, addr2, ihl;
2274 union {
2275 u32 v32;
2276 u16 v16[2];
2277 } ports;
2278
2279 nhoff = skb_network_offset(skb);
2280
2281 switch (skb->protocol) {
2282 case __constant_htons(ETH_P_IP):
2283 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2284 goto done;
2285
2286 ip = (struct iphdr *) (skb->data + nhoff);
2287 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2288 ip_proto = 0;
2289 else
2290 ip_proto = ip->protocol;
2291 addr1 = (__force u32) ip->saddr;
2292 addr2 = (__force u32) ip->daddr;
2293 ihl = ip->ihl;
2294 break;
2295 case __constant_htons(ETH_P_IPV6):
2296 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2297 goto done;
2298
2299 ip6 = (struct ipv6hdr *) (skb->data + nhoff);
2300 ip_proto = ip6->nexthdr;
2301 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2302 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2303 ihl = (40 >> 2);
2304 break;
2305 default:
2306 goto done;
2307 }
2308
2309 ports.v32 = 0;
2310 poff = proto_ports_offset(ip_proto);
2311 if (poff >= 0) {
2312 nhoff += ihl * 4 + poff;
2313 if (pskb_may_pull(skb, nhoff + 4)) {
2314 ports.v32 = * (__force u32 *) (skb->data + nhoff);
2315 if (ports.v16[1] < ports.v16[0])
2316 swap(ports.v16[0], ports.v16[1]);
2317 }
2318 }
2319
2320 /* get a consistent hash (same value on both flow directions) */
2321 if (addr2 < addr1)
2322 swap(addr1, addr2);
2323
2324 hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2325 if (!hash)
2326 hash = 1;
2327
2328done:
2329 return hash;
2330}
2331EXPORT_SYMBOL(__skb_get_rxhash);
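/*
 * Example (not from this file): the swaps above are what make the hash
 * direction-agnostic - both halves of a connection canonicalize to the
 * same (addr1, addr2, ports) triple before jhash_3words() runs. A
 * userspace sketch of the same canonicalization, with a trivial
 * stand-in mixer instead of the kernel's seeded jhash_3words():
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
{
	return a * 0x9e3779b1u ^ b * 0x85ebca6bu ^ c;	/* stand-in only */
}

static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
			  uint16_t sport, uint16_t dport)
{
	uint32_t a1 = saddr, a2 = daddr, t;
	uint16_t p1 = sport, p2 = dport, tp;

	if (p2 < p1) { tp = p1; p1 = p2; p2 = tp; }	/* order ports */
	if (a2 < a1) { t = a1; a1 = a2; a2 = t; }	/* order addresses */
	return mix3(a1, a2, ((uint32_t)p2 << 16) | p1);
}

int main(void)
{
	/* both directions of one flow produce the same hash */
	printf("%08x\n", flow_hash(0x0a000001, 0x0a000002, 12345, 80));
	printf("%08x\n", flow_hash(0x0a000002, 0x0a000001, 80, 12345));
	return 0;
}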
2332
2333#ifdef CONFIG_RPS
2334
2335/* One global table that all flow-based protocols share. */
2336struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2337EXPORT_SYMBOL(rps_sock_flow_table);
2338
2339/*
2340 * get_rps_cpu is called from netif_receive_skb and returns the target
2341 * CPU from the RPS map of the receiving queue for a given skb.
2342 * rcu_read_lock must be held on entry.
2343 */
2344static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2345 struct rps_dev_flow **rflowp)
2346{
2347 struct netdev_rx_queue *rxqueue;
2348 struct rps_map *map;
2349 struct rps_dev_flow_table *flow_table;
2350 struct rps_sock_flow_table *sock_flow_table;
2351 int cpu = -1;
2352 u16 tcpu;
2353
2354 if (skb_rx_queue_recorded(skb)) {
2355 u16 index = skb_get_rx_queue(skb);
2356 if (unlikely(index >= dev->num_rx_queues)) {
2357 WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
2358 "on queue %u, but number of RX queues is %u\n",
2359 dev->name, index, dev->num_rx_queues);
2360 goto done;
2361 }
2362 rxqueue = dev->_rx + index;
2363 } else
2364 rxqueue = dev->_rx;
2365
2366 if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
2367 goto done;
2368
2369 skb_reset_network_header(skb);
2370 if (!skb_get_rxhash(skb))
2371 goto done;
2372
2373 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2374 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2375 if (flow_table && sock_flow_table) {
2376 u16 next_cpu;
2377 struct rps_dev_flow *rflow;
2378
2379 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2380 tcpu = rflow->cpu;
2381
2382 next_cpu = sock_flow_table->ents[skb->rxhash &
2383 sock_flow_table->mask];
2384
2385 /*
2386 * If the desired CPU (where last recvmsg was done) is
2387 * different from current CPU (one in the rx-queue flow
2388 * table entry), switch if one of the following holds:
2389 * - Current CPU is unset (equal to RPS_NO_CPU).
2390 * - Current CPU is offline.
2391 * - The current CPU's queue tail has advanced beyond the
2392 * last packet that was enqueued using this table entry.
2393 * This guarantees that all previous packets for the flow
2394 * have been dequeued, thus preserving in order delivery.
2395 */
2396 if (unlikely(tcpu != next_cpu) &&
2397 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2398 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2399 rflow->last_qtail)) >= 0)) {
2400 tcpu = rflow->cpu = next_cpu;
2401 if (tcpu != RPS_NO_CPU)
2402 rflow->last_qtail = per_cpu(softnet_data,
2403 tcpu).input_queue_head;
2404 }
2405 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2406 *rflowp = rflow;
2407 cpu = tcpu;
2408 goto done;
2409 }
2410 }
2411
2412 map = rcu_dereference(rxqueue->rps_map);
2413 if (map) {
2414 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2415
2416 if (cpu_online(tcpu)) {
2417 cpu = tcpu;
2418 goto done;
2419 }
2420 }
2421
2422done:
2423 return cpu;
2424}
2425
2426/* Called from hardirq (IPI) context */
2427static void rps_trigger_softirq(void *data)
2428{
2429 struct softnet_data *sd = data;
2430
2431 ____napi_schedule(sd, &sd->backlog);
2432 sd->received_rps++;
2433}
2434
2435#endif /* CONFIG_RPS */
2436
2437/*
2438 * Check if this softnet_data structure belongs to another cpu.
2439 * If yes, queue it to our IPI list and return 1.
2440 * If no, return 0.
2441 */
2442static int rps_ipi_queued(struct softnet_data *sd)
2443{
2444#ifdef CONFIG_RPS
2445 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2446
2447 if (sd != mysd) {
2448 sd->rps_ipi_next = mysd->rps_ipi_list;
2449 mysd->rps_ipi_list = sd;
2450
2451 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2452 return 1;
2453 }
2454#endif /* CONFIG_RPS */
2455 return 0;
2456}
2457
2458/*
2459 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2460 * queue (may be a remote CPU queue).
2461 */
2462static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2463 unsigned int *qtail)
2464{
2465 struct softnet_data *sd;
2466 unsigned long flags;
2467
2468 sd = &per_cpu(softnet_data, cpu);
2469
2470 local_irq_save(flags);
2471
2472 rps_lock(sd);
2473 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2474 if (skb_queue_len(&sd->input_pkt_queue)) {
2475enqueue:
2476 __skb_queue_tail(&sd->input_pkt_queue, skb);
2477 input_queue_tail_incr_save(sd, qtail);
2478 rps_unlock(sd);
2479 local_irq_restore(flags);
2480 return NET_RX_SUCCESS;
2481 }
2482
2483 /* Schedule NAPI for backlog device
2484 * We can use non atomic operation since we own the queue lock
2485 */
2486 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2487 if (!rps_ipi_queued(sd))
2488 ____napi_schedule(sd, &sd->backlog);
2489 }
2490 goto enqueue;
2491 }
2492
2493 sd->dropped++;
2494 rps_unlock(sd);
2495
2496 local_irq_restore(flags);
2497
2498 kfree_skb(skb);
2499 return NET_RX_DROP;
2500}
2501
2502/**
2503 * netif_rx - post buffer to the network code
2504 * @skb: buffer to post
2505 *
2506 * This function receives a packet from a device driver and queues it for
2507 * the upper (protocol) levels to process. It always succeeds. The buffer
2508 * may be dropped during processing for congestion control or by the
2509 * protocol layers.
2510 *
2511 * return values:
2512 * NET_RX_SUCCESS (no congestion)
2513 * NET_RX_DROP (packet was dropped)
2514 *
2515 */
2516
2517int netif_rx(struct sk_buff *skb)
2518{
2519 int ret;
2520
2521 /* if netpoll wants it, pretend we never saw it */
2522 if (netpoll_rx(skb))
2523 return NET_RX_DROP;
2524
2525 if (netdev_tstamp_prequeue)
2526 net_timestamp_check(skb);
2527
2528#ifdef CONFIG_RPS
2529 {
2530 struct rps_dev_flow voidflow, *rflow = &voidflow;
2531 int cpu;
2532
2533 preempt_disable();
2534 rcu_read_lock();
2535
2536 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2537 if (cpu < 0)
2538 cpu = smp_processor_id();
2539
2540 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2541
2542 rcu_read_unlock();
2543 preempt_enable();
2544 }
2545#else
2546 {
2547 unsigned int qtail;
2548 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2549 put_cpu();
2550 }
2551#endif
2552 return ret;
2553}
2554EXPORT_SYMBOL(netif_rx);
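/*
 * Example (not from this file): a classic interrupt-driven (non-NAPI)
 * driver calls netif_rx() from its receive interrupt once the frame has
 * been copied out of the hardware. my_card_read_frame() is hypothetical.
 */
static void my_card_rx(struct net_device *dev, unsigned int frame_len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, frame_len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	my_card_read_frame(dev, skb_put(skb, frame_len));	/* hypothetical */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);		/* queue for softirq-time processing */
}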
2555
2556int netif_rx_ni(struct sk_buff *skb)
2557{
2558 int err;
2559
2560 preempt_disable();
2561 err = netif_rx(skb);
2562 if (local_softirq_pending())
2563 do_softirq();
2564 preempt_enable();
2565
2566 return err;
2567}
2568EXPORT_SYMBOL(netif_rx_ni);
2569
2570static void net_tx_action(struct softirq_action *h)
2571{
2572 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2573
2574 if (sd->completion_queue) {
2575 struct sk_buff *clist;
2576
2577 local_irq_disable();
2578 clist = sd->completion_queue;
2579 sd->completion_queue = NULL;
2580 local_irq_enable();
2581
2582 while (clist) {
2583 struct sk_buff *skb = clist;
2584 clist = clist->next;
2585
2586 WARN_ON(atomic_read(&skb->users));
2587 __kfree_skb(skb);
2588 }
2589 }
2590
2591 if (sd->output_queue) {
2592 struct Qdisc *head;
2593
2594 local_irq_disable();
2595 head = sd->output_queue;
2596 sd->output_queue = NULL;
2597 sd->output_queue_tailp = &sd->output_queue;
2598 local_irq_enable();
2599
2600 while (head) {
2601 struct Qdisc *q = head;
2602 spinlock_t *root_lock;
2603
2604 head = head->next_sched;
2605
2606 root_lock = qdisc_lock(q);
2607 if (spin_trylock(root_lock)) {
2608 smp_mb__before_clear_bit();
2609 clear_bit(__QDISC_STATE_SCHED,
2610 &q->state);
2611 qdisc_run(q);
2612 spin_unlock(root_lock);
2613 } else {
2614 if (!test_bit(__QDISC_STATE_DEACTIVATED,
2615 &q->state)) {
2616 __netif_reschedule(q);
2617 } else {
2618 smp_mb__before_clear_bit();
2619 clear_bit(__QDISC_STATE_SCHED,
2620 &q->state);
2621 }
1da177e4
LT
2622 }
2623 }
2624 }
2625}
2626
2627static inline int deliver_skb(struct sk_buff *skb,
2628 struct packet_type *pt_prev,
2629 struct net_device *orig_dev)
2630{
2631 atomic_inc(&skb->users);
2632 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2633}
2634
2635#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
2636 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
2637/* This hook is defined here for ATM LANE */
2638int (*br_fdb_test_addr_hook)(struct net_device *dev,
2639 unsigned char *addr) __read_mostly;
2640EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2641#endif
2642
2643#ifdef CONFIG_NET_CLS_ACT
2644/* TODO: Maybe we should just force sch_ingress to be compiled in
2645 * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless
2646 * instructions (a compare and 2 extra stores) when we don't have
2647 * it on but do have CONFIG_NET_CLS_ACT.
2648 * NOTE: This doesn't stop any functionality; if you don't have
2649 * the ingress scheduler, you just can't add policies on ingress.
2650 *
2651 */
2652static int ing_filter(struct sk_buff *skb)
2653{
2654 struct net_device *dev = skb->dev;
2655 u32 ttl = G_TC_RTTL(skb->tc_verd);
2656 struct netdev_queue *rxq;
2657 int result = TC_ACT_OK;
2658 struct Qdisc *q;
2659
2660 if (unlikely(MAX_RED_LOOP < ttl++)) {
2661 if (net_ratelimit())
2662 pr_warning("Redir loop detected, dropping packet (%d->%d)\n",
2663 skb->skb_iif, dev->ifindex);
2664 return TC_ACT_SHOT;
2665 }
2666
2667 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2668 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
2669
2670 rxq = &dev->rx_queue;
2671
2672 q = rxq->qdisc;
2673 if (q != &noop_qdisc) {
2674 spin_lock(qdisc_lock(q));
2675 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2676 result = qdisc_enqueue_root(skb, q);
2677 spin_unlock(qdisc_lock(q));
2678 }
2679
2680 return result;
2681}
2682
2683static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2684 struct packet_type **pt_prev,
2685 int *ret, struct net_device *orig_dev)
2686{
2687 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
2688 goto out;
2689
2690 if (*pt_prev) {
2691 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2692 *pt_prev = NULL;
2693 }
2694
2695 switch (ing_filter(skb)) {
2696 case TC_ACT_SHOT:
2697 case TC_ACT_STOLEN:
2698 kfree_skb(skb);
2699 return NULL;
2700 }
2701
2702out:
2703 skb->tc_verd = 0;
2704 return skb;
2705}
2706#endif
2707
2708/*
2709 * netif_nit_deliver - deliver received packets to network taps
2710 * @skb: buffer
2711 *
2712 * This function is used to deliver incoming packets to network
2713 * taps. It should be used when the normal netif_receive_skb path
2714 * is bypassed, for example because of VLAN acceleration.
2715 */
2716void netif_nit_deliver(struct sk_buff *skb)
2717{
2718 struct packet_type *ptype;
2719
2720 if (list_empty(&ptype_all))
2721 return;
2722
2723 skb_reset_network_header(skb);
2724 skb_reset_transport_header(skb);
2725 skb->mac_len = skb->network_header - skb->mac_header;
2726
2727 rcu_read_lock();
2728 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2729 if (!ptype->dev || ptype->dev == skb->dev)
2730 deliver_skb(skb, ptype, skb->dev);
2731 }
2732 rcu_read_unlock();
2733}
2734
2735/**
2736 * netdev_rx_handler_register - register receive handler
2737 * @dev: device to register a handler for
2738 * @rx_handler: receive handler to register
2739 * @rx_handler_data: data pointer that is used by rx handler
2740 *
2741 * Register a receive handler for a device. This handler will then be
2742 * called from __netif_receive_skb. A negative errno code is returned
2743 * on a failure.
2744 *
2745 * The caller must hold the rtnl_mutex.
2746 */
2747int netdev_rx_handler_register(struct net_device *dev,
2748 rx_handler_func_t *rx_handler,
2749 void *rx_handler_data)
2750{
2751 ASSERT_RTNL();
2752
2753 if (dev->rx_handler)
2754 return -EBUSY;
2755
2756 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
2757 rcu_assign_pointer(dev->rx_handler, rx_handler);
2758
2759 return 0;
2760}
2761EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
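/*
 * Example (not from this file): a hedged sketch of how a bridge-like
 * module claims all traffic on a port device. In this tree the handler
 * returns the skb to let normal processing continue, or NULL once it
 * has consumed the packet. my_forward() is hypothetical.
 */
static struct sk_buff *my_port_rx(struct sk_buff *skb)
{
	my_forward(skb);	/* hypothetical forwarding path */
	return NULL;		/* consumed: __netif_receive_skb stops here */
}

static int my_port_attach(struct net_device *port)
{
	int err;

	rtnl_lock();		/* registration requires the rtnl_mutex */
	err = netdev_rx_handler_register(port, my_port_rx, NULL);
	rtnl_unlock();
	return err;
}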
2762
2763/**
2764 * netdev_rx_handler_unregister - unregister receive handler
2765 * @dev: device to unregister a handler from
2766 *
2767 * Unregister a receive handler from a device.
2768 *
2769 * The caller must hold the rtnl_mutex.
2770 */
2771void netdev_rx_handler_unregister(struct net_device *dev)
2772{
2773
2774 ASSERT_RTNL();
2775 rcu_assign_pointer(dev->rx_handler, NULL);
2776 rcu_assign_pointer(dev->rx_handler_data, NULL);
2777}
2778EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
2779
2780static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2781 struct net_device *master)
2782{
2783 if (skb->pkt_type == PACKET_HOST) {
2784 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2785
2786 memcpy(dest, master->dev_addr, ETH_ALEN);
2787 }
2788}
2789
2790/* On bonding slaves other than the currently active slave, suppress
2791 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2792 * ARP on active-backup slaves with arp_validate enabled.
2793 */
2794int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2795{
2796 struct net_device *dev = skb->dev;
2797
2798 if (master->priv_flags & IFF_MASTER_ARPMON)
2799 dev->last_rx = jiffies;
2800
2801 if ((master->priv_flags & IFF_MASTER_ALB) &&
2802 (master->priv_flags & IFF_BRIDGE_PORT)) {
2803 /* Do address unmangle. The local destination address
2804 * will be always the one master has. Provides the right
2805 * functionality in a bridge.
2806 */
2807 skb_bond_set_mac_by_master(skb, master);
2808 }
2809
2810 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2811 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2812 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2813 return 0;
2814
2815 if (master->priv_flags & IFF_MASTER_ALB) {
2816 if (skb->pkt_type != PACKET_BROADCAST &&
2817 skb->pkt_type != PACKET_MULTICAST)
2818 return 0;
2819 }
2820 if (master->priv_flags & IFF_MASTER_8023AD &&
2821 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2822 return 0;
2823
2824 return 1;
2825 }
2826 return 0;
2827}
2828EXPORT_SYMBOL(__skb_bond_should_drop);
2829
2830static int __netif_receive_skb(struct sk_buff *skb)
2831{
2832 struct packet_type *ptype, *pt_prev;
2833 rx_handler_func_t *rx_handler;
2834 struct net_device *orig_dev;
2835 struct net_device *master;
2836 struct net_device *null_or_orig;
2837 struct net_device *orig_or_bond;
2838 int ret = NET_RX_DROP;
2839 __be16 type;
2840
2841 if (!netdev_tstamp_prequeue)
2842 net_timestamp_check(skb);
2843
2844 if (vlan_tx_tag_present(skb))
2845 vlan_hwaccel_do_receive(skb);
2846
2847 /* if we've gotten here through NAPI, check netpoll */
2848 if (netpoll_receive_skb(skb))
2849 return NET_RX_DROP;
2850
2851 if (!skb->skb_iif)
2852 skb->skb_iif = skb->dev->ifindex;
2853
2854 /*
2855 * bonding note: skbs received on inactive slaves should only
2856 * be delivered to pkt handlers that are exact matches. Also
2857 * the deliver_no_wcard flag will be set. If packet handlers
2858 * are sensitive to duplicate packets these skbs will need to
2859 * be dropped at the handler. The vlan accel path may have
2860 * already set the deliver_no_wcard flag.
2861 */
2862 null_or_orig = NULL;
2863 orig_dev = skb->dev;
2864 master = ACCESS_ONCE(orig_dev->master);
2865 if (skb->deliver_no_wcard)
2866 null_or_orig = orig_dev;
2867 else if (master) {
2868 if (skb_bond_should_drop(skb, master)) {
2869 skb->deliver_no_wcard = 1;
2870 null_or_orig = orig_dev; /* deliver only exact match */
2871 } else
2872 skb->dev = master;
2873 }
2874
2875 __this_cpu_inc(softnet_data.processed);
2876 skb_reset_network_header(skb);
2877 skb_reset_transport_header(skb);
2878 skb->mac_len = skb->network_header - skb->mac_header;
2879
2880 pt_prev = NULL;
2881
2882 rcu_read_lock();
2883
2884#ifdef CONFIG_NET_CLS_ACT
2885 if (skb->tc_verd & TC_NCLS) {
2886 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2887 goto ncls;
2888 }
2889#endif
2890
2891 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2892 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2893 ptype->dev == orig_dev) {
2894 if (pt_prev)
2895 ret = deliver_skb(skb, pt_prev, orig_dev);
2896 pt_prev = ptype;
2897 }
2898 }
2899
2900#ifdef CONFIG_NET_CLS_ACT
2901 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2902 if (!skb)
2903 goto out;
2904ncls:
2905#endif
2906
2907 /* Handle special case of bridge or macvlan */
2908 rx_handler = rcu_dereference(skb->dev->rx_handler);
2909 if (rx_handler) {
2910 if (pt_prev) {
2911 ret = deliver_skb(skb, pt_prev, orig_dev);
2912 pt_prev = NULL;
2913 }
2914 skb = rx_handler(skb);
2915 if (!skb)
2916 goto out;
2917 }
2918
2919 /*
2920 * Make sure frames received on VLAN interfaces stacked on
2921 * bonding interfaces still make their way to any base bonding
2922 * device that may have registered for a specific ptype. The
2923 * handler may have to adjust skb->dev and orig_dev.
2924 */
2925 orig_or_bond = orig_dev;
2926 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2927 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2928 orig_or_bond = vlan_dev_real_dev(skb->dev);
2929 }
2930
2931 type = skb->protocol;
2932 list_for_each_entry_rcu(ptype,
2933 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2934 if (ptype->type == type && (ptype->dev == null_or_orig ||
2935 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2936 ptype->dev == orig_or_bond)) {
2937 if (pt_prev)
2938 ret = deliver_skb(skb, pt_prev, orig_dev);
2939 pt_prev = ptype;
2940 }
2941 }
2942
2943 if (pt_prev) {
2944 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2945 } else {
2946 kfree_skb(skb);
2947 /* Jamal, now you will not be able to escape explaining
2948 * to me how you were going to use this. :-)
2949 */
2950 ret = NET_RX_DROP;
2951 }
2952
2953out:
2954 rcu_read_unlock();
2955 return ret;
2956}
2957
2958/**
2959 * netif_receive_skb - process receive buffer from network
2960 * @skb: buffer to process
2961 *
2962 * netif_receive_skb() is the main receive data processing function.
2963 * It always succeeds. The buffer may be dropped during processing
2964 * for congestion control or by the protocol layers.
2965 *
2966 * This function may only be called from softirq context and interrupts
2967 * should be enabled.
2968 *
2969 * Return values (usually ignored):
2970 * NET_RX_SUCCESS: no congestion
2971 * NET_RX_DROP: packet was dropped
2972 */
2973int netif_receive_skb(struct sk_buff *skb)
2974{
2975 if (netdev_tstamp_prequeue)
2976 net_timestamp_check(skb);
2977
2978 if (skb_defer_rx_timestamp(skb))
2979 return NET_RX_SUCCESS;
2980
2981#ifdef CONFIG_RPS
2982 {
2983 struct rps_dev_flow voidflow, *rflow = &voidflow;
2984 int cpu, ret;
2985
2986 rcu_read_lock();
2987
2988 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2989
2990 if (cpu >= 0) {
2991 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2992 rcu_read_unlock();
2993 } else {
2994 rcu_read_unlock();
2995 ret = __netif_receive_skb(skb);
2996 }
2997
2998 return ret;
2999 }
3000#else
3001 return __netif_receive_skb(skb);
3002#endif
3003}
3004EXPORT_SYMBOL(netif_receive_skb);
3005
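/*
 * Example (not from this file): netif_receive_skb() is what a NAPI
 * driver's poll routine calls per packet. A hedged sketch, with a
 * hypothetical private structure and hypothetical my_fetch_frame() /
 * my_enable_rx_irq() helpers:
 */
struct my_adapter {			/* hypothetical driver private data */
	struct napi_struct napi;
	struct net_device *netdev;
};

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_adapter *priv = container_of(napi, struct my_adapter, napi);
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = my_fetch_frame(priv)) != NULL) {
		skb->protocol = eth_type_trans(skb, priv->netdev);
		netif_receive_skb(skb);		/* softirq context */
		work++;
	}

	if (work < budget) {
		napi_complete(napi);
		my_enable_rx_irq(priv);		/* hypothetical */
	}
	return work;
}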
3006/* Network device is going away, flush any packets still pending.
3007 * Called with irqs disabled.
3008 */
3009static void flush_backlog(void *arg)
3010{
3011 struct net_device *dev = arg;
3012 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3013 struct sk_buff *skb, *tmp;
3014
3015 rps_lock(sd);
3016 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3017 if (skb->dev == dev) {
3018 __skb_unlink(skb, &sd->input_pkt_queue);
3019 kfree_skb(skb);
3020 input_queue_head_incr(sd);
3021 }
3022 }
3023 rps_unlock(sd);
3024
3025 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3026 if (skb->dev == dev) {
3027 __skb_unlink(skb, &sd->process_queue);
3028 kfree_skb(skb);
3029 input_queue_head_incr(sd);
3030 }
3031 }
3032}
3033
3034static int napi_gro_complete(struct sk_buff *skb)
3035{
3036 struct packet_type *ptype;
3037 __be16 type = skb->protocol;
3038 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3039 int err = -ENOENT;
3040
3041 if (NAPI_GRO_CB(skb)->count == 1) {
3042 skb_shinfo(skb)->gso_size = 0;
3043 goto out;
3044 }
3045
3046 rcu_read_lock();
3047 list_for_each_entry_rcu(ptype, head, list) {
3048 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3049 continue;
3050
3051 err = ptype->gro_complete(skb);
3052 break;
3053 }
3054 rcu_read_unlock();
3055
3056 if (err) {
3057 WARN_ON(&ptype->list == head);
3058 kfree_skb(skb);
3059 return NET_RX_SUCCESS;
3060 }
3061
3062out:
3063 return netif_receive_skb(skb);
3064}
3065
3066static void napi_gro_flush(struct napi_struct *napi)
3067{
3068 struct sk_buff *skb, *next;
3069
3070 for (skb = napi->gro_list; skb; skb = next) {
3071 next = skb->next;
3072 skb->next = NULL;
3073 napi_gro_complete(skb);
3074 }
3075
3076 napi->gro_count = 0;
3077 napi->gro_list = NULL;
3078}
3079
3080enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3081{
3082 struct sk_buff **pp = NULL;
3083 struct packet_type *ptype;
3084 __be16 type = skb->protocol;
3085 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3086 int same_flow;
3087 int mac_len;
3088 enum gro_result ret;
3089
3090 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3091 goto normal;
3092
3093 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3094 goto normal;
3095
3096 rcu_read_lock();
3097 list_for_each_entry_rcu(ptype, head, list) {
3098 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3099 continue;
3100
3101 skb_set_network_header(skb, skb_gro_offset(skb));
3102 mac_len = skb->network_header - skb->mac_header;
3103 skb->mac_len = mac_len;
3104 NAPI_GRO_CB(skb)->same_flow = 0;
3105 NAPI_GRO_CB(skb)->flush = 0;
3106 NAPI_GRO_CB(skb)->free = 0;
3107
3108 pp = ptype->gro_receive(&napi->gro_list, skb);
3109 break;
3110 }
3111 rcu_read_unlock();
3112
3113 if (&ptype->list == head)
3114 goto normal;
3115
0da2afd5 3116 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 3117 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 3118
d565b0a1
HX
3119 if (pp) {
3120 struct sk_buff *nskb = *pp;
3121
3122 *pp = nskb->next;
3123 nskb->next = NULL;
3124 napi_gro_complete(nskb);
3125 napi->gro_count--;
3126 }
3127
3128 if (same_flow)
3129 goto ok;
3130
3131 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3132 goto normal;
3133
3134 napi->gro_count++;
3135 NAPI_GRO_CB(skb)->count = 1;
3136 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3137 skb->next = napi->gro_list;
3138 napi->gro_list = skb;
3139 ret = GRO_HELD;
3140
3141pull:
3142 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3143 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3144
3145 BUG_ON(skb->end - skb->tail < grow);
3146
3147 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3148
3149 skb->tail += grow;
3150 skb->data_len -= grow;
3151
3152 skb_shinfo(skb)->frags[0].page_offset += grow;
3153 skb_shinfo(skb)->frags[0].size -= grow;
3154
3155 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3156 put_page(skb_shinfo(skb)->frags[0].page);
3157 memmove(skb_shinfo(skb)->frags,
3158 skb_shinfo(skb)->frags + 1,
3159 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3160 }
3161 }
3162
3163ok:
3164 return ret;
3165
3166normal:
3167 ret = GRO_NORMAL;
3168 goto pull;
3169}
3170EXPORT_SYMBOL(dev_gro_receive);
3171
3172static gro_result_t
3173__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3174{
3175 struct sk_buff *p;
3176
3177 for (p = napi->gro_list; p; p = p->next) {
3178 NAPI_GRO_CB(p)->same_flow =
3179 (p->dev == skb->dev) &&
3180 !compare_ether_header(skb_mac_header(p),
3181 skb_gro_mac_header(skb));
3182 NAPI_GRO_CB(p)->flush = 0;
3183 }
3184
3185 return dev_gro_receive(napi, skb);
3186}
3187
3188gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3189{
3190 switch (ret) {
3191 case GRO_NORMAL:
3192 if (netif_receive_skb(skb))
3193 ret = GRO_DROP;
3194 break;
3195
3196 case GRO_DROP:
3197 case GRO_MERGED_FREE:
3198 kfree_skb(skb);
3199 break;
3200
3201 case GRO_HELD:
3202 case GRO_MERGED:
3203 break;
3204 }
3205
3206 return ret;
3207}
3208EXPORT_SYMBOL(napi_skb_finish);
3209
3210void skb_gro_reset_offset(struct sk_buff *skb)
3211{
3212 NAPI_GRO_CB(skb)->data_offset = 0;
3213 NAPI_GRO_CB(skb)->frag0 = NULL;
3214 NAPI_GRO_CB(skb)->frag0_len = 0;
3215
3216 if (skb->mac_header == skb->tail &&
3217 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
3218 NAPI_GRO_CB(skb)->frag0 =
3219 page_address(skb_shinfo(skb)->frags[0].page) +
3220 skb_shinfo(skb)->frags[0].page_offset;
3221 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3222 }
3223}
3224EXPORT_SYMBOL(skb_gro_reset_offset);
3225
3226gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3227{
3228 skb_gro_reset_offset(skb);
3229
3230 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3231}
3232EXPORT_SYMBOL(napi_gro_receive);
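/*
 * Example (not from this file): a GRO-aware driver substitutes
 * napi_gro_receive() for netif_receive_skb() in its poll loop, letting
 * dev_gro_receive() coalesce packets before delivery. A minimal sketch
 * of that delivery step:
 */
static void my_deliver_gro(struct napi_struct *napi,
			   struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(napi, skb);	/* may merge, hold, or pass it up */
}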
3233
3234void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3235{
3236 __skb_pull(skb, skb_headlen(skb));
3237 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3238
3239 napi->skb = skb;
3240}
3241EXPORT_SYMBOL(napi_reuse_skb);
3242
3243struct sk_buff *napi_get_frags(struct napi_struct *napi)
3244{
3245 struct sk_buff *skb = napi->skb;
3246
3247 if (!skb) {
3248 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3249 if (skb)
3250 napi->skb = skb;
3251 }
3252 return skb;
3253}
3254EXPORT_SYMBOL(napi_get_frags);
3255
3256gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3257 gro_result_t ret)
3258{
3259 switch (ret) {
3260 case GRO_NORMAL:
3261 case GRO_HELD:
3262 skb->protocol = eth_type_trans(skb, skb->dev);
3263
3264 if (ret == GRO_HELD)
3265 skb_gro_pull(skb, -ETH_HLEN);
3266 else if (netif_receive_skb(skb))
3267 ret = GRO_DROP;
3268 break;
3269
3270 case GRO_DROP:
3271 case GRO_MERGED_FREE:
3272 napi_reuse_skb(napi, skb);
3273 break;
3274
3275 case GRO_MERGED:
3276 break;
3277 }
3278
3279 return ret;
3280}
3281EXPORT_SYMBOL(napi_frags_finish);
3282
3283struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3284{
3285 struct sk_buff *skb = napi->skb;
3286 struct ethhdr *eth;
3287 unsigned int hlen;
3288 unsigned int off;
3289
3290 napi->skb = NULL;
3291
3292 skb_reset_mac_header(skb);
3293 skb_gro_reset_offset(skb);
3294
3295 off = skb_gro_offset(skb);
3296 hlen = off + sizeof(*eth);
3297 eth = skb_gro_header_fast(skb, off);
3298 if (skb_gro_header_hard(skb, hlen)) {
3299 eth = skb_gro_header_slow(skb, hlen, off);
3300 if (unlikely(!eth)) {
3301 napi_reuse_skb(napi, skb);
3302 skb = NULL;
3303 goto out;
3304 }
3305 }
3306
3307 skb_gro_pull(skb, sizeof(*eth));
3308
3309 /*
3310 * This works because the only protocols we care about don't require
3311 * special handling. We'll fix it up properly at the end.
3312 */
3313 skb->protocol = eth->h_proto;
3314
3315out:
3316 return skb;
3317}
3318EXPORT_SYMBOL(napi_frags_skb);
3319
3320gro_result_t napi_gro_frags(struct napi_struct *napi)
3321{
3322 struct sk_buff *skb = napi_frags_skb(napi);
3323
3324 if (!skb)
3325 return GRO_DROP;
3326
3327 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3328}
3329EXPORT_SYMBOL(napi_gro_frags);
3330
3331/*
3332 * net_rps_action sends any pending IPI's for rps.
3333 * Note: called with local irq disabled, but exits with local irq enabled.
3334 */
3335static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3336{
3337#ifdef CONFIG_RPS
3338 struct softnet_data *remsd = sd->rps_ipi_list;
3339
3340 if (remsd) {
3341 sd->rps_ipi_list = NULL;
3342
3343 local_irq_enable();
3344
3345 /* Send pending IPI's to kick RPS processing on remote cpus. */
3346 while (remsd) {
3347 struct softnet_data *next = remsd->rps_ipi_next;
3348
3349 if (cpu_online(remsd->cpu))
3350 __smp_call_function_single(remsd->cpu,
3351 &remsd->csd, 0);
3352 remsd = next;
3353 }
3354 } else
3355#endif
3356 local_irq_enable();
3357}
3358
3359static int process_backlog(struct napi_struct *napi, int quota)
3360{
3361 int work = 0;
3362 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3363
3364#ifdef CONFIG_RPS
3365 /* Check if we have pending ipi, its better to send them now,
3366 * not waiting net_rx_action() end.
3367 */
3368 if (sd->rps_ipi_list) {
3369 local_irq_disable();
3370 net_rps_action_and_irq_enable(sd);
3371 }
3372#endif
3373 napi->weight = weight_p;
3374 local_irq_disable();
3375 while (work < quota) {
3376 struct sk_buff *skb;
3377 unsigned int qlen;
3378
3379 while ((skb = __skb_dequeue(&sd->process_queue))) {
3380 local_irq_enable();
3381 __netif_receive_skb(skb);
3382 local_irq_disable();
3383 input_queue_head_incr(sd);
3384 if (++work >= quota) {
3385 local_irq_enable();
3386 return work;
3387 }
3388 }
3389
3390 rps_lock(sd);
3391 qlen = skb_queue_len(&sd->input_pkt_queue);
3392 if (qlen)
3393 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3394 &sd->process_queue);
3395
3396 if (qlen < quota - work) {
3397 /*
3398 * Inline a custom version of __napi_complete().
3399 * Only the current cpu owns and manipulates this napi,
3400 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3401 * so we can use a plain write instead of clear_bit(),
3402 * and we don't need an smp_mb() memory barrier.
3403 */
3404 list_del(&napi->poll_list);
3405 napi->state = 0;
3406
3407 quota = work + qlen;
3408 }
3409 rps_unlock(sd);
3410 }
3411 local_irq_enable();
3412
3413 return work;
3414}
3415
3416/**
3417 * __napi_schedule - schedule for receive
3418 * @n: entry to schedule
3419 *
3420 * The entry's receive function will be scheduled to run
3421 */
3422void __napi_schedule(struct napi_struct *n)
3423{
3424 unsigned long flags;
3425
3426 local_irq_save(flags);
3427 ____napi_schedule(&__get_cpu_var(softnet_data), n);
3428 local_irq_restore(flags);
3429}
3430EXPORT_SYMBOL(__napi_schedule);
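/*
 * Example (not from this file): the producer side. Drivers normally use
 * the napi_schedule() wrapper, which tests NAPI_STATE_SCHED before
 * calling __napi_schedule(); the napi instance itself is set up at
 * probe time with netif_napi_add(dev, &priv->napi, my_poll, 64). A
 * hedged sketch with hypothetical helpers, reusing the my_adapter
 * structure sketched earlier:
 */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_adapter *priv = dev_id;

	if (!my_irq_is_rx(priv))	/* hypothetical status check */
		return IRQ_NONE;

	my_disable_rx_irq(priv);	/* quiet the source ... */
	napi_schedule(&priv->napi);	/* ... and defer to my_poll() */
	return IRQ_HANDLED;
}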
3431
3432void __napi_complete(struct napi_struct *n)
3433{
3434 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3435 BUG_ON(n->gro_list);
3436
3437 list_del(&n->poll_list);
3438 smp_mb__before_clear_bit();
3439 clear_bit(NAPI_STATE_SCHED, &n->state);
3440}
3441EXPORT_SYMBOL(__napi_complete);
3442
3443void napi_complete(struct napi_struct *n)
3444{
3445 unsigned long flags;
3446
3447 /*
3448 * don't let napi dequeue from the cpu poll list
3449 * just in case its running on a different cpu
3450 */
3451 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3452 return;
3453
3454 napi_gro_flush(n);
3455 local_irq_save(flags);
3456 __napi_complete(n);
3457 local_irq_restore(flags);
3458}
3459EXPORT_SYMBOL(napi_complete);
3460
3461void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3462 int (*poll)(struct napi_struct *, int), int weight)
3463{
3464 INIT_LIST_HEAD(&napi->poll_list);
3465 napi->gro_count = 0;
3466 napi->gro_list = NULL;
3467 napi->skb = NULL;
3468 napi->poll = poll;
3469 napi->weight = weight;
3470 list_add(&napi->dev_list, &dev->napi_list);
3471 napi->dev = dev;
3472#ifdef CONFIG_NETPOLL
3473 spin_lock_init(&napi->poll_lock);
3474 napi->poll_owner = -1;
3475#endif
3476 set_bit(NAPI_STATE_SCHED, &napi->state);
3477}
3478EXPORT_SYMBOL(netif_napi_add);
3479
3480void netif_napi_del(struct napi_struct *napi)
3481{
3482 struct sk_buff *skb, *next;
3483
3484 list_del_init(&napi->dev_list);
3485 napi_free_frags(napi);
3486
3487 for (skb = napi->gro_list; skb; skb = next) {
3488 next = skb->next;
3489 skb->next = NULL;
3490 kfree_skb(skb);
3491 }
3492
3493 napi->gro_list = NULL;
3494 napi->gro_count = 0;
3495}
3496EXPORT_SYMBOL(netif_napi_del);
3497
3498static void net_rx_action(struct softirq_action *h)
3499{
3500 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3501 unsigned long time_limit = jiffies + 2;
3502 int budget = netdev_budget;
3503 void *have;
3504
3505 local_irq_disable();
3506
3507 while (!list_empty(&sd->poll_list)) {
3508 struct napi_struct *n;
3509 int work, weight;
1da177e4 3510
bea3348e 3511 /* If softirq window is exhuasted then punt.
24f8b238
SH
3512 * Allow this to run for 2 jiffies since which will allow
3513 * an average latency of 1.5/HZ.
bea3348e 3514 */
24f8b238 3515 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3516 goto softnet_break;
3517
3518 local_irq_enable();
3519
3520 /* Even though interrupts have been re-enabled, this
3521 * access is safe because interrupts can only add new
3522 * entries to the tail of this list, and only ->poll()
3523 * calls can remove this head entry from the list.
3524 */
3525 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3526
3527 have = netpoll_poll_lock(n);
3528
3529 weight = n->weight;
3530
3531 /* This NAPI_STATE_SCHED test is for avoiding a race
3532 * with netpoll's poll_napi(). Only the entity which
3533 * obtains the lock and sees NAPI_STATE_SCHED set will
3534 * actually make the ->poll() call. Therefore we avoid
3535 * accidentally calling ->poll() when NAPI is not scheduled.
3536 */
3537 work = 0;
3538 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3539 work = n->poll(n, weight);
3540 trace_napi_poll(n);
3541 }
3542
3543 WARN_ON_ONCE(work > weight);
3544
3545 budget -= work;
3546
3547 local_irq_disable();
3548
3549 /* Drivers must not modify the NAPI state if they
3550 * consume the entire weight. In such cases this code
3551 * still "owns" the NAPI instance and therefore can
3552 * move the instance around on the list at-will.
3553 */
3554 if (unlikely(work == weight)) {
3555 if (unlikely(napi_disable_pending(n))) {
3556 local_irq_enable();
3557 napi_complete(n);
3558 local_irq_disable();
3559 } else
3560 list_move_tail(&n->poll_list, &sd->poll_list);
3561 }
3562
3563 netpoll_poll_unlock(have);
3564 }
3565out:
3566 net_rps_action_and_irq_enable(sd);
3567
3568#ifdef CONFIG_NET_DMA
3569 /*
3570 * There may not be any more sk_buffs coming right now, so push
3571 * any pending DMA copies to hardware
3572 */
3573 dma_issue_pending_all();
3574#endif
3575
3576 return;
3577
3578softnet_break:
dee42870 3579 sd->time_squeeze++;
1da177e4
LT
3580 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3581 goto out;
3582}
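
net_rx_action() only drains sd->poll_list; entries get there from driver interrupt handlers. A hedged sketch of that producer side, where my_disable_rx_irq() stands in for device-specific interrupt masking:

/* Hedged sketch, not part of dev.c: the ISR half that feeds
 * net_rx_action(). napi_schedule_prep() sets NAPI_STATE_SCHED and
 * __napi_schedule() queues the instance on this CPU's poll_list. */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		my_disable_rx_irq(priv);	/* hypothetical helper */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
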
3583
d1b19dff 3584static gifconf_func_t *gifconf_list[NPROTO];
1da177e4
LT
3585
3586/**
3587 * register_gifconf - register a SIOCGIF handler
3588 * @family: Address family
3589 * @gifconf: Function handler
3590 *
3591 * Register protocol dependent address dumping routines. The handler
3592 * that is passed must not be freed or reused until it has been replaced
3593 * by another handler.
3594 */
d1b19dff 3595int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
1da177e4
LT
3596{
3597 if (family >= NPROTO)
3598 return -EINVAL;
3599 gifconf_list[family] = gifconf;
3600 return 0;
3601}
d1b19dff 3602EXPORT_SYMBOL(register_gifconf);
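
Callers are protocol families at init time; IPv4, for instance, hooks in roughly like this (a sketch following the in-tree naming, not a quote of devinet.c):

/* Hedged sketch, not part of dev.c: protocol-side registration. */
static int __init my_proto_dev_init(void)
{
	return register_gifconf(PF_INET, inet_gifconf);
}
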
1da177e4
LT
3603
3604
3605/*
3606 * Map an interface index to its name (SIOCGIFNAME)
3607 */
3608
3609/*
3610 * We need this ioctl for efficient implementation of the
3611 * if_indextoname() function required by the IPv6 API. Without
3612 * it, we would have to search all the interfaces to find a
3613 * match. --pb
3614 */
3615
881d966b 3616static int dev_ifname(struct net *net, struct ifreq __user *arg)
1da177e4
LT
3617{
3618 struct net_device *dev;
3619 struct ifreq ifr;
3620
3621 /*
3622 * Fetch the caller's info block.
3623 */
3624
3625 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3626 return -EFAULT;
3627
fb699dfd
ED
3628 rcu_read_lock();
3629 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
1da177e4 3630 if (!dev) {
fb699dfd 3631 rcu_read_unlock();
1da177e4
LT
3632 return -ENODEV;
3633 }
3634
3635 strcpy(ifr.ifr_name, dev->name);
fb699dfd 3636 rcu_read_unlock();
1da177e4
LT
3637
3638 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3639 return -EFAULT;
3640 return 0;
3641}
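
From userspace this is the backend of if_indextoname(); a minimal sketch of driving it directly with ioctl(2):

/* Hedged sketch, not part of dev.c: userspace view of SIOCGIFNAME. */
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int print_ifname(int ifindex)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = ifindex;
	if (ioctl(fd, SIOCGIFNAME, &ifr) == 0)
		printf("index %d is %s\n", ifindex, ifr.ifr_name);
	close(fd);
	return 0;
}
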
3642
3643/*
3644 * Perform a SIOCGIFCONF call. This structure will change
3645 * size eventually, and there is nothing I can do about it.
3646 * Thus we will need a 'compatibility mode'.
3647 */
3648
881d966b 3649static int dev_ifconf(struct net *net, char __user *arg)
1da177e4
LT
3650{
3651 struct ifconf ifc;
3652 struct net_device *dev;
3653 char __user *pos;
3654 int len;
3655 int total;
3656 int i;
3657
3658 /*
3659 * Fetch the caller's info block.
3660 */
3661
3662 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3663 return -EFAULT;
3664
3665 pos = ifc.ifc_buf;
3666 len = ifc.ifc_len;
3667
3668 /*
3669 * Loop over the interfaces, and write an info block for each.
3670 */
3671
3672 total = 0;
881d966b 3673 for_each_netdev(net, dev) {
1da177e4
LT
3674 for (i = 0; i < NPROTO; i++) {
3675 if (gifconf_list[i]) {
3676 int done;
3677 if (!pos)
3678 done = gifconf_list[i](dev, NULL, 0);
3679 else
3680 done = gifconf_list[i](dev, pos + total,
3681 len - total);
3682 if (done < 0)
3683 return -EFAULT;
3684 total += done;
3685 }
3686 }
4ec93edb 3687 }
1da177e4
LT
3688
3689 /*
3690 * All done. Write the updated control block back to the caller.
3691 */
3692 ifc.ifc_len = total;
3693
3694 /*
3695 * Both BSD and Solaris return 0 here, so we do too.
3696 */
3697 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3698}
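
The userspace counterpart, reusing the headers from the previous sketch, looks like this; note that the handlers stop writing once the caller's buffer is exhausted, so real code sizes the buffer in a loop:

/* Hedged sketch, not part of dev.c: userspace view of SIOCGIFCONF,
 * using a fixed buffer for brevity. */
static int list_ifaces(int fd)
{
	struct ifreq reqs[32];
	struct ifconf ifc;
	int i, n;

	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;
	if (ioctl(fd, SIOCGIFCONF, &ifc) < 0)
		return -1;
	n = ifc.ifc_len / sizeof(struct ifreq);
	for (i = 0; i < n; i++)
		printf("%s\n", reqs[i].ifr_name);
	return 0;
}
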
3699
3700#ifdef CONFIG_PROC_FS
3701/*
3702 * This is invoked by the /proc filesystem handler to display a device
3703 * in detail.
3704 */
7562f876 3705void *dev_seq_start(struct seq_file *seq, loff_t *pos)
c6d14c84 3706 __acquires(RCU)
1da177e4 3707{
e372c414 3708 struct net *net = seq_file_net(seq);
7562f876 3709 loff_t off;
1da177e4 3710 struct net_device *dev;
1da177e4 3711
c6d14c84 3712 rcu_read_lock();
7562f876
PE
3713 if (!*pos)
3714 return SEQ_START_TOKEN;
1da177e4 3715
7562f876 3716 off = 1;
c6d14c84 3717 for_each_netdev_rcu(net, dev)
7562f876
PE
3718 if (off++ == *pos)
3719 return dev;
1da177e4 3720
7562f876 3721 return NULL;
1da177e4
LT
3722}
3723
3724void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3725{
c6d14c84
ED
3726 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3727 first_net_device(seq_file_net(seq)) :
3728 next_net_device((struct net_device *)v);
3729
1da177e4 3730 ++*pos;
c6d14c84 3731 return rcu_dereference(dev);
1da177e4
LT
3732}
3733
3734void dev_seq_stop(struct seq_file *seq, void *v)
c6d14c84 3735 __releases(RCU)
1da177e4 3736{
c6d14c84 3737 rcu_read_unlock();
1da177e4
LT
3738}
3739
3740static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3741{
28172739
ED
3742 struct rtnl_link_stats64 temp;
3743 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
1da177e4 3744
be1f3c2c
BH
3745 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
3746 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
5a1b5898
RR
3747 dev->name, stats->rx_bytes, stats->rx_packets,
3748 stats->rx_errors,
3749 stats->rx_dropped + stats->rx_missed_errors,
3750 stats->rx_fifo_errors,
3751 stats->rx_length_errors + stats->rx_over_errors +
3752 stats->rx_crc_errors + stats->rx_frame_errors,
3753 stats->rx_compressed, stats->multicast,
3754 stats->tx_bytes, stats->tx_packets,
3755 stats->tx_errors, stats->tx_dropped,
3756 stats->tx_fifo_errors, stats->collisions,
3757 stats->tx_carrier_errors +
3758 stats->tx_aborted_errors +
3759 stats->tx_window_errors +
3760 stats->tx_heartbeat_errors,
3761 stats->tx_compressed);
1da177e4
LT
3762}
3763
3764/*
3765 * Called from the PROCfs module. This now uses the new arbitrary sized
3766 * /proc/net interface to create /proc/net/dev
3767 */
3768static int dev_seq_show(struct seq_file *seq, void *v)
3769{
3770 if (v == SEQ_START_TOKEN)
3771 seq_puts(seq, "Inter-| Receive "
3772 " | Transmit\n"
3773 " face |bytes packets errs drop fifo frame "
3774 "compressed multicast|bytes packets errs "
3775 "drop fifo colls carrier compressed\n");
3776 else
3777 dev_seq_printf_stats(seq, v);
3778 return 0;
3779}
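
A consumer of the file produced above only has to skip the two header lines and then parse per-interface columns; a hedged sketch, again reusing the headers shown earlier:

/* Hedged sketch, not part of dev.c: reading /proc/net/dev back from
 * userspace. Only the first two RX columns are parsed here. */
static void dump_rx_counters(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/dev", "r");
	int lineno = 0;

	if (!f)
		return;
	while (fgets(line, sizeof(line), f)) {
		unsigned long long rx_bytes, rx_packets;
		char name[IFNAMSIZ];

		if (++lineno <= 2)	/* skip the two header lines */
			continue;
		if (sscanf(line, " %15[^:]: %llu %llu",
			   name, &rx_bytes, &rx_packets) == 3)
			printf("%s: %llu bytes / %llu packets\n",
			       name, rx_bytes, rx_packets);
	}
	fclose(f);
}
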
3780
dee42870 3781static struct softnet_data *softnet_get_online(loff_t *pos)
1da177e4 3782{
dee42870 3783 struct softnet_data *sd = NULL;
1da177e4 3784
0c0b0aca 3785 while (*pos < nr_cpu_ids)
4ec93edb 3786 if (cpu_online(*pos)) {
dee42870 3787 sd = &per_cpu(softnet_data, *pos);
1da177e4
LT
3788 break;
3789 } else
3790 ++*pos;
dee42870 3791 return sd;
1da177e4
LT
3792}
3793
3794static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3795{
3796 return softnet_get_online(pos);
3797}
3798
3799static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3800{
3801 ++*pos;
3802 return softnet_get_online(pos);
3803}
3804
3805static void softnet_seq_stop(struct seq_file *seq, void *v)
3806{
3807}
3808
3809static int softnet_seq_show(struct seq_file *seq, void *v)
3810{
dee42870 3811 struct softnet_data *sd = v;
1da177e4 3812
0a9627f2 3813 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
dee42870 3814 sd->processed, sd->dropped, sd->time_squeeze, 0,
c1ebcdb8 3815 0, 0, 0, 0, /* was fastroute */
dee42870 3816 sd->cpu_collision, sd->received_rps);
1da177e4
LT
3817 return 0;
3818}
3819
f690808e 3820static const struct seq_operations dev_seq_ops = {
1da177e4
LT
3821 .start = dev_seq_start,
3822 .next = dev_seq_next,
3823 .stop = dev_seq_stop,
3824 .show = dev_seq_show,
3825};
3826
3827static int dev_seq_open(struct inode *inode, struct file *file)
3828{
e372c414
DL
3829 return seq_open_net(inode, file, &dev_seq_ops,
3830 sizeof(struct seq_net_private));
1da177e4
LT
3831}
3832
9a32144e 3833static const struct file_operations dev_seq_fops = {
1da177e4
LT
3834 .owner = THIS_MODULE,
3835 .open = dev_seq_open,
3836 .read = seq_read,
3837 .llseek = seq_lseek,
e372c414 3838 .release = seq_release_net,
1da177e4
LT
3839};
3840
f690808e 3841static const struct seq_operations softnet_seq_ops = {
1da177e4
LT
3842 .start = softnet_seq_start,
3843 .next = softnet_seq_next,
3844 .stop = softnet_seq_stop,
3845 .show = softnet_seq_show,
3846};
3847
3848static int softnet_seq_open(struct inode *inode, struct file *file)
3849{
3850 return seq_open(file, &softnet_seq_ops);
3851}
3852
9a32144e 3853static const struct file_operations softnet_seq_fops = {
1da177e4
LT
3854 .owner = THIS_MODULE,
3855 .open = softnet_seq_open,
3856 .read = seq_read,
3857 .llseek = seq_lseek,
3858 .release = seq_release,
3859};
3860
0e1256ff
SH
3861static void *ptype_get_idx(loff_t pos)
3862{
3863 struct packet_type *pt = NULL;
3864 loff_t i = 0;
3865 int t;
3866
3867 list_for_each_entry_rcu(pt, &ptype_all, list) {
3868 if (i == pos)
3869 return pt;
3870 ++i;
3871 }
3872
82d8a867 3873 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
0e1256ff
SH
3874 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3875 if (i == pos)
3876 return pt;
3877 ++i;
3878 }
3879 }
3880 return NULL;
3881}
3882
3883static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
72348a42 3884 __acquires(RCU)
0e1256ff
SH
3885{
3886 rcu_read_lock();
3887 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3888}
3889
3890static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3891{
3892 struct packet_type *pt;
3893 struct list_head *nxt;
3894 int hash;
3895
3896 ++*pos;
3897 if (v == SEQ_START_TOKEN)
3898 return ptype_get_idx(0);
3899
3900 pt = v;
3901 nxt = pt->list.next;
3902 if (pt->type == htons(ETH_P_ALL)) {
3903 if (nxt != &ptype_all)
3904 goto found;
3905 hash = 0;
3906 nxt = ptype_base[0].next;
3907 } else
82d8a867 3908 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
0e1256ff
SH
3909
3910 while (nxt == &ptype_base[hash]) {
82d8a867 3911 if (++hash >= PTYPE_HASH_SIZE)
0e1256ff
SH
3912 return NULL;
3913 nxt = ptype_base[hash].next;
3914 }
3915found:
3916 return list_entry(nxt, struct packet_type, list);
3917}
3918
3919static void ptype_seq_stop(struct seq_file *seq, void *v)
72348a42 3920 __releases(RCU)
0e1256ff
SH
3921{
3922 rcu_read_unlock();
3923}
3924
0e1256ff
SH
3925static int ptype_seq_show(struct seq_file *seq, void *v)
3926{
3927 struct packet_type *pt = v;
3928
3929 if (v == SEQ_START_TOKEN)
3930 seq_puts(seq, "Type Device Function\n");
c346dca1 3931 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
0e1256ff
SH
3932 if (pt->type == htons(ETH_P_ALL))
3933 seq_puts(seq, "ALL ");
3934 else
3935 seq_printf(seq, "%04x", ntohs(pt->type));
3936
908cd2da
AD
3937 seq_printf(seq, " %-8s %pF\n",
3938 pt->dev ? pt->dev->name : "", pt->func);
0e1256ff
SH
3939 }
3940
3941 return 0;
3942}
3943
3944static const struct seq_operations ptype_seq_ops = {
3945 .start = ptype_seq_start,
3946 .next = ptype_seq_next,
3947 .stop = ptype_seq_stop,
3948 .show = ptype_seq_show,
3949};
3950
3951static int ptype_seq_open(struct inode *inode, struct file *file)
3952{
2feb27db
PE
3953 return seq_open_net(inode, file, &ptype_seq_ops,
3954 sizeof(struct seq_net_private));
0e1256ff
SH
3955}
3956
3957static const struct file_operations ptype_seq_fops = {
3958 .owner = THIS_MODULE,
3959 .open = ptype_seq_open,
3960 .read = seq_read,
3961 .llseek = seq_lseek,
2feb27db 3962 .release = seq_release_net,
0e1256ff
SH
3963};
3964
3965
4665079c 3966static int __net_init dev_proc_net_init(struct net *net)
1da177e4
LT
3967{
3968 int rc = -ENOMEM;
3969
881d966b 3970 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
1da177e4 3971 goto out;
881d966b 3972 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
1da177e4 3973 goto out_dev;
881d966b 3974 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
457c4cbc 3975 goto out_softnet;
0e1256ff 3976
881d966b 3977 if (wext_proc_init(net))
457c4cbc 3978 goto out_ptype;
1da177e4
LT
3979 rc = 0;
3980out:
3981 return rc;
457c4cbc 3982out_ptype:
881d966b 3983 proc_net_remove(net, "ptype");
1da177e4 3984out_softnet:
881d966b 3985 proc_net_remove(net, "softnet_stat");
1da177e4 3986out_dev:
881d966b 3987 proc_net_remove(net, "dev");
1da177e4
LT
3988 goto out;
3989}
881d966b 3990
4665079c 3991static void __net_exit dev_proc_net_exit(struct net *net)
881d966b
EB
3992{
3993 wext_proc_exit(net);
3994
3995 proc_net_remove(net, "ptype");
3996 proc_net_remove(net, "softnet_stat");
3997 proc_net_remove(net, "dev");
3998}
3999
022cbae6 4000static struct pernet_operations __net_initdata dev_proc_ops = {
881d966b
EB
4001 .init = dev_proc_net_init,
4002 .exit = dev_proc_net_exit,
4003};
4004
4005static int __init dev_proc_init(void)
4006{
4007 return register_pernet_subsys(&dev_proc_ops);
4008}
1da177e4
LT
4009#else
4010#define dev_proc_init() 0
4011#endif /* CONFIG_PROC_FS */
4012
4013
4014/**
4015 * netdev_set_master - set up master/slave pair
4016 * @slave: slave device
4017 * @master: new master device
4018 *
4019 * Changes the master device of the slave. Pass %NULL to break the
4020 * bonding. The caller must hold the RTNL semaphore. On a failure
4021 * a negative errno code is returned. On success the reference counts
4022 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
4023 * function returns zero.
4024 */
4025int netdev_set_master(struct net_device *slave, struct net_device *master)
4026{
4027 struct net_device *old = slave->master;
4028
4029 ASSERT_RTNL();
4030
4031 if (master) {
4032 if (old)
4033 return -EBUSY;
4034 dev_hold(master);
4035 }
4036
4037 slave->master = master;
4ec93edb 4038
283f2fe8
ED
4039 if (old) {
4040 synchronize_net();
1da177e4 4041 dev_put(old);
283f2fe8 4042 }
1da177e4
LT
4043 if (master)
4044 slave->flags |= IFF_SLAVE;
4045 else
4046 slave->flags &= ~IFF_SLAVE;
4047
4048 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4049 return 0;
4050}
d1b19dff 4051EXPORT_SYMBOL(netdev_set_master);
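
Bonding-style drivers are the intended callers; a sketch of enslave/release under RTNL, with the driver-private steps elided:

/* Hedged sketch, not part of dev.c: pairing and unpairing a slave.
 * netdev_set_master() itself checks ASSERT_RTNL(). */
static int my_enslave(struct net_device *bond, struct net_device *slave)
{
	int err = netdev_set_master(slave, bond);	/* holds a ref on bond */

	if (err)
		return err;		/* -EBUSY if slave already has a master */
	/* ... driver-private enslave work ... */
	return 0;
}

static void my_release_slave(struct net_device *slave)
{
	netdev_set_master(slave, NULL);	/* drops the ref, clears IFF_SLAVE */
}
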
1da177e4 4052
b6c40d68
PM
4053static void dev_change_rx_flags(struct net_device *dev, int flags)
4054{
d314774c
SH
4055 const struct net_device_ops *ops = dev->netdev_ops;
4056
4057 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4058 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
4059}
4060
dad9b335 4061static int __dev_set_promiscuity(struct net_device *dev, int inc)
1da177e4
LT
4062{
4063 unsigned short old_flags = dev->flags;
8192b0c4
DH
4064 uid_t uid;
4065 gid_t gid;
1da177e4 4066
24023451
PM
4067 ASSERT_RTNL();
4068
dad9b335
WC
4069 dev->flags |= IFF_PROMISC;
4070 dev->promiscuity += inc;
4071 if (dev->promiscuity == 0) {
4072 /*
4073 * Avoid overflow.
4074 * If inc causes overflow, untouch promisc and return error.
4075 */
4076 if (inc < 0)
4077 dev->flags &= ~IFF_PROMISC;
4078 else {
4079 dev->promiscuity -= inc;
4080 printk(KERN_WARNING "%s: promiscuity touches roof, "
4081 "set promiscuity failed, promiscuity feature "
4082 "of device might be broken.\n", dev->name);
4083 return -EOVERFLOW;
4084 }
4085 }
52609c0b 4086 if (dev->flags != old_flags) {
1da177e4
LT
4087 printk(KERN_INFO "device %s %s promiscuous mode\n",
4088 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4ec93edb 4089 "left");
8192b0c4
DH
4090 if (audit_enabled) {
4091 current_uid_gid(&uid, &gid);
7759db82
KHK
4092 audit_log(current->audit_context, GFP_ATOMIC,
4093 AUDIT_ANOM_PROMISCUOUS,
4094 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4095 dev->name, (dev->flags & IFF_PROMISC),
4096 (old_flags & IFF_PROMISC),
4097 audit_get_loginuid(current),
8192b0c4 4098 uid, gid,
7759db82 4099 audit_get_sessionid(current));
8192b0c4 4100 }
24023451 4101
b6c40d68 4102 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 4103 }
dad9b335 4104 return 0;
1da177e4
LT
4105}
4106
4417da66
PM
4107/**
4108 * dev_set_promiscuity - update promiscuity count on a device
4109 * @dev: device
4110 * @inc: modifier
4111 *
4112 * Add or remove promiscuity from a device. While the count in the device
4113 * remains above zero the interface remains promiscuous. Once it hits zero
4114 * the device reverts back to normal filtering operation. A negative inc
4115 * value is used to drop promiscuity on the device.
dad9b335 4116 * Return 0 if successful or a negative errno code on error.
4417da66 4117 */
dad9b335 4118int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66
PM
4119{
4120 unsigned short old_flags = dev->flags;
dad9b335 4121 int err;
4417da66 4122
dad9b335 4123 err = __dev_set_promiscuity(dev, inc);
4b5a698e 4124 if (err < 0)
dad9b335 4125 return err;
4417da66
PM
4126 if (dev->flags != old_flags)
4127 dev_set_rx_mode(dev);
dad9b335 4128 return err;
4417da66 4129}
d1b19dff 4130EXPORT_SYMBOL(dev_set_promiscuity);
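
Because promiscuity is a counter, every +1 must eventually be matched by a -1; a capture-style user would do, under RTNL:

/* Hedged sketch, not part of dev.c: symmetric use of the counter. */
static int capture_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* can fail with -EOVERFLOW */
}

static void capture_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);		/* give our reference back */
}
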
4417da66 4131
1da177e4
LT
4132/**
4133 * dev_set_allmulti - update allmulti count on a device
4134 * @dev: device
4135 * @inc: modifier
4136 *
4137 * Add or remove reception of all multicast frames to a device. While the
 4138 * count in the device remains above zero the interface remains listening
 4139 * to all multicast frames. Once it hits zero the device reverts back to normal
4140 * filtering operation. A negative @inc value is used to drop the counter
4141 * when releasing a resource needing all multicasts.
dad9b335 4142 * Return 0 if successful or a negative errno code on error.
1da177e4
LT
4143 */
4144
dad9b335 4145int dev_set_allmulti(struct net_device *dev, int inc)
1da177e4
LT
4146{
4147 unsigned short old_flags = dev->flags;
4148
24023451
PM
4149 ASSERT_RTNL();
4150
1da177e4 4151 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
4152 dev->allmulti += inc;
4153 if (dev->allmulti == 0) {
4154 /*
4155 * Avoid overflow.
 4156 * If inc causes overflow, leave allmulti untouched and return an error.
4157 */
4158 if (inc < 0)
4159 dev->flags &= ~IFF_ALLMULTI;
4160 else {
4161 dev->allmulti -= inc;
4162 printk(KERN_WARNING "%s: allmulti touches roof, "
4163 "set allmulti failed, allmulti feature of "
4164 "device might be broken.\n", dev->name);
4165 return -EOVERFLOW;
4166 }
4167 }
24023451 4168 if (dev->flags ^ old_flags) {
b6c40d68 4169 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 4170 dev_set_rx_mode(dev);
24023451 4171 }
dad9b335 4172 return 0;
4417da66 4173}
d1b19dff 4174EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
4175
4176/*
4177 * Upload unicast and multicast address lists to device and
4178 * configure RX filtering. When the device doesn't support unicast
53ccaae1 4179 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
4180 * are present.
4181 */
4182void __dev_set_rx_mode(struct net_device *dev)
4183{
d314774c
SH
4184 const struct net_device_ops *ops = dev->netdev_ops;
4185
4417da66
PM
4186 /* dev_open will call this function so the list will stay sane. */
4187 if (!(dev->flags&IFF_UP))
4188 return;
4189
4190 if (!netif_device_present(dev))
40b77c94 4191 return;
4417da66 4192
d314774c
SH
4193 if (ops->ndo_set_rx_mode)
4194 ops->ndo_set_rx_mode(dev);
4417da66
PM
4195 else {
 4196 /* Unicast address changes may only happen under the rtnl,
4197 * therefore calling __dev_set_promiscuity here is safe.
4198 */
32e7bfc4 4199 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4417da66
PM
4200 __dev_set_promiscuity(dev, 1);
4201 dev->uc_promisc = 1;
32e7bfc4 4202 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4417da66
PM
4203 __dev_set_promiscuity(dev, -1);
4204 dev->uc_promisc = 0;
4205 }
4206
d314774c
SH
4207 if (ops->ndo_set_multicast_list)
4208 ops->ndo_set_multicast_list(dev);
4417da66
PM
4209 }
4210}
4211
4212void dev_set_rx_mode(struct net_device *dev)
4213{
b9e40857 4214 netif_addr_lock_bh(dev);
4417da66 4215 __dev_set_rx_mode(dev);
b9e40857 4216 netif_addr_unlock_bh(dev);
1da177e4
LT
4217}
4218
f0db275a
SH
4219/**
4220 * dev_get_flags - get flags reported to userspace
4221 * @dev: device
4222 *
4223 * Get the combination of flag bits exported through APIs to userspace.
4224 */
1da177e4
LT
4225unsigned dev_get_flags(const struct net_device *dev)
4226{
4227 unsigned flags;
4228
4229 flags = (dev->flags & ~(IFF_PROMISC |
4230 IFF_ALLMULTI |
b00055aa
SR
4231 IFF_RUNNING |
4232 IFF_LOWER_UP |
4233 IFF_DORMANT)) |
1da177e4
LT
4234 (dev->gflags & (IFF_PROMISC |
4235 IFF_ALLMULTI));
4236
b00055aa
SR
4237 if (netif_running(dev)) {
4238 if (netif_oper_up(dev))
4239 flags |= IFF_RUNNING;
4240 if (netif_carrier_ok(dev))
4241 flags |= IFF_LOWER_UP;
4242 if (netif_dormant(dev))
4243 flags |= IFF_DORMANT;
4244 }
1da177e4
LT
4245
4246 return flags;
4247}
d1b19dff 4248EXPORT_SYMBOL(dev_get_flags);
1da177e4 4249
bd380811 4250int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 4251{
1da177e4 4252 int old_flags = dev->flags;
bd380811 4253 int ret;
1da177e4 4254
24023451
PM
4255 ASSERT_RTNL();
4256
1da177e4
LT
4257 /*
4258 * Set the flags on our device.
4259 */
4260
4261 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4262 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4263 IFF_AUTOMEDIA)) |
4264 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4265 IFF_ALLMULTI));
4266
4267 /*
4268 * Load in the correct multicast list now the flags have changed.
4269 */
4270
b6c40d68
PM
4271 if ((old_flags ^ flags) & IFF_MULTICAST)
4272 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 4273
4417da66 4274 dev_set_rx_mode(dev);
1da177e4
LT
4275
4276 /*
 4277 * Have we downed the interface? We handle IFF_UP ourselves
4278 * according to user attempts to set it, rather than blindly
4279 * setting it.
4280 */
4281
4282 ret = 0;
4283 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
bd380811 4284 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4
LT
4285
4286 if (!ret)
4417da66 4287 dev_set_rx_mode(dev);
1da177e4
LT
4288 }
4289
1da177e4 4290 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff
ED
4291 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4292
1da177e4
LT
4293 dev->gflags ^= IFF_PROMISC;
4294 dev_set_promiscuity(dev, inc);
4295 }
4296
4297 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4298 is important. Some (broken) drivers set IFF_PROMISC when
 4299 IFF_ALLMULTI is requested, without asking us and without reporting.
4300 */
4301 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
4302 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4303
1da177e4
LT
4304 dev->gflags ^= IFF_ALLMULTI;
4305 dev_set_allmulti(dev, inc);
4306 }
4307
bd380811
PM
4308 return ret;
4309}
4310
4311void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4312{
4313 unsigned int changes = dev->flags ^ old_flags;
4314
4315 if (changes & IFF_UP) {
4316 if (dev->flags & IFF_UP)
4317 call_netdevice_notifiers(NETDEV_UP, dev);
4318 else
4319 call_netdevice_notifiers(NETDEV_DOWN, dev);
4320 }
4321
4322 if (dev->flags & IFF_UP &&
4323 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4324 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4325}
4326
4327/**
4328 * dev_change_flags - change device settings
4329 * @dev: device
4330 * @flags: device state flags
4331 *
4332 * Change settings on device based state flags. The flags are
4333 * in the userspace exported format.
4334 */
4335int dev_change_flags(struct net_device *dev, unsigned flags)
4336{
4337 int ret, changes;
4338 int old_flags = dev->flags;
4339
4340 ret = __dev_change_flags(dev, flags);
4341 if (ret < 0)
4342 return ret;
4343
4344 changes = old_flags ^ dev->flags;
7c355f53
TG
4345 if (changes)
4346 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
1da177e4 4347
bd380811 4348 __dev_notify_flags(dev, old_flags);
1da177e4
LT
4349 return ret;
4350}
d1b19dff 4351EXPORT_SYMBOL(dev_change_flags);
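
An in-kernel caller toggles individual bits against the current dev->flags while holding RTNL; a sketch:

/* Hedged sketch, not part of dev.c: bringing a device up or down. */
static int my_set_link_up(struct net_device *dev, bool up)
{
	unsigned int flags;
	int err;

	rtnl_lock();
	flags = up ? dev->flags | IFF_UP : dev->flags & ~IFF_UP;
	err = dev_change_flags(dev, flags);
	rtnl_unlock();
	return err;
}
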
1da177e4 4352
f0db275a
SH
4353/**
4354 * dev_set_mtu - Change maximum transfer unit
4355 * @dev: device
4356 * @new_mtu: new transfer unit
4357 *
4358 * Change the maximum transfer size of the network device.
4359 */
1da177e4
LT
4360int dev_set_mtu(struct net_device *dev, int new_mtu)
4361{
d314774c 4362 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4363 int err;
4364
4365 if (new_mtu == dev->mtu)
4366 return 0;
4367
4368 /* MTU must be positive. */
4369 if (new_mtu < 0)
4370 return -EINVAL;
4371
4372 if (!netif_device_present(dev))
4373 return -ENODEV;
4374
4375 err = 0;
d314774c
SH
4376 if (ops->ndo_change_mtu)
4377 err = ops->ndo_change_mtu(dev, new_mtu);
1da177e4
LT
4378 else
4379 dev->mtu = new_mtu;
d314774c 4380
1da177e4 4381 if (!err && dev->flags & IFF_UP)
056925ab 4382 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
1da177e4
LT
4383 return err;
4384}
d1b19dff 4385EXPORT_SYMBOL(dev_set_mtu);
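
Callers conventionally hold RTNL here as well; switching to jumbo frames, for example, is a one-liner that the driver can still veto through ndo_change_mtu:

/* Hedged sketch, not part of dev.c: in-kernel MTU change. */
static int my_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* rejected if the driver says no */
	rtnl_unlock();
	return err;
}
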
1da177e4 4386
f0db275a
SH
4387/**
4388 * dev_set_mac_address - Change Media Access Control Address
4389 * @dev: device
4390 * @sa: new address
4391 *
4392 * Change the hardware (MAC) address of the device
4393 */
1da177e4
LT
4394int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4395{
d314774c 4396 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4397 int err;
4398
d314774c 4399 if (!ops->ndo_set_mac_address)
1da177e4
LT
4400 return -EOPNOTSUPP;
4401 if (sa->sa_family != dev->type)
4402 return -EINVAL;
4403 if (!netif_device_present(dev))
4404 return -ENODEV;
d314774c 4405 err = ops->ndo_set_mac_address(dev, sa);
1da177e4 4406 if (!err)
056925ab 4407 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
4408 return err;
4409}
d1b19dff 4410EXPORT_SYMBOL(dev_set_mac_address);
1da177e4
LT
4411
4412/*
3710becf 4413 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
1da177e4 4414 */
14e3e079 4415static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
1da177e4
LT
4416{
4417 int err;
3710becf 4418 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
1da177e4
LT
4419
4420 if (!dev)
4421 return -ENODEV;
4422
4423 switch (cmd) {
d1b19dff
ED
4424 case SIOCGIFFLAGS: /* Get interface flags */
4425 ifr->ifr_flags = (short) dev_get_flags(dev);
4426 return 0;
1da177e4 4427
d1b19dff
ED
4428 case SIOCGIFMETRIC: /* Get the metric on the interface
4429 (currently unused) */
4430 ifr->ifr_metric = 0;
4431 return 0;
1da177e4 4432
d1b19dff
ED
4433 case SIOCGIFMTU: /* Get the MTU of a device */
4434 ifr->ifr_mtu = dev->mtu;
4435 return 0;
1da177e4 4436
d1b19dff
ED
4437 case SIOCGIFHWADDR:
4438 if (!dev->addr_len)
4439 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4440 else
4441 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4442 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4443 ifr->ifr_hwaddr.sa_family = dev->type;
4444 return 0;
1da177e4 4445
d1b19dff
ED
4446 case SIOCGIFSLAVE:
4447 err = -EINVAL;
4448 break;
14e3e079 4449
d1b19dff
ED
4450 case SIOCGIFMAP:
4451 ifr->ifr_map.mem_start = dev->mem_start;
4452 ifr->ifr_map.mem_end = dev->mem_end;
4453 ifr->ifr_map.base_addr = dev->base_addr;
4454 ifr->ifr_map.irq = dev->irq;
4455 ifr->ifr_map.dma = dev->dma;
4456 ifr->ifr_map.port = dev->if_port;
4457 return 0;
14e3e079 4458
d1b19dff
ED
4459 case SIOCGIFINDEX:
4460 ifr->ifr_ifindex = dev->ifindex;
4461 return 0;
14e3e079 4462
d1b19dff
ED
4463 case SIOCGIFTXQLEN:
4464 ifr->ifr_qlen = dev->tx_queue_len;
4465 return 0;
14e3e079 4466
d1b19dff
ED
4467 default:
4468 /* dev_ioctl() should ensure this case
4469 * is never reached
4470 */
4471 WARN_ON(1);
4472 err = -EINVAL;
4473 break;
14e3e079
JG
4474
4475 }
4476 return err;
4477}
4478
4479/*
4480 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4481 */
4482static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4483{
4484 int err;
4485 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5f2f6da7 4486 const struct net_device_ops *ops;
14e3e079
JG
4487
4488 if (!dev)
4489 return -ENODEV;
4490
5f2f6da7
JP
4491 ops = dev->netdev_ops;
4492
14e3e079 4493 switch (cmd) {
d1b19dff
ED
4494 case SIOCSIFFLAGS: /* Set interface flags */
4495 return dev_change_flags(dev, ifr->ifr_flags);
14e3e079 4496
d1b19dff
ED
4497 case SIOCSIFMETRIC: /* Set the metric on the interface
4498 (currently unused) */
4499 return -EOPNOTSUPP;
14e3e079 4500
d1b19dff
ED
4501 case SIOCSIFMTU: /* Set the MTU of a device */
4502 return dev_set_mtu(dev, ifr->ifr_mtu);
1da177e4 4503
d1b19dff
ED
4504 case SIOCSIFHWADDR:
4505 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
1da177e4 4506
d1b19dff
ED
4507 case SIOCSIFHWBROADCAST:
4508 if (ifr->ifr_hwaddr.sa_family != dev->type)
4509 return -EINVAL;
4510 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4511 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4512 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4513 return 0;
1da177e4 4514
d1b19dff
ED
4515 case SIOCSIFMAP:
4516 if (ops->ndo_set_config) {
1da177e4
LT
4517 if (!netif_device_present(dev))
4518 return -ENODEV;
d1b19dff
ED
4519 return ops->ndo_set_config(dev, &ifr->ifr_map);
4520 }
4521 return -EOPNOTSUPP;
1da177e4 4522
d1b19dff
ED
4523 case SIOCADDMULTI:
4524 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4525 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4526 return -EINVAL;
4527 if (!netif_device_present(dev))
4528 return -ENODEV;
22bedad3 4529 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
d1b19dff
ED
4530
4531 case SIOCDELMULTI:
4532 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4533 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4534 return -EINVAL;
4535 if (!netif_device_present(dev))
4536 return -ENODEV;
22bedad3 4537 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
1da177e4 4538
d1b19dff
ED
4539 case SIOCSIFTXQLEN:
4540 if (ifr->ifr_qlen < 0)
4541 return -EINVAL;
4542 dev->tx_queue_len = ifr->ifr_qlen;
4543 return 0;
1da177e4 4544
d1b19dff
ED
4545 case SIOCSIFNAME:
4546 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4547 return dev_change_name(dev, ifr->ifr_newname);
1da177e4 4548
d1b19dff
ED
4549 /*
4550 * Unknown or private ioctl
4551 */
4552 default:
4553 if ((cmd >= SIOCDEVPRIVATE &&
4554 cmd <= SIOCDEVPRIVATE + 15) ||
4555 cmd == SIOCBONDENSLAVE ||
4556 cmd == SIOCBONDRELEASE ||
4557 cmd == SIOCBONDSETHWADDR ||
4558 cmd == SIOCBONDSLAVEINFOQUERY ||
4559 cmd == SIOCBONDINFOQUERY ||
4560 cmd == SIOCBONDCHANGEACTIVE ||
4561 cmd == SIOCGMIIPHY ||
4562 cmd == SIOCGMIIREG ||
4563 cmd == SIOCSMIIREG ||
4564 cmd == SIOCBRADDIF ||
4565 cmd == SIOCBRDELIF ||
4566 cmd == SIOCSHWTSTAMP ||
4567 cmd == SIOCWANDEV) {
4568 err = -EOPNOTSUPP;
4569 if (ops->ndo_do_ioctl) {
4570 if (netif_device_present(dev))
4571 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4572 else
4573 err = -ENODEV;
4574 }
4575 } else
4576 err = -EINVAL;
1da177e4
LT
4577
4578 }
4579 return err;
4580}
4581
4582/*
4583 * This function handles all "interface"-type I/O control requests. The actual
4584 * 'doing' part of this is dev_ifsioc above.
4585 */
4586
4587/**
4588 * dev_ioctl - network device ioctl
c4ea43c5 4589 * @net: the applicable net namespace
1da177e4
LT
4590 * @cmd: command to issue
4591 * @arg: pointer to a struct ifreq in user space
4592 *
4593 * Issue ioctl functions to devices. This is normally called by the
4594 * user space syscall interfaces but can sometimes be useful for
4595 * other purposes. The return value is the return from the syscall if
4596 * positive or a negative errno code on error.
4597 */
4598
881d966b 4599int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1da177e4
LT
4600{
4601 struct ifreq ifr;
4602 int ret;
4603 char *colon;
4604
4605 /* One special case: SIOCGIFCONF takes ifconf argument
4606 and requires shared lock, because it sleeps writing
4607 to user space.
4608 */
4609
4610 if (cmd == SIOCGIFCONF) {
6756ae4b 4611 rtnl_lock();
881d966b 4612 ret = dev_ifconf(net, (char __user *) arg);
6756ae4b 4613 rtnl_unlock();
1da177e4
LT
4614 return ret;
4615 }
4616 if (cmd == SIOCGIFNAME)
881d966b 4617 return dev_ifname(net, (struct ifreq __user *)arg);
1da177e4
LT
4618
4619 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4620 return -EFAULT;
4621
4622 ifr.ifr_name[IFNAMSIZ-1] = 0;
4623
4624 colon = strchr(ifr.ifr_name, ':');
4625 if (colon)
4626 *colon = 0;
4627
4628 /*
4629 * See which interface the caller is talking about.
4630 */
4631
4632 switch (cmd) {
d1b19dff
ED
4633 /*
4634 * These ioctl calls:
4635 * - can be done by all.
4636 * - atomic and do not require locking.
4637 * - return a value
4638 */
4639 case SIOCGIFFLAGS:
4640 case SIOCGIFMETRIC:
4641 case SIOCGIFMTU:
4642 case SIOCGIFHWADDR:
4643 case SIOCGIFSLAVE:
4644 case SIOCGIFMAP:
4645 case SIOCGIFINDEX:
4646 case SIOCGIFTXQLEN:
4647 dev_load(net, ifr.ifr_name);
3710becf 4648 rcu_read_lock();
d1b19dff 4649 ret = dev_ifsioc_locked(net, &ifr, cmd);
3710becf 4650 rcu_read_unlock();
d1b19dff
ED
4651 if (!ret) {
4652 if (colon)
4653 *colon = ':';
4654 if (copy_to_user(arg, &ifr,
4655 sizeof(struct ifreq)))
4656 ret = -EFAULT;
4657 }
4658 return ret;
1da177e4 4659
d1b19dff
ED
4660 case SIOCETHTOOL:
4661 dev_load(net, ifr.ifr_name);
4662 rtnl_lock();
4663 ret = dev_ethtool(net, &ifr);
4664 rtnl_unlock();
4665 if (!ret) {
4666 if (colon)
4667 *colon = ':';
4668 if (copy_to_user(arg, &ifr,
4669 sizeof(struct ifreq)))
4670 ret = -EFAULT;
4671 }
4672 return ret;
1da177e4 4673
d1b19dff
ED
4674 /*
4675 * These ioctl calls:
4676 * - require superuser power.
4677 * - require strict serialization.
4678 * - return a value
4679 */
4680 case SIOCGMIIPHY:
4681 case SIOCGMIIREG:
4682 case SIOCSIFNAME:
4683 if (!capable(CAP_NET_ADMIN))
4684 return -EPERM;
4685 dev_load(net, ifr.ifr_name);
4686 rtnl_lock();
4687 ret = dev_ifsioc(net, &ifr, cmd);
4688 rtnl_unlock();
4689 if (!ret) {
4690 if (colon)
4691 *colon = ':';
4692 if (copy_to_user(arg, &ifr,
4693 sizeof(struct ifreq)))
4694 ret = -EFAULT;
4695 }
4696 return ret;
1da177e4 4697
d1b19dff
ED
4698 /*
4699 * These ioctl calls:
4700 * - require superuser power.
4701 * - require strict serialization.
4702 * - do not return a value
4703 */
4704 case SIOCSIFFLAGS:
4705 case SIOCSIFMETRIC:
4706 case SIOCSIFMTU:
4707 case SIOCSIFMAP:
4708 case SIOCSIFHWADDR:
4709 case SIOCSIFSLAVE:
4710 case SIOCADDMULTI:
4711 case SIOCDELMULTI:
4712 case SIOCSIFHWBROADCAST:
4713 case SIOCSIFTXQLEN:
4714 case SIOCSMIIREG:
4715 case SIOCBONDENSLAVE:
4716 case SIOCBONDRELEASE:
4717 case SIOCBONDSETHWADDR:
4718 case SIOCBONDCHANGEACTIVE:
4719 case SIOCBRADDIF:
4720 case SIOCBRDELIF:
4721 case SIOCSHWTSTAMP:
4722 if (!capable(CAP_NET_ADMIN))
4723 return -EPERM;
4724 /* fall through */
4725 case SIOCBONDSLAVEINFOQUERY:
4726 case SIOCBONDINFOQUERY:
4727 dev_load(net, ifr.ifr_name);
4728 rtnl_lock();
4729 ret = dev_ifsioc(net, &ifr, cmd);
4730 rtnl_unlock();
4731 return ret;
4732
4733 case SIOCGIFMEM:
4734 /* Get the per device memory space. We can add this but
4735 * currently do not support it */
4736 case SIOCSIFMEM:
4737 /* Set the per device memory buffer space.
4738 * Not applicable in our case */
4739 case SIOCSIFLINK:
4740 return -EINVAL;
4741
4742 /*
4743 * Unknown or private ioctl.
4744 */
4745 default:
4746 if (cmd == SIOCWANDEV ||
4747 (cmd >= SIOCDEVPRIVATE &&
4748 cmd <= SIOCDEVPRIVATE + 15)) {
881d966b 4749 dev_load(net, ifr.ifr_name);
1da177e4 4750 rtnl_lock();
881d966b 4751 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4 4752 rtnl_unlock();
d1b19dff
ED
4753 if (!ret && copy_to_user(arg, &ifr,
4754 sizeof(struct ifreq)))
4755 ret = -EFAULT;
1da177e4 4756 return ret;
d1b19dff
ED
4757 }
4758 /* Take care of Wireless Extensions */
4759 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4760 return wext_handle_ioctl(net, &ifr, cmd, arg);
4761 return -EINVAL;
1da177e4
LT
4762 }
4763}
4764
4765
4766/**
4767 * dev_new_index - allocate an ifindex
c4ea43c5 4768 * @net: the applicable net namespace
1da177e4
LT
4769 *
4770 * Returns a suitable unique value for a new device interface
4771 * number. The caller must hold the rtnl semaphore or the
4772 * dev_base_lock to be sure it remains unique.
4773 */
881d966b 4774static int dev_new_index(struct net *net)
1da177e4
LT
4775{
4776 static int ifindex;
4777 for (;;) {
4778 if (++ifindex <= 0)
4779 ifindex = 1;
881d966b 4780 if (!__dev_get_by_index(net, ifindex))
1da177e4
LT
4781 return ifindex;
4782 }
4783}
4784
1da177e4 4785/* Delayed registration/unregistration */
3b5b34fd 4786static LIST_HEAD(net_todo_list);
1da177e4 4787
6f05f629 4788static void net_set_todo(struct net_device *dev)
1da177e4 4789{
1da177e4 4790 list_add_tail(&dev->todo_list, &net_todo_list);
1da177e4
LT
4791}
4792
9b5e383c 4793static void rollback_registered_many(struct list_head *head)
93ee31f1 4794{
e93737b0 4795 struct net_device *dev, *tmp;
9b5e383c 4796
93ee31f1
DL
4797 BUG_ON(dev_boot_phase);
4798 ASSERT_RTNL();
4799
e93737b0 4800 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 4801 /* Some devices end up here without ever being
e93737b0
KK
 4802 * registered, as part of initialization unwind. Remove
 4803 * those devices and proceed with the remaining.
9b5e383c
ED
4804 */
4805 if (dev->reg_state == NETREG_UNINITIALIZED) {
4806 pr_debug("unregister_netdevice: device %s/%p never "
4807 "was registered\n", dev->name, dev);
93ee31f1 4808
9b5e383c 4809 WARN_ON(1);
e93737b0
KK
4810 list_del(&dev->unreg_list);
4811 continue;
9b5e383c 4812 }
93ee31f1 4813
9b5e383c 4814 BUG_ON(dev->reg_state != NETREG_REGISTERED);
93ee31f1 4815
9b5e383c
ED
4816 /* If device is running, close it first. */
4817 dev_close(dev);
93ee31f1 4818
9b5e383c
ED
4819 /* And unlink it from device chain. */
4820 unlist_netdevice(dev);
93ee31f1 4821
9b5e383c
ED
4822 dev->reg_state = NETREG_UNREGISTERING;
4823 }
93ee31f1
DL
4824
4825 synchronize_net();
4826
9b5e383c
ED
4827 list_for_each_entry(dev, head, unreg_list) {
4828 /* Shutdown queueing discipline. */
4829 dev_shutdown(dev);
93ee31f1
DL
4830
4831
9b5e383c
ED
4832 /* Notify protocols, that we are about to destroy
4833 this device. They should clean all the things.
4834 */
4835 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 4836
a2835763
PM
4837 if (!dev->rtnl_link_ops ||
4838 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4839 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4840
9b5e383c
ED
4841 /*
4842 * Flush the unicast and multicast chains
4843 */
a748ee24 4844 dev_uc_flush(dev);
22bedad3 4845 dev_mc_flush(dev);
93ee31f1 4846
9b5e383c
ED
4847 if (dev->netdev_ops->ndo_uninit)
4848 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 4849
9b5e383c
ED
4850 /* Notifier chain MUST detach us from master device. */
4851 WARN_ON(dev->master);
93ee31f1 4852
9b5e383c
ED
4853 /* Remove entries from kobject tree */
4854 netdev_unregister_kobject(dev);
4855 }
93ee31f1 4856
a5ee1551 4857 /* Process any work delayed until the end of the batch */
e5e26d75 4858 dev = list_first_entry(head, struct net_device, unreg_list);
a5ee1551 4859 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
93ee31f1 4860
a5ee1551 4861 synchronize_net();
395264d5 4862
a5ee1551 4863 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
4864 dev_put(dev);
4865}
4866
4867static void rollback_registered(struct net_device *dev)
4868{
4869 LIST_HEAD(single);
4870
4871 list_add(&dev->unreg_list, &single);
4872 rollback_registered_many(&single);
93ee31f1
DL
4873}
4874
e8a0464c
DM
4875static void __netdev_init_queue_locks_one(struct net_device *dev,
4876 struct netdev_queue *dev_queue,
4877 void *_unused)
c773e847
DM
4878{
4879 spin_lock_init(&dev_queue->_xmit_lock);
cf508b12 4880 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
c773e847
DM
4881 dev_queue->xmit_lock_owner = -1;
4882}
4883
4884static void netdev_init_queue_locks(struct net_device *dev)
4885{
e8a0464c
DM
4886 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4887 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
c773e847
DM
4888}
4889
b63365a2
HX
4890unsigned long netdev_fix_features(unsigned long features, const char *name)
4891{
4892 /* Fix illegal SG+CSUM combinations. */
4893 if ((features & NETIF_F_SG) &&
4894 !(features & NETIF_F_ALL_CSUM)) {
4895 if (name)
4896 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4897 "checksum feature.\n", name);
4898 features &= ~NETIF_F_SG;
4899 }
4900
4901 /* TSO requires that SG is present as well. */
4902 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4903 if (name)
4904 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4905 "SG feature.\n", name);
4906 features &= ~NETIF_F_TSO;
4907 }
4908
4909 if (features & NETIF_F_UFO) {
4910 if (!(features & NETIF_F_GEN_CSUM)) {
4911 if (name)
4912 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4913 "since no NETIF_F_HW_CSUM feature.\n",
4914 name);
4915 features &= ~NETIF_F_UFO;
4916 }
4917
4918 if (!(features & NETIF_F_SG)) {
4919 if (name)
4920 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4921 "since no NETIF_F_SG feature.\n", name);
4922 features &= ~NETIF_F_UFO;
4923 }
4924 }
4925
4926 return features;
4927}
4928EXPORT_SYMBOL(netdev_fix_features);
4929
fc4a7489
PM
4930/**
4931 * netif_stacked_transfer_operstate - transfer operstate
4932 * @rootdev: the root or lower level device to transfer state from
4933 * @dev: the device to transfer operstate to
4934 *
4935 * Transfer operational state from root to device. This is normally
4936 * called when a stacking relationship exists between the root
 4937 * device and the device (a leaf device).
4938 */
4939void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4940 struct net_device *dev)
4941{
4942 if (rootdev->operstate == IF_OPER_DORMANT)
4943 netif_dormant_on(dev);
4944 else
4945 netif_dormant_off(dev);
4946
4947 if (netif_carrier_ok(rootdev)) {
4948 if (!netif_carrier_ok(dev))
4949 netif_carrier_on(dev);
4950 } else {
4951 if (netif_carrier_ok(dev))
4952 netif_carrier_off(dev);
4953 }
4954}
4955EXPORT_SYMBOL(netif_stacked_transfer_operstate);
4956
1da177e4
LT
4957/**
4958 * register_netdevice - register a network device
4959 * @dev: device to register
4960 *
4961 * Take a completed network device structure and add it to the kernel
4962 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4963 * chain. 0 is returned on success. A negative errno code is returned
4964 * on a failure to set up the device, or if the name is a duplicate.
4965 *
4966 * Callers must hold the rtnl semaphore. You may want
4967 * register_netdev() instead of this.
4968 *
4969 * BUGS:
4970 * The locking appears insufficient to guarantee two parallel registers
4971 * will not get the same name.
4972 */
4973
4974int register_netdevice(struct net_device *dev)
4975{
1da177e4 4976 int ret;
d314774c 4977 struct net *net = dev_net(dev);
1da177e4
LT
4978
4979 BUG_ON(dev_boot_phase);
4980 ASSERT_RTNL();
4981
b17a7c17
SH
4982 might_sleep();
4983
1da177e4
LT
4984 /* When net_device's are persistent, this will be fatal. */
4985 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 4986 BUG_ON(!net);
1da177e4 4987
f1f28aa3 4988 spin_lock_init(&dev->addr_list_lock);
cf508b12 4989 netdev_set_addr_lockdep_class(dev);
c773e847 4990 netdev_init_queue_locks(dev);
1da177e4 4991
1da177e4
LT
4992 dev->iflink = -1;
4993
df334545 4994#ifdef CONFIG_RPS
0a9627f2
TH
4995 if (!dev->num_rx_queues) {
4996 /*
4997 * Allocate a single RX queue if driver never called
4998 * alloc_netdev_mq
4999 */
5000
5001 dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
5002 if (!dev->_rx) {
5003 ret = -ENOMEM;
5004 goto out;
5005 }
5006
5007 dev->_rx->first = dev->_rx;
5008 atomic_set(&dev->_rx->count, 1);
5009 dev->num_rx_queues = 1;
5010 }
df334545 5011#endif
1da177e4 5012 /* Init, if this function is available */
d314774c
SH
5013 if (dev->netdev_ops->ndo_init) {
5014 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
5015 if (ret) {
5016 if (ret > 0)
5017 ret = -EIO;
90833aa4 5018 goto out;
1da177e4
LT
5019 }
5020 }
4ec93edb 5021
8ce6cebc 5022 ret = dev_get_valid_name(dev, dev->name, 0);
d9031024 5023 if (ret)
7ce1b0ed 5024 goto err_uninit;
1da177e4 5025
881d966b 5026 dev->ifindex = dev_new_index(net);
1da177e4
LT
5027 if (dev->iflink == -1)
5028 dev->iflink = dev->ifindex;
5029
d212f87b
SH
5030 /* Fix illegal checksum combinations */
5031 if ((dev->features & NETIF_F_HW_CSUM) &&
5032 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5033 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5034 dev->name);
5035 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5036 }
5037
5038 if ((dev->features & NETIF_F_NO_CSUM) &&
5039 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5040 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5041 dev->name);
5042 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5043 }
5044
b63365a2 5045 dev->features = netdev_fix_features(dev->features, dev->name);
1da177e4 5046
e5a4a72d
LB
5047 /* Enable software GSO if SG is supported. */
5048 if (dev->features & NETIF_F_SG)
5049 dev->features |= NETIF_F_GSO;
5050
7ffbe3fd
JB
5051 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5052 ret = notifier_to_errno(ret);
5053 if (ret)
5054 goto err_uninit;
5055
8b41d188 5056 ret = netdev_register_kobject(dev);
b17a7c17 5057 if (ret)
7ce1b0ed 5058 goto err_uninit;
b17a7c17
SH
5059 dev->reg_state = NETREG_REGISTERED;
5060
1da177e4
LT
5061 /*
 5062 * Default initial state at registration is that the
5063 * device is present.
5064 */
5065
5066 set_bit(__LINK_STATE_PRESENT, &dev->state);
5067
1da177e4 5068 dev_init_scheduler(dev);
1da177e4 5069 dev_hold(dev);
ce286d32 5070 list_netdevice(dev);
1da177e4
LT
5071
5072 /* Notify protocols, that a new device appeared. */
056925ab 5073 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 5074 ret = notifier_to_errno(ret);
93ee31f1
DL
5075 if (ret) {
5076 rollback_registered(dev);
5077 dev->reg_state = NETREG_UNREGISTERED;
5078 }
d90a909e
EB
5079 /*
5080 * Prevent userspace races by waiting until the network
5081 * device is fully setup before sending notifications.
5082 */
a2835763
PM
5083 if (!dev->rtnl_link_ops ||
5084 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5085 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1da177e4
LT
5086
5087out:
5088 return ret;
7ce1b0ed
HX
5089
5090err_uninit:
d314774c
SH
5091 if (dev->netdev_ops->ndo_uninit)
5092 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 5093 goto out;
1da177e4 5094}
d1b19dff 5095EXPORT_SYMBOL(register_netdevice);
1da177e4 5096
937f1ba5
BH
5097/**
5098 * init_dummy_netdev - init a dummy network device for NAPI
5099 * @dev: device to init
5100 *
 5101 * This takes a network device structure and initializes the minimum
 5102 * number of fields so it can be used to schedule NAPI polls without
 5103 * registering a full-blown interface. This is to be used by drivers
5104 * that need to tie several hardware interfaces to a single NAPI
5105 * poll scheduler due to HW limitations.
5106 */
5107int init_dummy_netdev(struct net_device *dev)
5108{
5109 /* Clear everything. Note we don't initialize spinlocks
 5110 * as they aren't supposed to be taken by any of the
5111 * NAPI code and this dummy netdev is supposed to be
5112 * only ever used for NAPI polls
5113 */
5114 memset(dev, 0, sizeof(struct net_device));
5115
5116 /* make sure we BUG if trying to hit standard
5117 * register/unregister code path
5118 */
5119 dev->reg_state = NETREG_DUMMY;
5120
5121 /* initialize the ref count */
5122 atomic_set(&dev->refcnt, 1);
5123
5124 /* NAPI wants this */
5125 INIT_LIST_HEAD(&dev->napi_list);
5126
5127 /* a dummy interface is started by default */
5128 set_bit(__LINK_STATE_PRESENT, &dev->state);
5129 set_bit(__LINK_STATE_START, &dev->state);
5130
5131 return 0;
5132}
5133EXPORT_SYMBOL_GPL(init_dummy_netdev);
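
A driver with several ports behind one interrupt can hang its shared poll context off such a dummy device; a sketch reusing the hypothetical my_poll callback from the earlier NAPI example:

/* Hedged sketch, not part of dev.c: a dummy netdev as a NAPI anchor.
 * The dummy device is never registered and never transmits. */
struct my_hw {
	struct net_device napi_dev;	/* only exists for NAPI */
	struct napi_struct napi;
};

static void my_hw_napi_init(struct my_hw *hw)
{
	init_dummy_netdev(&hw->napi_dev);
	netif_napi_add(&hw->napi_dev, &hw->napi, my_poll, 64);
}
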
5134
5135
1da177e4
LT
5136/**
5137 * register_netdev - register a network device
5138 * @dev: device to register
5139 *
5140 * Take a completed network device structure and add it to the kernel
5141 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5142 * chain. 0 is returned on success. A negative errno code is returned
5143 * on a failure to set up the device, or if the name is a duplicate.
5144 *
38b4da38 5145 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
5146 * and expands the device name if you passed a format string to
5147 * alloc_netdev.
5148 */
5149int register_netdev(struct net_device *dev)
5150{
5151 int err;
5152
5153 rtnl_lock();
5154
5155 /*
5156 * If the name is a format string the caller wants us to do a
5157 * name allocation.
5158 */
5159 if (strchr(dev->name, '%')) {
5160 err = dev_alloc_name(dev, dev->name);
5161 if (err < 0)
5162 goto out;
5163 }
4ec93edb 5164
1da177e4
LT
5165 err = register_netdevice(dev);
5166out:
5167 rtnl_unlock();
5168 return err;
5169}
5170EXPORT_SYMBOL(register_netdev);
5171
5172/*
5173 * netdev_wait_allrefs - wait until all references are gone.
5174 *
5175 * This is called when unregistering network devices.
5176 *
5177 * Any protocol or device that holds a reference should register
 5178 * for netdevice notification, and clean up and put back the
5179 * reference if they receive an UNREGISTER event.
5180 * We can get stuck here if buggy protocols don't correctly
4ec93edb 5181 * call dev_put.
1da177e4
LT
5182 */
5183static void netdev_wait_allrefs(struct net_device *dev)
5184{
5185 unsigned long rebroadcast_time, warning_time;
5186
e014debe
ED
5187 linkwatch_forget_dev(dev);
5188
1da177e4
LT
5189 rebroadcast_time = warning_time = jiffies;
5190 while (atomic_read(&dev->refcnt) != 0) {
5191 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 5192 rtnl_lock();
1da177e4
LT
5193
5194 /* Rebroadcast unregister notification */
056925ab 5195 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5196 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
395264d5 5197 * should have already handled it the first time */
1da177e4
LT
5198
5199 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5200 &dev->state)) {
5201 /* We must not have linkwatch events
5202 * pending on unregister. If this
5203 * happens, we simply run the queue
5204 * unscheduled, resulting in a noop
5205 * for this device.
5206 */
5207 linkwatch_run_queue();
5208 }
5209
6756ae4b 5210 __rtnl_unlock();
1da177e4
LT
5211
5212 rebroadcast_time = jiffies;
5213 }
5214
5215 msleep(250);
5216
5217 if (time_after(jiffies, warning_time + 10 * HZ)) {
5218 printk(KERN_EMERG "unregister_netdevice: "
5219 "waiting for %s to become free. Usage "
5220 "count = %d\n",
5221 dev->name, atomic_read(&dev->refcnt));
5222 warning_time = jiffies;
5223 }
5224 }
5225}
5226
5227/* The sequence is:
5228 *
5229 * rtnl_lock();
5230 * ...
5231 * register_netdevice(x1);
5232 * register_netdevice(x2);
5233 * ...
5234 * unregister_netdevice(y1);
5235 * unregister_netdevice(y2);
5236 * ...
5237 * rtnl_unlock();
5238 * free_netdev(y1);
5239 * free_netdev(y2);
5240 *
58ec3b4d 5241 * We are invoked by rtnl_unlock().
1da177e4 5242 * This allows us to deal with problems:
b17a7c17 5243 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
5244 * without deadlocking with linkwatch via keventd.
5245 * 2) Since we run with the RTNL semaphore not held, we can sleep
5246 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
5247 *
5248 * We must not return until all unregister events added during
5249 * the interval the lock was held have been completed.
1da177e4 5250 */
1da177e4
LT
5251void netdev_run_todo(void)
5252{
626ab0e6 5253 struct list_head list;
1da177e4 5254
1da177e4 5255 /* Snapshot list, allow later requests */
626ab0e6 5256 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
5257
5258 __rtnl_unlock();
626ab0e6 5259
1da177e4
LT
5260 while (!list_empty(&list)) {
5261 struct net_device *dev
e5e26d75 5262 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
5263 list_del(&dev->todo_list);
5264
b17a7c17
SH
5265 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5266 printk(KERN_ERR "network todo '%s' but state %d\n",
5267 dev->name, dev->reg_state);
5268 dump_stack();
5269 continue;
5270 }
1da177e4 5271
b17a7c17 5272 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 5273
152102c7 5274 on_each_cpu(flush_backlog, dev, 1);
6e583ce5 5275
b17a7c17 5276 netdev_wait_allrefs(dev);
1da177e4 5277
b17a7c17
SH
5278 /* paranoia */
5279 BUG_ON(atomic_read(&dev->refcnt));
547b792c
IJ
5280 WARN_ON(dev->ip_ptr);
5281 WARN_ON(dev->ip6_ptr);
5282 WARN_ON(dev->dn_ptr);
1da177e4 5283
b17a7c17
SH
5284 if (dev->destructor)
5285 dev->destructor(dev);
9093bbb2
SH
5286
5287 /* Free network device */
5288 kobject_put(&dev->dev.kobj);
1da177e4 5289 }
1da177e4
LT
5290}
5291
d83345ad
ED
5292/**
5293 * dev_txq_stats_fold - fold tx_queues stats
5294 * @dev: device to get statistics from
3cfde79c 5295 * @stats: struct rtnl_link_stats64 to hold results
d83345ad
ED
5296 */
5297void dev_txq_stats_fold(const struct net_device *dev,
3cfde79c 5298 struct rtnl_link_stats64 *stats)
d83345ad 5299{
bd27290a 5300 u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
d83345ad
ED
5301 unsigned int i;
5302 struct netdev_queue *txq;
5303
5304 for (i = 0; i < dev->num_tx_queues; i++) {
5305 txq = netdev_get_tx_queue(dev, i);
bd27290a 5306 spin_lock_bh(&txq->_xmit_lock);
d83345ad
ED
5307 tx_bytes += txq->tx_bytes;
5308 tx_packets += txq->tx_packets;
5309 tx_dropped += txq->tx_dropped;
bd27290a 5310 spin_unlock_bh(&txq->_xmit_lock);
d83345ad
ED
5311 }
5312 if (tx_bytes || tx_packets || tx_dropped) {
5313 stats->tx_bytes = tx_bytes;
5314 stats->tx_packets = tx_packets;
5315 stats->tx_dropped = tx_dropped;
5316 }
5317}
5318EXPORT_SYMBOL(dev_txq_stats_fold);
5319
3cfde79c
BH
5320/* Convert net_device_stats to rtnl_link_stats64. They have the same
5321 * fields in the same order, with only the type differing.
5322 */
5323static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5324 const struct net_device_stats *netdev_stats)
5325{
5326#if BITS_PER_LONG == 64
5327 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5328 memcpy(stats64, netdev_stats, sizeof(*stats64));
5329#else
5330 size_t i, n = sizeof(*stats64) / sizeof(u64);
5331 const unsigned long *src = (const unsigned long *)netdev_stats;
5332 u64 *dst = (u64 *)stats64;
5333
5334 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5335 sizeof(*stats64) / sizeof(u64));
5336 for (i = 0; i < n; i++)
5337 dst[i] = src[i];
5338#endif
5339}
5340
eeda3fd6
SH
5341/**
5342 * dev_get_stats - get network device statistics
5343 * @dev: device to get statistics from
28172739 5344 * @storage: place to store stats
eeda3fd6 5345 *
d7753516
BH
5346 * Get network statistics from device. Return @storage.
5347 * The device driver may provide its own method by setting
5348 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5349 * otherwise the internal statistics structure is used.
eeda3fd6 5350 */
d7753516
BH
5351struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5352 struct rtnl_link_stats64 *storage)
7004bf25 5353{
eeda3fd6
SH
5354 const struct net_device_ops *ops = dev->netdev_ops;
5355
28172739
ED
5356 if (ops->ndo_get_stats64) {
5357 memset(storage, 0, sizeof(*storage));
5358 return ops->ndo_get_stats64(dev, storage);
5359 }
5360 if (ops->ndo_get_stats) {
3cfde79c 5361 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
28172739
ED
5362 return storage;
5363 }
3cfde79c
BH
5364 netdev_stats_to_stats64(storage, &dev->stats);
5365 dev_txq_stats_fold(dev, storage);
28172739 5366 return storage;
c45d286e 5367}
eeda3fd6 5368EXPORT_SYMBOL(dev_get_stats);
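
A driver normally plugs into this through ndo_get_stats64, receiving the zeroed @storage seen above; a sketch with hypothetical my_priv counters:

/* Hedged sketch, not part of dev.c: driver-side ndo_get_stats64. */
static struct rtnl_link_stats64 *my_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *storage)
{
	struct my_priv *priv = netdev_priv(dev);

	storage->rx_packets = priv->rx_packets;
	storage->rx_bytes   = priv->rx_bytes;
	storage->tx_packets = priv->tx_packets;
	storage->tx_bytes   = priv->tx_bytes;
	return storage;
}
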
c45d286e 5369
dc2b4847 5370static void netdev_init_one_queue(struct net_device *dev,
e8a0464c
DM
5371 struct netdev_queue *queue,
5372 void *_unused)
dc2b4847 5373{
dc2b4847
DM
5374 queue->dev = dev;
5375}
5376
bb949fbd
DM
5377static void netdev_init_queues(struct net_device *dev)
5378{
e8a0464c
DM
5379 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5380 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
c3f26a26 5381 spin_lock_init(&dev->tx_global_lock);
bb949fbd
DM
5382}
5383
1da177e4 5384/**
f25f4e44 5385 * alloc_netdev_mq - allocate network device
1da177e4
LT
5386 * @sizeof_priv: size of private data to allocate space for
5387 * @name: device name format string
5388 * @setup: callback to initialize device
f25f4e44 5389 * @queue_count: the number of subqueues to allocate
1da177e4
LT
5390 *
5391 * Allocates a struct net_device with private data area for driver use
f25f4e44
PWJ
 5392 * and performs basic initialization. Also allocates subqueue structs
5393 * for each queue on the device at the end of the netdevice.
1da177e4 5394 */
f25f4e44
PWJ
5395struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5396 void (*setup)(struct net_device *), unsigned int queue_count)
1da177e4 5397{
e8a0464c 5398 struct netdev_queue *tx;
1da177e4 5399 struct net_device *dev;
7943986c 5400 size_t alloc_size;
1ce8e7b5 5401 struct net_device *p;
df334545
ED
5402#ifdef CONFIG_RPS
5403 struct netdev_rx_queue *rx;
0a9627f2 5404 int i;
df334545 5405#endif
1da177e4 5406
b6fe17d6
SH
5407 BUG_ON(strlen(name) >= sizeof(dev->name));
5408
fd2ea0a7 5409 alloc_size = sizeof(struct net_device);
d1643d24
AD
5410 if (sizeof_priv) {
5411 /* ensure 32-byte alignment of private area */
1ce8e7b5 5412 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
5413 alloc_size += sizeof_priv;
5414 }
5415 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 5416 alloc_size += NETDEV_ALIGN - 1;
1da177e4 5417
31380de9 5418 p = kzalloc(alloc_size, GFP_KERNEL);
1da177e4 5419 if (!p) {
b6fe17d6 5420 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
1da177e4
LT
5421 return NULL;
5422 }
1da177e4 5423
7943986c 5424 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
e8a0464c
DM
5425 if (!tx) {
5426 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5427 "tx qdiscs.\n");
ab9c73cc 5428 goto free_p;
e8a0464c
DM
5429 }
5430
df334545 5431#ifdef CONFIG_RPS
0a9627f2
TH
5432 rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5433 if (!rx) {
5434 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5435 "rx queues.\n");
5436 goto free_tx;
5437 }
5438
5439 atomic_set(&rx->count, queue_count);
5440
5441 /*
5442 * Set a pointer to first element in the array which holds the
5443 * reference count.
5444 */
5445 for (i = 0; i < queue_count; i++)
5446 rx[i].first = rx;
df334545 5447#endif
0a9627f2 5448
1ce8e7b5 5449 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 5450 dev->padded = (char *)dev - (char *)p;
ab9c73cc
JP
5451
5452 if (dev_addr_init(dev))
0a9627f2 5453 goto free_rx;
ab9c73cc 5454
22bedad3 5455 dev_mc_init(dev);
a748ee24 5456 dev_uc_init(dev);
ccffad25 5457
c346dca1 5458 dev_net_set(dev, &init_net);
1da177e4 5459
e8a0464c
DM
5460 dev->_tx = tx;
5461 dev->num_tx_queues = queue_count;
fd2ea0a7 5462 dev->real_num_tx_queues = queue_count;
e8a0464c 5463
df334545 5464#ifdef CONFIG_RPS
0a9627f2
TH
5465 dev->_rx = rx;
5466 dev->num_rx_queues = queue_count;
df334545 5467#endif
0a9627f2 5468
82cc1a7a 5469 dev->gso_max_size = GSO_MAX_SIZE;
1da177e4 5470
bb949fbd
DM
5471 netdev_init_queues(dev);
5472
15682bc4
PWJ
5473 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5474 dev->ethtool_ntuple_list.count = 0;
d565b0a1 5475 INIT_LIST_HEAD(&dev->napi_list);
9fdce099 5476 INIT_LIST_HEAD(&dev->unreg_list);
e014debe 5477 INIT_LIST_HEAD(&dev->link_watch_list);
93f154b5 5478 dev->priv_flags = IFF_XMIT_DST_RELEASE;
1da177e4
LT
5479 setup(dev);
5480 strcpy(dev->name, name);
5481 return dev;
ab9c73cc 5482
0a9627f2 5483free_rx:
df334545 5484#ifdef CONFIG_RPS
0a9627f2 5485 kfree(rx);
ab9c73cc 5486free_tx:
df334545 5487#endif
ab9c73cc 5488 kfree(tx);
ab9c73cc
JP
5489free_p:
5490 kfree(p);
5491 return NULL;
1da177e4 5492}
f25f4e44 5493EXPORT_SYMBOL(alloc_netdev_mq);
1da177e4
LT
5494
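/*
 * Example (sketch, not part of the original file): an Ethernet driver
 * with a private area and four TX subqueues might allocate its netdev
 * like this; "struct my_priv" and the "myeth%d" name are hypothetical.
 *
 *	struct net_device *dev;
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "myeth%d",
 *			      ether_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 */
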
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	/* Clear ethtool n-tuple list */
	ethtool_ntuple_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

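/*
 * Example (sketch): free_netdev() is the error-path counterpart of the
 * allocator above. A never-registered device (reg_state still
 * NETREG_UNINITIALIZED) is freed immediately, so a typical probe
 * routine unwinds like this; the surrounding driver code is hypothetical.
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */
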
/**
 *	synchronize_net - Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

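/*
 * Example (sketch): a protocol module that unhooks a packet handler can
 * use synchronize_net() to make sure no CPU is still executing it before
 * tearing down state; "my_pt" and "my_state" are hypothetical.
 *
 *	__dev_remove_pack(&my_pt);
 *	synchronize_net();	// all in-flight receivers have finished
 *	kfree(my_state);
 */
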
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If @head is not NULL, the device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);

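/*
 * Example (sketch): queueing several devices and unregistering them in
 * one batch under a single rtnl hold amortizes the expensive
 * synchronization; "dev1" and "dev2" are hypothetical registered devices.
 *
 *	LIST_HEAD(kill_list);
 *
 *	rtnl_lock();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */
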
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore. In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

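/*
 * Example (sketch): the usual module-exit pairing; "my_dev" is a
 * hypothetical device registered by the module's init routine.
 *
 *	static void __exit my_exit(void)
 *	{
 *		unregister_netdev(my_dev);	// takes rtnl itself
 *		free_netdev(my_dev);
 *	}
 */
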
/**
 *	dev_change_net_namespace - move device to different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat, 1))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and
	 * unregister_netdevice.
	 */

	/* If the device is running, close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

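/*
 * Example (sketch): moving a device into another namespace under rtnl,
 * falling back to an "eth%d" pattern if its current name is already
 * taken there; "dev" and "other_net" are hypothetical.
 *
 *	int err;
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, other_net, "eth%d");
 *	rtnl_unlock();
 */
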
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);

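/*
 * Example (sketch): a master device (e.g. bonding) recomputes its
 * feature set by folding in each slave; the "for_each_slave" iteration
 * below is schematic and hypothetical.
 *
 *	unsigned long features = master->features & ~NETIF_F_ONE_FOR_ALL;
 *
 *	for_each_slave(master, slave)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     NETIF_F_ONE_FOR_ALL);
 *	master->features = features;
 */
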
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}

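/*
 * Example (sketch): callers such as a tx watchdog pass a small stack
 * buffer and print whatever name could be determined; an empty string
 * is returned when no parent driver is known.
 *
 *	char drivername[64];
 *
 *	printk(KERN_INFO "driver: %s\n",
 *	       netdev_drivername(dev, drivername, sizeof(drivername)));
 */
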
static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);

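/*
 * Example (sketch): drivers use the generated helpers instead of raw
 * printk() so messages are uniformly prefixed with the device name and
 * its parent bus device; "rc" is a hypothetical error code.
 *
 *	netdev_info(dev, "link up\n");
 *	netdev_err(dev, "DMA mapping failed, error %d\n", rc);
 */
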
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
				__func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single-threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);
