/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
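
/*
 * Usage sketch (hypothetical, not part of this file): a module that taps
 * every inbound IPv4 frame via dev_add_pack().  All names below are
 * illustrative only.  The handler receives shared skbs, so it must treat
 * them as read-only and free them; teardown must use dev_remove_pack()
 * (declared further down) so no CPU still runs the handler afterwards.
 */
#if 0
static int example_ipv4_rcv(struct sk_buff *skb, struct net_device *dev,
			    struct packet_type *pt,
			    struct net_device *orig_dev)
{
	/* Look, but do not modify: the skb may be cloned. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_ipv4_ptype __read_mostly = {
	.type	= cpu_to_be16(ETH_P_IP),
	.func	= example_ipv4_rcv,
};

/* module init:  dev_add_pack(&example_ipv4_ptype);    */
/* module exit:  dev_remove_pack(&example_ipv4_ptype); */
#endif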

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
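
/*
 * Usage sketch (hypothetical): the refcounted lookup pairs with dev_put(),
 * while the _rcu variant is only valid inside an RCU read-side section.
 */
#if 0
	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		/* dev is safe to use from any context here */
		dev_put(dev);	/* drop the reference taken by the lookup */
	}
#endif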

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
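
/*
 * Usage sketch (hypothetical): an RCU lookup must stay inside the
 * rcu_read_lock()/rcu_read_unlock() pair; take a reference with
 * dev_hold() before leaving it if the pointer must outlive the section.
 */
#if 0
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(&init_net, ifindex);
	if (dev)
		dev_hold(dev);	/* keep dev alive past the RCU section */
	rcu_read_unlock();
#endif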


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
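
/*
 * Usage sketch (hypothetical): a driver registering under a wildcard name.
 * dev_alloc_name() expands "eth%d" to the first free unit, e.g. "eth2",
 * and returns the unit number or a negative errno; callers serialize
 * against other writers with rtnl_lock().
 */
#if 0
	rtnl_lock();
	err = dev_alloc_name(dev, "eth%d");	/* dev->name becomes "ethN" */
	if (err >= 0)
		err = register_netdevice(dev);
	rtnl_unlock();
#endif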

static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname, 1);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
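
/*
 * Usage sketch (hypothetical): ioctl handlers call dev_load() before a
 * lookup so that a request naming e.g. "tun" can pull in the module that
 * registers the interface, then retry the normal lookup path.
 */
#if 0
	dev_load(net, ifr.ifr_name);	/* may request_module() */
	dev = dev_get_by_name(net, ifr.ifr_name);
#endif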

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
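
/*
 * Usage sketch (hypothetical): bringing an interface up or down from
 * kernel code mirrors "ip link set dev eth0 up" and must run under
 * rtnl_lock(), since both paths fire netdev notifiers.
 */
#if 0
	rtnl_lock();
	err = dev_open(dev);		/* nop if already IFF_UP */
	/* ... later ... */
	dev_close(dev);			/* nop if already down */
	rtnl_unlock();
#endif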

static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare to death, when device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of it's
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 * Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
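
/*
 * Usage sketch (hypothetical): a minimal netdev notifier.  Registration
 * replays NETDEV_REGISTER/NETDEV_UP for already-existing devices, so the
 * callback sees a consistent view of the device list from the start.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		/* react to dev coming up */
		break;
	case NETDEV_GOING_DOWN:
		/* quiesce before dev goes down */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb); */
#endif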

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (!(dev->flags & IFF_UP) ||
	    (skb->len > (dev->mtu + dev->hard_header_len))) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
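
/*
 * Usage sketch (hypothetical): a paired virtual device (veth-style) whose
 * transmit path simply injects the frame into its peer's receive queue;
 * example_get_peer() is an illustrative helper, not a real API.
 */
#if 0
static netdev_tx_t example_peer_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);	/* illustrative */

	dev_forward_skb(peer, skb);	/* frees skb on drop */
	return NETDEV_TX_OK;
}
#endif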

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp_set(skb);
#else
	net_timestamp_set(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	unsigned int real_num = dev->real_num_tx_queues;

	if (unlikely(txq > dev->num_tx_queues))
		;
	else if (txq > real_num)
		dev->real_num_tx_queues = txq;
	else if (txq < real_num) {
		dev->real_num_tx_queues = txq;
		qdisc_reset_all_tx_gt(dev, txq);
	}
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
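
/*
 * Usage sketch (hypothetical): a multiqueue driver that allocates the
 * maximum number of queues up front, then learns from firmware how many
 * the hardware can actually service.  MAX_QUEUES and hw_queues are
 * illustrative names.
 */
#if 0
	dev = alloc_etherdev_mq(sizeof(struct example_priv), MAX_QUEUES);
	/* ... probe hardware ... */
	netif_set_real_num_tx_queues(dev, hw_queues);	/* hw_queues <= MAX_QUEUES */
#endif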

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/**
 * skb_dev_set -- assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
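
/*
 * Illustrative sketch (not part of the original file): how a caller walks
 * the singly linked segment list skb_gso_segment() returns (chained via
 * skb->next), as dev_hard_start_xmit() does below. my_send_one() is
 * hypothetical.
 */
#if 0
static int my_send_all(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = skb_gso_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)
		return my_send_one(skb);	/* no segmentation needed */

	while (segs) {
		struct sk_buff *nskb = segs;

		segs = segs->next;
		nskb->next = NULL;
		my_send_one(nskb);
	}
	kfree_skb(skb);		/* original skb is no longer needed */
	return 0;
}
#endif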

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
				return 1;
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

/*
 * Try to orphan skb early, right before transmission by the device.
 * We cannot orphan skb if tx timestamp is requested or the sk-reference
 * is needed on the driver level for other reasons, e.g. see net/can/raw.c
 */
static inline void skb_orphan_try(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	if (sk && !skb_shinfo(skb)->tx_flags) {
		/* skb_tx_hash() won't be able to get sk.
		 * We copy sk_hash into skb->rxhash
		 */
		if (!skb->rxhash)
			skb->rxhash = sk->sk_hash;
		skb_orphan(skb);
	}
}

/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG, or if
 *	   at least one of fragments is in highmem and device does not
 *	   support DMA from it.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      struct net_device *dev)
{
	return skb_is_nonlinear(skb) &&
	       ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
		(skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
					       illegal_highdma(dev, skb))));
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;

	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		skb_orphan_try(skb);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, dev) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));
				if (!dev_can_checksum(dev, skb) &&
				    skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		rc = ops->ndo_start_xmit(skb, dev);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		rc = ops->ndo_start_xmit(nskb, dev);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
	return rc;
}

static u32 hashrnd __read_mostly;

u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= dev->real_num_tx_queues))
			hash -= dev->real_num_tx_queues;
		return hash;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol ^ skb->rxhash;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
EXPORT_SYMBOL(skb_tx_hash);
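
/*
 * Illustrative note (not part of the original source): the final line maps
 * a 32-bit hash uniformly onto [0, real_num_tx_queues) without a modulo.
 * Worked example: with hash = 0x80000000 (= 2^31) and 8 tx queues,
 * ((u64)0x80000000 * 8) >> 32 = 2^34 >> 32 = 4, i.e. the midpoint of the
 * hash space lands on the middle queue.
 */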

static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		if (net_ratelimit()) {
			pr_warning("%s selects TX queue %d, but "
				   "real number of TX queues is %d\n",
				   dev->name, queue_index,
				   dev->real_num_tx_queues);
		}
		return 0;
	}
	return queue_index;
}

static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	int queue_index;
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;
		queue_index = sk_tx_queue_get(sk);
		if (queue_index < 0) {

			queue_index = 0;
			if (dev->real_num_tx_queues > 1)
				queue_index = skb_tx_hash(dev, skb);

			if (sk) {
				struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended = qdisc_is_running(q);
	int rc;

	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);
		__qdisc_update_bstats(q, skb->len);
		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = qdisc_enqueue_root(skb, q);
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	txq = dev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_tx_queue_stopped(txq)) {
				rc = dev_hard_start_xmit(skb, dev, txq);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
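
/*
 * Illustrative sketch (not part of the original file): minimal use of
 * dev_queue_xmit() by a packet-producing module. Per the comment above,
 * the skb is always consumed, and positive NET_XMIT_* codes may come back
 * from the qdisc. my_send_raw() and the interface name "eth0" are only
 * examples; the frame buffer is assumed to already carry an ethernet header.
 */
#if 0
static int my_send_raw(struct net *net, const void *frame, int len)
{
	struct net_device *dev = dev_get_by_name(net, "eth0");
	struct sk_buff *skb;
	int rc;

	if (!dev)
		return -ENODEV;
	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb) {
		dev_put(dev);
		return -ENOMEM;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), frame, len);
	skb->dev = dev;				/* caller must set the device */
	rc = dev_queue_xmit(skb);		/* consumes skb, even on error */
	dev_put(dev);
	return rc;
}
#endif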


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers. Returns a non-zero hash number on success
 * and 0 on failure.
 */
__u32 __skb_get_rxhash(struct sk_buff *skb)
{
	int nhoff, hash = 0, poff;
	struct ipv6hdr *ip6;
	struct iphdr *ip;
	u8 ip_proto;
	u32 addr1, addr2, ihl;
	union {
		u32 v32;
		u16 v16[2];
	} ports;

	nhoff = skb_network_offset(skb);

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
			goto done;

		ip = (struct iphdr *) (skb->data + nhoff);
		if (ip->frag_off & htons(IP_MF | IP_OFFSET))
			ip_proto = 0;
		else
			ip_proto = ip->protocol;
		addr1 = (__force u32) ip->saddr;
		addr2 = (__force u32) ip->daddr;
		ihl = ip->ihl;
		break;
	case __constant_htons(ETH_P_IPV6):
		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
			goto done;

		ip6 = (struct ipv6hdr *) (skb->data + nhoff);
		ip_proto = ip6->nexthdr;
		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		goto done;
	}

	ports.v32 = 0;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		nhoff += ihl * 4 + poff;
		if (pskb_may_pull(skb, nhoff + 4)) {
			ports.v32 = * (__force u32 *) (skb->data + nhoff);
			if (ports.v16[1] < ports.v16[0])
				swap(ports.v16[0], ports.v16[1]);
		}
	}

	/* get a consistent hash (same value on both flow directions) */
	if (addr2 < addr1)
		swap(addr1, addr2);

	hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
	if (!hash)
		hash = 1;

done:
	return hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
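
/*
 * Illustrative note (not part of the original source): because the address
 * pair and the port pair are each put into canonical (sorted) order before
 * hashing, a flow A:p -> B:q and its reverse B:q -> A:p produce the same
 * rxhash, so both directions of a connection map to the same RPS CPU.
 */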

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map = NULL;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->num_rx_queues)) {
			WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
				"on queue %u, but number of RX queues is %u\n",
				dev->name, index, dev->num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	if (rxqueue->rps_map) {
		map = rcu_dereference(rxqueue->rps_map);
		if (map && map->len == 1) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rxqueue->rps_flow_table) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = rflow->cpu = next_cpu;
			if (tcpu != RPS_NO_CPU)
				rflow->last_qtail = per_cpu(softnet_data,
				    tcpu).input_queue_head;
		}
		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure belongs to another cpu.
 * If yes, queue it to our IPI list and return 1.
 * If no, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (netdev_tstamp_prequeue)
		net_timestamp_check(skb);

#ifdef CONFIG_RPS
	{
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	}
#else
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
#endif
	return ret;
}
EXPORT_SYMBOL(netif_rx);
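
/*
 * Illustrative sketch (not part of the original file): the classic way a
 * non-NAPI driver hands a received frame to netif_rx() from its rx
 * interrupt handler. my_rx_interrupt() and the way the frame arrives are
 * hypothetical; eth_type_trans() and netif_rx() are the real entry points.
 */
#if 0
static void my_rx_interrupt(struct net_device *dev, const void *buf, int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb)
		return;				/* drop on allocation failure */
	memcpy(skb_put(skb, len), buf, len);	/* copy frame from hardware */
	skb->protocol = eth_type_trans(skb, dev); /* also sets skb->dev */
	netif_rx(skb);				/* queue to per-CPU backlog */
}
#endif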

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * (a compare and 2 stores) extra right now if we don't have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	struct netdev_queue *rxq;
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		if (net_ratelimit())
			pr_warning("Redir loop detected, dropping packet (%d->%d)\n",
				   skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	rxq = &dev->rx_queue;

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/*
 * netif_nit_deliver - deliver received packets to network taps
 * @skb: buffer
 *
 * This function is used to deliver incoming packets to network
 * taps. It should be used when the normal netif_receive_skb path
 * is bypassed, for example because of VLAN acceleration.
 */
void netif_nit_deliver(struct sk_buff *skb)
{
	struct packet_type *ptype;

	if (list_empty(&ptype_all))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev)
			deliver_skb(skb, ptype, skb->dev);
	}
	rcu_read_unlock();
}

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{

	ASSERT_RTNL();
	rcu_assign_pointer(dev->rx_handler, NULL);
	rcu_assign_pointer(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
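
/*
 * Illustrative sketch (not part of the original file): the shape of a
 * minimal rx_handler, similar in spirit to what bridge/macvlan install.
 * my_rx_handler() and my_setup() are hypothetical; returning NULL tells
 * __netif_receive_skb() that the handler consumed the skb.
 */
#if 0
static struct sk_buff *my_rx_handler(struct sk_buff *skb)
{
	/* steal every frame: free it and stop further delivery */
	kfree_skb(skb);
	return NULL;
}

static int my_setup(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, my_rx_handler, NULL);
	rtnl_unlock();
	return err;
}
#endif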

static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
					      struct net_device *master)
{
	if (skb->pkt_type == PACKET_HOST) {
		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;

		memcpy(dest, master->dev_addr, ETH_ALEN);
	}
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
 * ARP on active-backup slaves with arp_validate enabled.
 */
int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
{
	struct net_device *dev = skb->dev;

	if (master->priv_flags & IFF_MASTER_ARPMON)
		dev->last_rx = jiffies;

	if ((master->priv_flags & IFF_MASTER_ALB) &&
	    (master->priv_flags & IFF_BRIDGE_PORT)) {
		/* Do address unmangle. The local destination address
		 * will be always the one master has. Provides the right
		 * functionality in a bridge.
		 */
		skb_bond_set_mac_by_master(skb, master);
	}

	if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
		if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
		    skb->protocol == __cpu_to_be16(ETH_P_ARP))
			return 0;

		if (master->priv_flags & IFF_MASTER_ALB) {
			if (skb->pkt_type != PACKET_BROADCAST &&
			    skb->pkt_type != PACKET_MULTICAST)
				return 0;
		}
		if (master->priv_flags & IFF_MASTER_8023AD &&
		    skb->protocol == __cpu_to_be16(ETH_P_SLOW))
			return 0;

		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__skb_bond_should_drop);

static int __netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *master;
	struct net_device *null_or_orig;
	struct net_device *orig_or_bond;
	int ret = NET_RX_DROP;
	__be16 type;

	if (!netdev_tstamp_prequeue)
		net_timestamp_check(skb);

	if (vlan_tx_tag_present(skb))
		vlan_hwaccel_do_receive(skb);

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		return NET_RX_DROP;

	if (!skb->skb_iif)
		skb->skb_iif = skb->dev->ifindex;

	/*
	 * bonding note: skbs received on inactive slaves should only
	 * be delivered to pkt handlers that are exact matches.  Also
	 * the deliver_no_wcard flag will be set.  If packet handlers
	 * are sensitive to duplicate packets these skbs will need to
	 * be dropped at the handler.  The vlan accel path may have
	 * already set the deliver_no_wcard flag.
	 */
	null_or_orig = NULL;
	orig_dev = skb->dev;
	master = ACCESS_ONCE(orig_dev->master);
	if (skb->deliver_no_wcard)
		null_or_orig = orig_dev;
	else if (master) {
		if (skb_bond_should_drop(skb, master)) {
			skb->deliver_no_wcard = 1;
			null_or_orig = orig_dev; /* deliver only exact match */
		} else
			skb->dev = master;
	}

	__this_cpu_inc(softnet_data.processed);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;

	pt_prev = NULL;

	rcu_read_lock();

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
		    ptype->dev == orig_dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto out;
ncls:
#endif

	/* Handle special case of bridge or macvlan */
	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		skb = rx_handler(skb);
		if (!skb)
			goto out;
	}

	/*
	 * Make sure frames received on VLAN interfaces stacked on
	 * bonding interfaces still make their way to any base bonding
	 * device that may have registered for a specific ptype.  The
	 * handler may have to adjust skb->dev and orig_dev.
	 */
	orig_or_bond = orig_dev;
	if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
	    (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
		orig_or_bond = vlan_dev_real_dev(skb->dev);
	}

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && (ptype->dev == null_or_orig ||
		     ptype->dev == skb->dev || ptype->dev == orig_dev ||
		     ptype->dev == orig_or_bond)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	rcu_read_unlock();
	return ret;
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	if (netdev_tstamp_prequeue)
		net_timestamp_check(skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	{
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
		} else {
			rcu_read_unlock();
			ret = __netif_receive_skb(skb);
		}

		return ret;
	}
#else
	return __netif_receive_skb(skb);
#endif
}
EXPORT_SYMBOL(netif_receive_skb);
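
/*
 * Illustrative sketch (not part of the original file): a NAPI driver's
 * ->poll() callback typically calls netif_receive_skb() once per frame
 * it pulls off its rx ring. my_poll() and my_fetch_frame() are
 * hypothetical.
 */
#if 0
static int my_poll(struct napi_struct *napi, int budget)
{
	int work = 0;
	struct sk_buff *skb;

	while (work < budget && (skb = my_fetch_frame(napi->dev))) {
		skb->protocol = eth_type_trans(skb, napi->dev);
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget)
		napi_complete(napi);	/* ring drained: re-enable rx irqs */
	return work;
}
#endif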

/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_complete)
			continue;

		err = ptype->gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

inline void napi_gro_flush(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		napi_gro_complete(skb);
	}

	napi->gro_count = 0;
	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
	int same_flow;
	int mac_len;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || ptype->dev || !ptype->gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_shinfo(skb)->frags[0].size -= grow;

		if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
			put_page(skb_shinfo(skb)->frags[0].page);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);

static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= compare_ether_header(skb_mac_header(p),
					      skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);
}

gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_skb_finish);

void skb_gro_reset_offset(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
		NAPI_GRO_CB(skb)->frag0 =
			page_address(skb_shinfo(skb)->frags[0].page) +
			skb_shinfo(skb)->frags[0].page_offset;
		NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
	}
}
EXPORT_SYMBOL(skb_gro_reset_offset);

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
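
/*
 * Illustrative note (not part of the original file): in a NAPI driver's
 * ->poll(), the only change needed to opt in to GRO is to hand frames to
 * napi_gro_receive(napi, skb) instead of netif_receive_skb(skb), as in
 * the hypothetical my_poll() sketch earlier; held/merged segments are
 * flushed to the stack when napi_complete() runs napi_gro_flush().
 */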

void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));

	napi->skb = skb;
}
EXPORT_SYMBOL(napi_reuse_skb);

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
			       gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_frags_finish);

struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}
EXPORT_SYMBOL(napi_frags_skb);

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				__smp_call_function_single(remsd->cpu,
							   &remsd->csd, 0);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending ipi, it's better to send them now
	 * than to wait until net_rx_action() ends.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
			 * we can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;

			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
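
/*
 * Illustrative sketch (not part of the original file): the minimal NAPI
 * life cycle a driver implements around netif_napi_add(). my_probe(),
 * my_irq_handler(), struct my_priv and my_disable_rx_irqs() are
 * hypothetical; napi_schedule_prep()/__napi_schedule() are the real
 * scheduling entry points, and my_poll() is sketched earlier, next to
 * netif_receive_skb().
 */
#if 0
static int my_probe(struct net_device *dev, struct my_priv *priv)
{
	/* weight 64 matches the default backlog weight used above */
	netif_napi_add(dev, &priv->napi, my_poll, 64);
	return 0;
}

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		my_disable_rx_irqs(priv);	/* irqs stay off until poll ends */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif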

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else
				list_move_tail(&n->poll_list, &sd->poll_list);
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}
3592
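/*
 * A hedged sketch (assumed names, not from this file) of a ->poll()
 * implementation honoring the contract net_rx_action() enforces above:
 * never call napi_complete() after consuming the full weight, since in
 * that case net_rx_action() still owns the instance and may move it on
 * the list. "my_clean_rx_ring" is a hypothetical helper delivering up
 * to @budget packets via netif_receive_skb().
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done;

	work_done = my_clean_rx_ring(priv, budget);

	if (work_done < budget) {
		/* RX ring drained: leave polled mode ... */
		napi_complete(napi);
		/* ... and unmask RX interrupts in hardware here. */
	}
	return work_done;
}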
d1b19dff 3593static gifconf_func_t *gifconf_list[NPROTO];
1da177e4
LT
3594
3595/**
3596 * register_gifconf - register a SIOCGIF handler
3597 * @family: Address family
3598 * @gifconf: Function handler
3599 *
 3600 * Register protocol-dependent address dumping routines. The handler
3601 * that is passed must not be freed or reused until it has been replaced
3602 * by another handler.
3603 */
d1b19dff 3604int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
1da177e4
LT
3605{
3606 if (family >= NPROTO)
3607 return -EINVAL;
3608 gifconf_list[family] = gifconf;
3609 return 0;
3610}
d1b19dff 3611EXPORT_SYMBOL(register_gifconf);
1da177e4
LT
3612
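/*
 * A hedged sketch of a gifconf handler for register_gifconf() above.
 * The family AF_MYPROTO and the one-address-per-device model are
 * illustrative assumptions; note that dev_ifconf() below passes a NULL
 * buffer when userspace only asks for the length needed, so the handler
 * must support a pure sizing pass.
 */
static int myproto_gifconf(struct net_device *dev, char __user *buf, int len)
{
	struct ifreq ifr;

	if (!buf)
		return sizeof(ifr);		/* sizing pass: report space needed */
	if (len < (int) sizeof(ifr))
		return 0;			/* no room left in caller's buffer */

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, dev->name);
	/* fill ifr.ifr_addr from per-device address state here */
	if (copy_to_user(buf, &ifr, sizeof(ifr)))
		return -EFAULT;
	return sizeof(ifr);
}

/* registered once at protocol init:
 * register_gifconf(AF_MYPROTO, myproto_gifconf);
 */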
3613
3614/*
3615 * Map an interface index to its name (SIOCGIFNAME)
3616 */
3617
3618/*
3619 * We need this ioctl for efficient implementation of the
3620 * if_indextoname() function required by the IPv6 API. Without
3621 * it, we would have to search all the interfaces to find a
3622 * match. --pb
3623 */
3624
881d966b 3625static int dev_ifname(struct net *net, struct ifreq __user *arg)
1da177e4
LT
3626{
3627 struct net_device *dev;
3628 struct ifreq ifr;
3629
3630 /*
3631 * Fetch the caller's info block.
3632 */
3633
3634 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3635 return -EFAULT;
3636
fb699dfd
ED
3637 rcu_read_lock();
3638 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
1da177e4 3639 if (!dev) {
fb699dfd 3640 rcu_read_unlock();
1da177e4
LT
3641 return -ENODEV;
3642 }
3643
3644 strcpy(ifr.ifr_name, dev->name);
fb699dfd 3645 rcu_read_unlock();
1da177e4
LT
3646
3647 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3648 return -EFAULT;
3649 return 0;
3650}
3651
3652/*
3653 * Perform a SIOCGIFCONF call. This structure will change
3654 * size eventually, and there is nothing I can do about it.
3655 * Thus we will need a 'compatibility mode'.
3656 */
3657
881d966b 3658static int dev_ifconf(struct net *net, char __user *arg)
1da177e4
LT
3659{
3660 struct ifconf ifc;
3661 struct net_device *dev;
3662 char __user *pos;
3663 int len;
3664 int total;
3665 int i;
3666
3667 /*
3668 * Fetch the caller's info block.
3669 */
3670
3671 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3672 return -EFAULT;
3673
3674 pos = ifc.ifc_buf;
3675 len = ifc.ifc_len;
3676
3677 /*
3678 * Loop over the interfaces, and write an info block for each.
3679 */
3680
3681 total = 0;
881d966b 3682 for_each_netdev(net, dev) {
1da177e4
LT
3683 for (i = 0; i < NPROTO; i++) {
3684 if (gifconf_list[i]) {
3685 int done;
3686 if (!pos)
3687 done = gifconf_list[i](dev, NULL, 0);
3688 else
3689 done = gifconf_list[i](dev, pos + total,
3690 len - total);
3691 if (done < 0)
3692 return -EFAULT;
3693 total += done;
3694 }
3695 }
4ec93edb 3696 }
1da177e4
LT
3697
3698 /*
3699 * All done. Write the updated control block back to the caller.
3700 */
3701 ifc.ifc_len = total;
3702
3703 /*
3704 * Both BSD and Solaris return 0 here, so we do too.
3705 */
3706 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3707}
3708
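/*
 * For reference, a user-space sketch (illustrative, not kernel code) of
 * the SIOCGIFCONF request serviced by dev_ifconf() above; each
 * configured address comes back as one struct ifreq:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *
 *	int list_interfaces(void)
 *	{
 *		struct ifreq reqs[32];
 *		struct ifconf ifc;
 *		int fd, i, n;
 *
 *		fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifc, 0, sizeof(ifc));
 *		ifc.ifc_len = sizeof(reqs);
 *		ifc.ifc_req = reqs;
 *		if (ioctl(fd, SIOCGIFCONF, &ifc) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		n = ifc.ifc_len / sizeof(struct ifreq);
 *		for (i = 0; i < n; i++)
 *			printf("%s\n", reqs[i].ifr_name);
 *		close(fd);
 *		return 0;
 *	}
 */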
3709#ifdef CONFIG_PROC_FS
3710/*
3711 * This is invoked by the /proc filesystem handler to display a device
3712 * in detail.
3713 */
7562f876 3714void *dev_seq_start(struct seq_file *seq, loff_t *pos)
c6d14c84 3715 __acquires(RCU)
1da177e4 3716{
e372c414 3717 struct net *net = seq_file_net(seq);
7562f876 3718 loff_t off;
1da177e4 3719 struct net_device *dev;
1da177e4 3720
c6d14c84 3721 rcu_read_lock();
7562f876
PE
3722 if (!*pos)
3723 return SEQ_START_TOKEN;
1da177e4 3724
7562f876 3725 off = 1;
c6d14c84 3726 for_each_netdev_rcu(net, dev)
7562f876
PE
3727 if (off++ == *pos)
3728 return dev;
1da177e4 3729
7562f876 3730 return NULL;
1da177e4
LT
3731}
3732
3733void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3734{
c6d14c84
ED
3735 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3736 first_net_device(seq_file_net(seq)) :
3737 next_net_device((struct net_device *)v);
3738
1da177e4 3739 ++*pos;
c6d14c84 3740 return rcu_dereference(dev);
1da177e4
LT
3741}
3742
3743void dev_seq_stop(struct seq_file *seq, void *v)
c6d14c84 3744 __releases(RCU)
1da177e4 3745{
c6d14c84 3746 rcu_read_unlock();
1da177e4
LT
3747}
3748
3749static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3750{
28172739
ED
3751 struct rtnl_link_stats64 temp;
3752 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
1da177e4 3753
be1f3c2c
BH
3754 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
3755 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
5a1b5898
RR
3756 dev->name, stats->rx_bytes, stats->rx_packets,
3757 stats->rx_errors,
3758 stats->rx_dropped + stats->rx_missed_errors,
3759 stats->rx_fifo_errors,
3760 stats->rx_length_errors + stats->rx_over_errors +
3761 stats->rx_crc_errors + stats->rx_frame_errors,
3762 stats->rx_compressed, stats->multicast,
3763 stats->tx_bytes, stats->tx_packets,
3764 stats->tx_errors, stats->tx_dropped,
3765 stats->tx_fifo_errors, stats->collisions,
3766 stats->tx_carrier_errors +
3767 stats->tx_aborted_errors +
3768 stats->tx_window_errors +
3769 stats->tx_heartbeat_errors,
3770 stats->tx_compressed);
1da177e4
LT
3771}
3772
3773/*
3774 * Called from the PROCfs module. This now uses the new arbitrary sized
3775 * /proc/net interface to create /proc/net/dev
3776 */
3777static int dev_seq_show(struct seq_file *seq, void *v)
3778{
3779 if (v == SEQ_START_TOKEN)
3780 seq_puts(seq, "Inter-| Receive "
3781 " | Transmit\n"
3782 " face |bytes packets errs drop fifo frame "
3783 "compressed multicast|bytes packets errs "
3784 "drop fifo colls carrier compressed\n");
3785 else
3786 dev_seq_printf_stats(seq, v);
3787 return 0;
3788}
3789
dee42870 3790static struct softnet_data *softnet_get_online(loff_t *pos)
1da177e4 3791{
dee42870 3792 struct softnet_data *sd = NULL;
1da177e4 3793
0c0b0aca 3794 while (*pos < nr_cpu_ids)
4ec93edb 3795 if (cpu_online(*pos)) {
dee42870 3796 sd = &per_cpu(softnet_data, *pos);
1da177e4
LT
3797 break;
3798 } else
3799 ++*pos;
dee42870 3800 return sd;
1da177e4
LT
3801}
3802
3803static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3804{
3805 return softnet_get_online(pos);
3806}
3807
3808static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3809{
3810 ++*pos;
3811 return softnet_get_online(pos);
3812}
3813
3814static void softnet_seq_stop(struct seq_file *seq, void *v)
3815{
3816}
3817
3818static int softnet_seq_show(struct seq_file *seq, void *v)
3819{
dee42870 3820 struct softnet_data *sd = v;
1da177e4 3821
0a9627f2 3822 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
dee42870 3823 sd->processed, sd->dropped, sd->time_squeeze, 0,
c1ebcdb8 3824 0, 0, 0, 0, /* was fastroute */
dee42870 3825 sd->cpu_collision, sd->received_rps);
1da177e4
LT
3826 return 0;
3827}
3828
f690808e 3829static const struct seq_operations dev_seq_ops = {
1da177e4
LT
3830 .start = dev_seq_start,
3831 .next = dev_seq_next,
3832 .stop = dev_seq_stop,
3833 .show = dev_seq_show,
3834};
3835
3836static int dev_seq_open(struct inode *inode, struct file *file)
3837{
e372c414
DL
3838 return seq_open_net(inode, file, &dev_seq_ops,
3839 sizeof(struct seq_net_private));
1da177e4
LT
3840}
3841
9a32144e 3842static const struct file_operations dev_seq_fops = {
1da177e4
LT
3843 .owner = THIS_MODULE,
3844 .open = dev_seq_open,
3845 .read = seq_read,
3846 .llseek = seq_lseek,
e372c414 3847 .release = seq_release_net,
1da177e4
LT
3848};
3849
f690808e 3850static const struct seq_operations softnet_seq_ops = {
1da177e4
LT
3851 .start = softnet_seq_start,
3852 .next = softnet_seq_next,
3853 .stop = softnet_seq_stop,
3854 .show = softnet_seq_show,
3855};
3856
3857static int softnet_seq_open(struct inode *inode, struct file *file)
3858{
3859 return seq_open(file, &softnet_seq_ops);
3860}
3861
9a32144e 3862static const struct file_operations softnet_seq_fops = {
1da177e4
LT
3863 .owner = THIS_MODULE,
3864 .open = softnet_seq_open,
3865 .read = seq_read,
3866 .llseek = seq_lseek,
3867 .release = seq_release,
3868};
3869
0e1256ff
SH
3870static void *ptype_get_idx(loff_t pos)
3871{
3872 struct packet_type *pt = NULL;
3873 loff_t i = 0;
3874 int t;
3875
3876 list_for_each_entry_rcu(pt, &ptype_all, list) {
3877 if (i == pos)
3878 return pt;
3879 ++i;
3880 }
3881
82d8a867 3882 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
0e1256ff
SH
3883 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3884 if (i == pos)
3885 return pt;
3886 ++i;
3887 }
3888 }
3889 return NULL;
3890}
3891
3892static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
72348a42 3893 __acquires(RCU)
0e1256ff
SH
3894{
3895 rcu_read_lock();
3896 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3897}
3898
3899static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3900{
3901 struct packet_type *pt;
3902 struct list_head *nxt;
3903 int hash;
3904
3905 ++*pos;
3906 if (v == SEQ_START_TOKEN)
3907 return ptype_get_idx(0);
3908
3909 pt = v;
3910 nxt = pt->list.next;
3911 if (pt->type == htons(ETH_P_ALL)) {
3912 if (nxt != &ptype_all)
3913 goto found;
3914 hash = 0;
3915 nxt = ptype_base[0].next;
3916 } else
82d8a867 3917 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
0e1256ff
SH
3918
3919 while (nxt == &ptype_base[hash]) {
82d8a867 3920 if (++hash >= PTYPE_HASH_SIZE)
0e1256ff
SH
3921 return NULL;
3922 nxt = ptype_base[hash].next;
3923 }
3924found:
3925 return list_entry(nxt, struct packet_type, list);
3926}
3927
3928static void ptype_seq_stop(struct seq_file *seq, void *v)
72348a42 3929 __releases(RCU)
0e1256ff
SH
3930{
3931 rcu_read_unlock();
3932}
3933
0e1256ff
SH
3934static int ptype_seq_show(struct seq_file *seq, void *v)
3935{
3936 struct packet_type *pt = v;
3937
3938 if (v == SEQ_START_TOKEN)
3939 seq_puts(seq, "Type Device Function\n");
c346dca1 3940 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
0e1256ff
SH
3941 if (pt->type == htons(ETH_P_ALL))
3942 seq_puts(seq, "ALL ");
3943 else
3944 seq_printf(seq, "%04x", ntohs(pt->type));
3945
908cd2da
AD
3946 seq_printf(seq, " %-8s %pF\n",
3947 pt->dev ? pt->dev->name : "", pt->func);
0e1256ff
SH
3948 }
3949
3950 return 0;
3951}
3952
3953static const struct seq_operations ptype_seq_ops = {
3954 .start = ptype_seq_start,
3955 .next = ptype_seq_next,
3956 .stop = ptype_seq_stop,
3957 .show = ptype_seq_show,
3958};
3959
3960static int ptype_seq_open(struct inode *inode, struct file *file)
3961{
2feb27db
PE
3962 return seq_open_net(inode, file, &ptype_seq_ops,
3963 sizeof(struct seq_net_private));
0e1256ff
SH
3964}
3965
3966static const struct file_operations ptype_seq_fops = {
3967 .owner = THIS_MODULE,
3968 .open = ptype_seq_open,
3969 .read = seq_read,
3970 .llseek = seq_lseek,
2feb27db 3971 .release = seq_release_net,
0e1256ff
SH
3972};
3973
3974
4665079c 3975static int __net_init dev_proc_net_init(struct net *net)
1da177e4
LT
3976{
3977 int rc = -ENOMEM;
3978
881d966b 3979 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
1da177e4 3980 goto out;
881d966b 3981 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
1da177e4 3982 goto out_dev;
881d966b 3983 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
457c4cbc 3984 goto out_softnet;
0e1256ff 3985
881d966b 3986 if (wext_proc_init(net))
457c4cbc 3987 goto out_ptype;
1da177e4
LT
3988 rc = 0;
3989out:
3990 return rc;
457c4cbc 3991out_ptype:
881d966b 3992 proc_net_remove(net, "ptype");
1da177e4 3993out_softnet:
881d966b 3994 proc_net_remove(net, "softnet_stat");
1da177e4 3995out_dev:
881d966b 3996 proc_net_remove(net, "dev");
1da177e4
LT
3997 goto out;
3998}
881d966b 3999
4665079c 4000static void __net_exit dev_proc_net_exit(struct net *net)
881d966b
EB
4001{
4002 wext_proc_exit(net);
4003
4004 proc_net_remove(net, "ptype");
4005 proc_net_remove(net, "softnet_stat");
4006 proc_net_remove(net, "dev");
4007}
4008
022cbae6 4009static struct pernet_operations __net_initdata dev_proc_ops = {
881d966b
EB
4010 .init = dev_proc_net_init,
4011 .exit = dev_proc_net_exit,
4012};
4013
4014static int __init dev_proc_init(void)
4015{
4016 return register_pernet_subsys(&dev_proc_ops);
4017}
1da177e4
LT
4018#else
4019#define dev_proc_init() 0
4020#endif /* CONFIG_PROC_FS */
4021
4022
4023/**
4024 * netdev_set_master - set up master/slave pair
4025 * @slave: slave device
4026 * @master: new master device
4027 *
4028 * Changes the master device of the slave. Pass %NULL to break the
4029 * bonding. The caller must hold the RTNL semaphore. On a failure
4030 * a negative errno code is returned. On success the reference counts
4031 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
4032 * function returns zero.
4033 */
4034int netdev_set_master(struct net_device *slave, struct net_device *master)
4035{
4036 struct net_device *old = slave->master;
4037
4038 ASSERT_RTNL();
4039
4040 if (master) {
4041 if (old)
4042 return -EBUSY;
4043 dev_hold(master);
4044 }
4045
4046 slave->master = master;
4ec93edb 4047
283f2fe8
ED
4048 if (old) {
4049 synchronize_net();
1da177e4 4050 dev_put(old);
283f2fe8 4051 }
1da177e4
LT
4052 if (master)
4053 slave->flags |= IFF_SLAVE;
4054 else
4055 slave->flags &= ~IFF_SLAVE;
4056
4057 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4058 return 0;
4059}
d1b19dff 4060EXPORT_SYMBOL(netdev_set_master);
1da177e4 4061
b6c40d68
PM
4062static void dev_change_rx_flags(struct net_device *dev, int flags)
4063{
d314774c
SH
4064 const struct net_device_ops *ops = dev->netdev_ops;
4065
4066 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4067 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
4068}
4069
dad9b335 4070static int __dev_set_promiscuity(struct net_device *dev, int inc)
1da177e4
LT
4071{
4072 unsigned short old_flags = dev->flags;
8192b0c4
DH
4073 uid_t uid;
4074 gid_t gid;
1da177e4 4075
24023451
PM
4076 ASSERT_RTNL();
4077
dad9b335
WC
4078 dev->flags |= IFF_PROMISC;
4079 dev->promiscuity += inc;
4080 if (dev->promiscuity == 0) {
4081 /*
4082 * Avoid overflow.
4083 * If inc causes overflow, untouch promisc and return error.
4084 */
4085 if (inc < 0)
4086 dev->flags &= ~IFF_PROMISC;
4087 else {
4088 dev->promiscuity -= inc;
4089 printk(KERN_WARNING "%s: promiscuity touches roof, "
4090 "set promiscuity failed, promiscuity feature "
4091 "of device might be broken.\n", dev->name);
4092 return -EOVERFLOW;
4093 }
4094 }
52609c0b 4095 if (dev->flags != old_flags) {
1da177e4
LT
4096 printk(KERN_INFO "device %s %s promiscuous mode\n",
4097 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4ec93edb 4098 "left");
8192b0c4
DH
4099 if (audit_enabled) {
4100 current_uid_gid(&uid, &gid);
7759db82
KHK
4101 audit_log(current->audit_context, GFP_ATOMIC,
4102 AUDIT_ANOM_PROMISCUOUS,
4103 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4104 dev->name, (dev->flags & IFF_PROMISC),
4105 (old_flags & IFF_PROMISC),
4106 audit_get_loginuid(current),
8192b0c4 4107 uid, gid,
7759db82 4108 audit_get_sessionid(current));
8192b0c4 4109 }
24023451 4110
b6c40d68 4111 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 4112 }
dad9b335 4113 return 0;
1da177e4
LT
4114}
4115
4417da66
PM
4116/**
4117 * dev_set_promiscuity - update promiscuity count on a device
4118 * @dev: device
4119 * @inc: modifier
4120 *
4121 * Add or remove promiscuity from a device. While the count in the device
4122 * remains above zero the interface remains promiscuous. Once it hits zero
4123 * the device reverts back to normal filtering operation. A negative inc
4124 * value is used to drop promiscuity on the device.
dad9b335 4125 * Return 0 if successful or a negative errno code on error.
4417da66 4126 */
dad9b335 4127int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66
PM
4128{
4129 unsigned short old_flags = dev->flags;
dad9b335 4130 int err;
4417da66 4131
dad9b335 4132 err = __dev_set_promiscuity(dev, inc);
4b5a698e 4133 if (err < 0)
dad9b335 4134 return err;
4417da66
PM
4135 if (dev->flags != old_flags)
4136 dev_set_rx_mode(dev);
dad9b335 4137 return err;
4417da66 4138}
d1b19dff 4139EXPORT_SYMBOL(dev_set_promiscuity);
4417da66 4140
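/*
 * A minimal usage sketch (assumed caller, not from this file):
 * dev_set_promiscuity() is reference counted, so every +1 must be
 * paired with a later -1, and the RTNL must be held for both calls.
 */
static int my_start_sniffing(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one reference */
	rtnl_unlock();
	return err;
}

/* and symmetrically on teardown: dev_set_promiscuity(dev, -1) under RTNL */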
1da177e4
LT
4141/**
4142 * dev_set_allmulti - update allmulti count on a device
4143 * @dev: device
4144 * @inc: modifier
4145 *
4146 * Add or remove reception of all multicast frames to a device. While the
4147 * count in the device remains above zero the interface remains listening
 4148 * to all multicast frames. Once it hits zero the device reverts back to normal
4149 * filtering operation. A negative @inc value is used to drop the counter
4150 * when releasing a resource needing all multicasts.
dad9b335 4151 * Return 0 if successful or a negative errno code on error.
1da177e4
LT
4152 */
4153
dad9b335 4154int dev_set_allmulti(struct net_device *dev, int inc)
1da177e4
LT
4155{
4156 unsigned short old_flags = dev->flags;
4157
24023451
PM
4158 ASSERT_RTNL();
4159
1da177e4 4160 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
4161 dev->allmulti += inc;
4162 if (dev->allmulti == 0) {
4163 /*
4164 * Avoid overflow.
4165 * If inc causes overflow, untouch allmulti and return error.
4166 */
4167 if (inc < 0)
4168 dev->flags &= ~IFF_ALLMULTI;
4169 else {
4170 dev->allmulti -= inc;
4171 printk(KERN_WARNING "%s: allmulti touches roof, "
4172 "set allmulti failed, allmulti feature of "
4173 "device might be broken.\n", dev->name);
4174 return -EOVERFLOW;
4175 }
4176 }
24023451 4177 if (dev->flags ^ old_flags) {
b6c40d68 4178 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 4179 dev_set_rx_mode(dev);
24023451 4180 }
dad9b335 4181 return 0;
4417da66 4182}
d1b19dff 4183EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
4184
4185/*
4186 * Upload unicast and multicast address lists to device and
4187 * configure RX filtering. When the device doesn't support unicast
53ccaae1 4188 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
4189 * are present.
4190 */
4191void __dev_set_rx_mode(struct net_device *dev)
4192{
d314774c
SH
4193 const struct net_device_ops *ops = dev->netdev_ops;
4194
4417da66
PM
4195 /* dev_open will call this function so the list will stay sane. */
4196 if (!(dev->flags&IFF_UP))
4197 return;
4198
4199 if (!netif_device_present(dev))
40b77c94 4200 return;
4417da66 4201
d314774c
SH
4202 if (ops->ndo_set_rx_mode)
4203 ops->ndo_set_rx_mode(dev);
4417da66
PM
4204 else {
 4205 /* Unicast address changes may only happen under the rtnl,
4206 * therefore calling __dev_set_promiscuity here is safe.
4207 */
32e7bfc4 4208 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4417da66
PM
4209 __dev_set_promiscuity(dev, 1);
4210 dev->uc_promisc = 1;
32e7bfc4 4211 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4417da66
PM
4212 __dev_set_promiscuity(dev, -1);
4213 dev->uc_promisc = 0;
4214 }
4215
d314774c
SH
4216 if (ops->ndo_set_multicast_list)
4217 ops->ndo_set_multicast_list(dev);
4417da66
PM
4218 }
4219}
4220
4221void dev_set_rx_mode(struct net_device *dev)
4222{
b9e40857 4223 netif_addr_lock_bh(dev);
4417da66 4224 __dev_set_rx_mode(dev);
b9e40857 4225 netif_addr_unlock_bh(dev);
1da177e4
LT
4226}
4227
f0db275a
SH
4228/**
4229 * dev_get_flags - get flags reported to userspace
4230 * @dev: device
4231 *
4232 * Get the combination of flag bits exported through APIs to userspace.
4233 */
1da177e4
LT
4234unsigned dev_get_flags(const struct net_device *dev)
4235{
4236 unsigned flags;
4237
4238 flags = (dev->flags & ~(IFF_PROMISC |
4239 IFF_ALLMULTI |
b00055aa
SR
4240 IFF_RUNNING |
4241 IFF_LOWER_UP |
4242 IFF_DORMANT)) |
1da177e4
LT
4243 (dev->gflags & (IFF_PROMISC |
4244 IFF_ALLMULTI));
4245
b00055aa
SR
4246 if (netif_running(dev)) {
4247 if (netif_oper_up(dev))
4248 flags |= IFF_RUNNING;
4249 if (netif_carrier_ok(dev))
4250 flags |= IFF_LOWER_UP;
4251 if (netif_dormant(dev))
4252 flags |= IFF_DORMANT;
4253 }
1da177e4
LT
4254
4255 return flags;
4256}
d1b19dff 4257EXPORT_SYMBOL(dev_get_flags);
1da177e4 4258
bd380811 4259int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 4260{
1da177e4 4261 int old_flags = dev->flags;
bd380811 4262 int ret;
1da177e4 4263
24023451
PM
4264 ASSERT_RTNL();
4265
1da177e4
LT
4266 /*
4267 * Set the flags on our device.
4268 */
4269
4270 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4271 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4272 IFF_AUTOMEDIA)) |
4273 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4274 IFF_ALLMULTI));
4275
4276 /*
4277 * Load in the correct multicast list now the flags have changed.
4278 */
4279
b6c40d68
PM
4280 if ((old_flags ^ flags) & IFF_MULTICAST)
4281 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 4282
4417da66 4283 dev_set_rx_mode(dev);
1da177e4
LT
4284
4285 /*
 4286 * Have we downed the interface? We handle IFF_UP ourselves
4287 * according to user attempts to set it, rather than blindly
4288 * setting it.
4289 */
4290
4291 ret = 0;
4292 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
bd380811 4293 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4
LT
4294
4295 if (!ret)
4417da66 4296 dev_set_rx_mode(dev);
1da177e4
LT
4297 }
4298
1da177e4 4299 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff
ED
4300 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4301
1da177e4
LT
4302 dev->gflags ^= IFF_PROMISC;
4303 dev_set_promiscuity(dev, inc);
4304 }
4305
4306 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4307 is important. Some (broken) drivers set IFF_PROMISC when
 4308 IFF_ALLMULTI is requested, without asking us and without reporting it.
4309 */
4310 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
4311 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4312
1da177e4
LT
4313 dev->gflags ^= IFF_ALLMULTI;
4314 dev_set_allmulti(dev, inc);
4315 }
4316
bd380811
PM
4317 return ret;
4318}
4319
4320void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4321{
4322 unsigned int changes = dev->flags ^ old_flags;
4323
4324 if (changes & IFF_UP) {
4325 if (dev->flags & IFF_UP)
4326 call_netdevice_notifiers(NETDEV_UP, dev);
4327 else
4328 call_netdevice_notifiers(NETDEV_DOWN, dev);
4329 }
4330
4331 if (dev->flags & IFF_UP &&
4332 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4333 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4334}
4335
4336/**
4337 * dev_change_flags - change device settings
4338 * @dev: device
4339 * @flags: device state flags
4340 *
4341 * Change settings on device based state flags. The flags are
4342 * in the userspace exported format.
4343 */
4344int dev_change_flags(struct net_device *dev, unsigned flags)
4345{
4346 int ret, changes;
4347 int old_flags = dev->flags;
4348
4349 ret = __dev_change_flags(dev, flags);
4350 if (ret < 0)
4351 return ret;
4352
4353 changes = old_flags ^ dev->flags;
7c355f53
TG
4354 if (changes)
4355 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
1da177e4 4356
bd380811 4357 __dev_notify_flags(dev, old_flags);
1da177e4
LT
4358 return ret;
4359}
d1b19dff 4360EXPORT_SYMBOL(dev_change_flags);
1da177e4 4361
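/*
 * A sketch of the common in-kernel caller pattern (names assumed):
 * bringing an interface up is just setting IFF_UP through
 * dev_change_flags() under the RTNL, mirroring what "ifconfig up"
 * triggers via SIOCSIFFLAGS in dev_ifsioc() below.
 */
static int my_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}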
f0db275a
SH
4362/**
4363 * dev_set_mtu - Change maximum transfer unit
4364 * @dev: device
4365 * @new_mtu: new transfer unit
4366 *
4367 * Change the maximum transfer size of the network device.
4368 */
1da177e4
LT
4369int dev_set_mtu(struct net_device *dev, int new_mtu)
4370{
d314774c 4371 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4372 int err;
4373
4374 if (new_mtu == dev->mtu)
4375 return 0;
4376
4377 /* MTU must be positive. */
4378 if (new_mtu < 0)
4379 return -EINVAL;
4380
4381 if (!netif_device_present(dev))
4382 return -ENODEV;
4383
4384 err = 0;
d314774c
SH
4385 if (ops->ndo_change_mtu)
4386 err = ops->ndo_change_mtu(dev, new_mtu);
1da177e4
LT
4387 else
4388 dev->mtu = new_mtu;
d314774c 4389
1da177e4 4390 if (!err && dev->flags & IFF_UP)
056925ab 4391 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
1da177e4
LT
4392 return err;
4393}
d1b19dff 4394EXPORT_SYMBOL(dev_set_mtu);
1da177e4 4395
f0db275a
SH
4396/**
4397 * dev_set_mac_address - Change Media Access Control Address
4398 * @dev: device
4399 * @sa: new address
4400 *
4401 * Change the hardware (MAC) address of the device
4402 */
1da177e4
LT
4403int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4404{
d314774c 4405 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4406 int err;
4407
d314774c 4408 if (!ops->ndo_set_mac_address)
1da177e4
LT
4409 return -EOPNOTSUPP;
4410 if (sa->sa_family != dev->type)
4411 return -EINVAL;
4412 if (!netif_device_present(dev))
4413 return -ENODEV;
d314774c 4414 err = ops->ndo_set_mac_address(dev, sa);
1da177e4 4415 if (!err)
056925ab 4416 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
4417 return err;
4418}
d1b19dff 4419EXPORT_SYMBOL(dev_set_mac_address);
1da177e4
LT
4420
4421/*
3710becf 4422 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
1da177e4 4423 */
14e3e079 4424static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
1da177e4
LT
4425{
4426 int err;
3710becf 4427 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
1da177e4
LT
4428
4429 if (!dev)
4430 return -ENODEV;
4431
4432 switch (cmd) {
d1b19dff
ED
4433 case SIOCGIFFLAGS: /* Get interface flags */
4434 ifr->ifr_flags = (short) dev_get_flags(dev);
4435 return 0;
1da177e4 4436
d1b19dff
ED
4437 case SIOCGIFMETRIC: /* Get the metric on the interface
4438 (currently unused) */
4439 ifr->ifr_metric = 0;
4440 return 0;
1da177e4 4441
d1b19dff
ED
4442 case SIOCGIFMTU: /* Get the MTU of a device */
4443 ifr->ifr_mtu = dev->mtu;
4444 return 0;
1da177e4 4445
d1b19dff
ED
4446 case SIOCGIFHWADDR:
4447 if (!dev->addr_len)
4448 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4449 else
4450 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4451 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4452 ifr->ifr_hwaddr.sa_family = dev->type;
4453 return 0;
1da177e4 4454
d1b19dff
ED
4455 case SIOCGIFSLAVE:
4456 err = -EINVAL;
4457 break;
14e3e079 4458
d1b19dff
ED
4459 case SIOCGIFMAP:
4460 ifr->ifr_map.mem_start = dev->mem_start;
4461 ifr->ifr_map.mem_end = dev->mem_end;
4462 ifr->ifr_map.base_addr = dev->base_addr;
4463 ifr->ifr_map.irq = dev->irq;
4464 ifr->ifr_map.dma = dev->dma;
4465 ifr->ifr_map.port = dev->if_port;
4466 return 0;
14e3e079 4467
d1b19dff
ED
4468 case SIOCGIFINDEX:
4469 ifr->ifr_ifindex = dev->ifindex;
4470 return 0;
14e3e079 4471
d1b19dff
ED
4472 case SIOCGIFTXQLEN:
4473 ifr->ifr_qlen = dev->tx_queue_len;
4474 return 0;
14e3e079 4475
d1b19dff
ED
4476 default:
4477 /* dev_ioctl() should ensure this case
4478 * is never reached
4479 */
4480 WARN_ON(1);
4481 err = -EINVAL;
4482 break;
14e3e079
JG
4483
4484 }
4485 return err;
4486}
4487
4488/*
4489 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4490 */
4491static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4492{
4493 int err;
4494 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5f2f6da7 4495 const struct net_device_ops *ops;
14e3e079
JG
4496
4497 if (!dev)
4498 return -ENODEV;
4499
5f2f6da7
JP
4500 ops = dev->netdev_ops;
4501
14e3e079 4502 switch (cmd) {
d1b19dff
ED
4503 case SIOCSIFFLAGS: /* Set interface flags */
4504 return dev_change_flags(dev, ifr->ifr_flags);
14e3e079 4505
d1b19dff
ED
4506 case SIOCSIFMETRIC: /* Set the metric on the interface
4507 (currently unused) */
4508 return -EOPNOTSUPP;
14e3e079 4509
d1b19dff
ED
4510 case SIOCSIFMTU: /* Set the MTU of a device */
4511 return dev_set_mtu(dev, ifr->ifr_mtu);
1da177e4 4512
d1b19dff
ED
4513 case SIOCSIFHWADDR:
4514 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
1da177e4 4515
d1b19dff
ED
4516 case SIOCSIFHWBROADCAST:
4517 if (ifr->ifr_hwaddr.sa_family != dev->type)
4518 return -EINVAL;
4519 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4520 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4521 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4522 return 0;
1da177e4 4523
d1b19dff
ED
4524 case SIOCSIFMAP:
4525 if (ops->ndo_set_config) {
1da177e4
LT
4526 if (!netif_device_present(dev))
4527 return -ENODEV;
d1b19dff
ED
4528 return ops->ndo_set_config(dev, &ifr->ifr_map);
4529 }
4530 return -EOPNOTSUPP;
1da177e4 4531
d1b19dff
ED
4532 case SIOCADDMULTI:
4533 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4534 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4535 return -EINVAL;
4536 if (!netif_device_present(dev))
4537 return -ENODEV;
22bedad3 4538 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
d1b19dff
ED
4539
4540 case SIOCDELMULTI:
4541 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4542 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4543 return -EINVAL;
4544 if (!netif_device_present(dev))
4545 return -ENODEV;
22bedad3 4546 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
1da177e4 4547
d1b19dff
ED
4548 case SIOCSIFTXQLEN:
4549 if (ifr->ifr_qlen < 0)
4550 return -EINVAL;
4551 dev->tx_queue_len = ifr->ifr_qlen;
4552 return 0;
1da177e4 4553
d1b19dff
ED
4554 case SIOCSIFNAME:
4555 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4556 return dev_change_name(dev, ifr->ifr_newname);
1da177e4 4557
d1b19dff
ED
4558 /*
4559 * Unknown or private ioctl
4560 */
4561 default:
4562 if ((cmd >= SIOCDEVPRIVATE &&
4563 cmd <= SIOCDEVPRIVATE + 15) ||
4564 cmd == SIOCBONDENSLAVE ||
4565 cmd == SIOCBONDRELEASE ||
4566 cmd == SIOCBONDSETHWADDR ||
4567 cmd == SIOCBONDSLAVEINFOQUERY ||
4568 cmd == SIOCBONDINFOQUERY ||
4569 cmd == SIOCBONDCHANGEACTIVE ||
4570 cmd == SIOCGMIIPHY ||
4571 cmd == SIOCGMIIREG ||
4572 cmd == SIOCSMIIREG ||
4573 cmd == SIOCBRADDIF ||
4574 cmd == SIOCBRDELIF ||
4575 cmd == SIOCSHWTSTAMP ||
4576 cmd == SIOCWANDEV) {
4577 err = -EOPNOTSUPP;
4578 if (ops->ndo_do_ioctl) {
4579 if (netif_device_present(dev))
4580 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4581 else
4582 err = -ENODEV;
4583 }
4584 } else
4585 err = -EINVAL;
1da177e4
LT
4586
4587 }
4588 return err;
4589}
4590
4591/*
4592 * This function handles all "interface"-type I/O control requests. The actual
4593 * 'doing' part of this is dev_ifsioc above.
4594 */
4595
4596/**
4597 * dev_ioctl - network device ioctl
c4ea43c5 4598 * @net: the applicable net namespace
1da177e4
LT
4599 * @cmd: command to issue
4600 * @arg: pointer to a struct ifreq in user space
4601 *
4602 * Issue ioctl functions to devices. This is normally called by the
4603 * user space syscall interfaces but can sometimes be useful for
4604 * other purposes. The return value is the return from the syscall if
4605 * positive or a negative errno code on error.
4606 */
4607
881d966b 4608int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1da177e4
LT
4609{
4610 struct ifreq ifr;
4611 int ret;
4612 char *colon;
4613
 4614 /* One special case: SIOCGIFCONF takes an ifconf argument
 4615 and requires a shared lock, because it sleeps writing
4616 to user space.
4617 */
4618
4619 if (cmd == SIOCGIFCONF) {
6756ae4b 4620 rtnl_lock();
881d966b 4621 ret = dev_ifconf(net, (char __user *) arg);
6756ae4b 4622 rtnl_unlock();
1da177e4
LT
4623 return ret;
4624 }
4625 if (cmd == SIOCGIFNAME)
881d966b 4626 return dev_ifname(net, (struct ifreq __user *)arg);
1da177e4
LT
4627
4628 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4629 return -EFAULT;
4630
4631 ifr.ifr_name[IFNAMSIZ-1] = 0;
4632
4633 colon = strchr(ifr.ifr_name, ':');
4634 if (colon)
4635 *colon = 0;
4636
4637 /*
4638 * See which interface the caller is talking about.
4639 */
4640
4641 switch (cmd) {
d1b19dff
ED
4642 /*
4643 * These ioctl calls:
4644 * - can be done by all.
4645 * - atomic and do not require locking.
4646 * - return a value
4647 */
4648 case SIOCGIFFLAGS:
4649 case SIOCGIFMETRIC:
4650 case SIOCGIFMTU:
4651 case SIOCGIFHWADDR:
4652 case SIOCGIFSLAVE:
4653 case SIOCGIFMAP:
4654 case SIOCGIFINDEX:
4655 case SIOCGIFTXQLEN:
4656 dev_load(net, ifr.ifr_name);
3710becf 4657 rcu_read_lock();
d1b19dff 4658 ret = dev_ifsioc_locked(net, &ifr, cmd);
3710becf 4659 rcu_read_unlock();
d1b19dff
ED
4660 if (!ret) {
4661 if (colon)
4662 *colon = ':';
4663 if (copy_to_user(arg, &ifr,
4664 sizeof(struct ifreq)))
4665 ret = -EFAULT;
4666 }
4667 return ret;
1da177e4 4668
d1b19dff
ED
4669 case SIOCETHTOOL:
4670 dev_load(net, ifr.ifr_name);
4671 rtnl_lock();
4672 ret = dev_ethtool(net, &ifr);
4673 rtnl_unlock();
4674 if (!ret) {
4675 if (colon)
4676 *colon = ':';
4677 if (copy_to_user(arg, &ifr,
4678 sizeof(struct ifreq)))
4679 ret = -EFAULT;
4680 }
4681 return ret;
1da177e4 4682
d1b19dff
ED
4683 /*
4684 * These ioctl calls:
4685 * - require superuser power.
4686 * - require strict serialization.
4687 * - return a value
4688 */
4689 case SIOCGMIIPHY:
4690 case SIOCGMIIREG:
4691 case SIOCSIFNAME:
4692 if (!capable(CAP_NET_ADMIN))
4693 return -EPERM;
4694 dev_load(net, ifr.ifr_name);
4695 rtnl_lock();
4696 ret = dev_ifsioc(net, &ifr, cmd);
4697 rtnl_unlock();
4698 if (!ret) {
4699 if (colon)
4700 *colon = ':';
4701 if (copy_to_user(arg, &ifr,
4702 sizeof(struct ifreq)))
4703 ret = -EFAULT;
4704 }
4705 return ret;
1da177e4 4706
d1b19dff
ED
4707 /*
4708 * These ioctl calls:
4709 * - require superuser power.
4710 * - require strict serialization.
4711 * - do not return a value
4712 */
4713 case SIOCSIFFLAGS:
4714 case SIOCSIFMETRIC:
4715 case SIOCSIFMTU:
4716 case SIOCSIFMAP:
4717 case SIOCSIFHWADDR:
4718 case SIOCSIFSLAVE:
4719 case SIOCADDMULTI:
4720 case SIOCDELMULTI:
4721 case SIOCSIFHWBROADCAST:
4722 case SIOCSIFTXQLEN:
4723 case SIOCSMIIREG:
4724 case SIOCBONDENSLAVE:
4725 case SIOCBONDRELEASE:
4726 case SIOCBONDSETHWADDR:
4727 case SIOCBONDCHANGEACTIVE:
4728 case SIOCBRADDIF:
4729 case SIOCBRDELIF:
4730 case SIOCSHWTSTAMP:
4731 if (!capable(CAP_NET_ADMIN))
4732 return -EPERM;
4733 /* fall through */
4734 case SIOCBONDSLAVEINFOQUERY:
4735 case SIOCBONDINFOQUERY:
4736 dev_load(net, ifr.ifr_name);
4737 rtnl_lock();
4738 ret = dev_ifsioc(net, &ifr, cmd);
4739 rtnl_unlock();
4740 return ret;
4741
4742 case SIOCGIFMEM:
4743 /* Get the per device memory space. We can add this but
4744 * currently do not support it */
4745 case SIOCSIFMEM:
4746 /* Set the per device memory buffer space.
4747 * Not applicable in our case */
4748 case SIOCSIFLINK:
4749 return -EINVAL;
4750
4751 /*
4752 * Unknown or private ioctl.
4753 */
4754 default:
4755 if (cmd == SIOCWANDEV ||
4756 (cmd >= SIOCDEVPRIVATE &&
4757 cmd <= SIOCDEVPRIVATE + 15)) {
881d966b 4758 dev_load(net, ifr.ifr_name);
1da177e4 4759 rtnl_lock();
881d966b 4760 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4 4761 rtnl_unlock();
d1b19dff
ED
4762 if (!ret && copy_to_user(arg, &ifr,
4763 sizeof(struct ifreq)))
4764 ret = -EFAULT;
1da177e4 4765 return ret;
d1b19dff
ED
4766 }
4767 /* Take care of Wireless Extensions */
4768 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4769 return wext_handle_ioctl(net, &ifr, cmd, arg);
4770 return -EINVAL;
1da177e4
LT
4771 }
4772}
4773
4774
4775/**
4776 * dev_new_index - allocate an ifindex
c4ea43c5 4777 * @net: the applicable net namespace
1da177e4
LT
4778 *
4779 * Returns a suitable unique value for a new device interface
4780 * number. The caller must hold the rtnl semaphore or the
4781 * dev_base_lock to be sure it remains unique.
4782 */
881d966b 4783static int dev_new_index(struct net *net)
1da177e4
LT
4784{
4785 static int ifindex;
4786 for (;;) {
4787 if (++ifindex <= 0)
4788 ifindex = 1;
881d966b 4789 if (!__dev_get_by_index(net, ifindex))
1da177e4
LT
4790 return ifindex;
4791 }
4792}
4793
1da177e4 4794/* Delayed registration/unregistration */
3b5b34fd 4795static LIST_HEAD(net_todo_list);
1da177e4 4796
6f05f629 4797static void net_set_todo(struct net_device *dev)
1da177e4 4798{
1da177e4 4799 list_add_tail(&dev->todo_list, &net_todo_list);
1da177e4
LT
4800}
4801
9b5e383c 4802static void rollback_registered_many(struct list_head *head)
93ee31f1 4803{
e93737b0 4804 struct net_device *dev, *tmp;
9b5e383c 4805
93ee31f1
DL
4806 BUG_ON(dev_boot_phase);
4807 ASSERT_RTNL();
4808
e93737b0 4809 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 4810 /* Some devices call without registering
e93737b0
KK
4811 * for initialization unwind. Remove those
4812 * devices and proceed with the remaining.
9b5e383c
ED
4813 */
4814 if (dev->reg_state == NETREG_UNINITIALIZED) {
4815 pr_debug("unregister_netdevice: device %s/%p never "
4816 "was registered\n", dev->name, dev);
93ee31f1 4817
9b5e383c 4818 WARN_ON(1);
e93737b0
KK
4819 list_del(&dev->unreg_list);
4820 continue;
9b5e383c 4821 }
93ee31f1 4822
9b5e383c 4823 BUG_ON(dev->reg_state != NETREG_REGISTERED);
93ee31f1 4824
9b5e383c
ED
4825 /* If device is running, close it first. */
4826 dev_close(dev);
93ee31f1 4827
9b5e383c
ED
4828 /* And unlink it from device chain. */
4829 unlist_netdevice(dev);
93ee31f1 4830
9b5e383c
ED
4831 dev->reg_state = NETREG_UNREGISTERING;
4832 }
93ee31f1
DL
4833
4834 synchronize_net();
4835
9b5e383c
ED
4836 list_for_each_entry(dev, head, unreg_list) {
4837 /* Shutdown queueing discipline. */
4838 dev_shutdown(dev);
93ee31f1
DL
4839
4840
9b5e383c
ED
4841 /* Notify protocols, that we are about to destroy
4842 this device. They should clean all the things.
4843 */
4844 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 4845
a2835763
PM
4846 if (!dev->rtnl_link_ops ||
4847 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4848 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4849
9b5e383c
ED
4850 /*
4851 * Flush the unicast and multicast chains
4852 */
a748ee24 4853 dev_uc_flush(dev);
22bedad3 4854 dev_mc_flush(dev);
93ee31f1 4855
9b5e383c
ED
4856 if (dev->netdev_ops->ndo_uninit)
4857 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 4858
9b5e383c
ED
4859 /* Notifier chain MUST detach us from master device. */
4860 WARN_ON(dev->master);
93ee31f1 4861
9b5e383c
ED
4862 /* Remove entries from kobject tree */
4863 netdev_unregister_kobject(dev);
4864 }
93ee31f1 4865
a5ee1551 4866 /* Process any work delayed until the end of the batch */
e5e26d75 4867 dev = list_first_entry(head, struct net_device, unreg_list);
a5ee1551 4868 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
93ee31f1 4869
a5ee1551 4870 synchronize_net();
395264d5 4871
a5ee1551 4872 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
4873 dev_put(dev);
4874}
4875
4876static void rollback_registered(struct net_device *dev)
4877{
4878 LIST_HEAD(single);
4879
4880 list_add(&dev->unreg_list, &single);
4881 rollback_registered_many(&single);
93ee31f1
DL
4882}
4883
e8a0464c
DM
4884static void __netdev_init_queue_locks_one(struct net_device *dev,
4885 struct netdev_queue *dev_queue,
4886 void *_unused)
c773e847
DM
4887{
4888 spin_lock_init(&dev_queue->_xmit_lock);
cf508b12 4889 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
c773e847
DM
4890 dev_queue->xmit_lock_owner = -1;
4891}
4892
4893static void netdev_init_queue_locks(struct net_device *dev)
4894{
e8a0464c
DM
4895 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4896 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
c773e847
DM
4897}
4898
b63365a2
HX
4899unsigned long netdev_fix_features(unsigned long features, const char *name)
4900{
4901 /* Fix illegal SG+CSUM combinations. */
4902 if ((features & NETIF_F_SG) &&
4903 !(features & NETIF_F_ALL_CSUM)) {
4904 if (name)
4905 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4906 "checksum feature.\n", name);
4907 features &= ~NETIF_F_SG;
4908 }
4909
4910 /* TSO requires that SG is present as well. */
4911 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4912 if (name)
4913 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4914 "SG feature.\n", name);
4915 features &= ~NETIF_F_TSO;
4916 }
4917
4918 if (features & NETIF_F_UFO) {
4919 if (!(features & NETIF_F_GEN_CSUM)) {
4920 if (name)
4921 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4922 "since no NETIF_F_HW_CSUM feature.\n",
4923 name);
4924 features &= ~NETIF_F_UFO;
4925 }
4926
4927 if (!(features & NETIF_F_SG)) {
4928 if (name)
4929 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4930 "since no NETIF_F_SG feature.\n", name);
4931 features &= ~NETIF_F_UFO;
4932 }
4933 }
4934
4935 return features;
4936}
4937EXPORT_SYMBOL(netdev_fix_features);
4938
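/*
 * A sketch of the intended call site (driver and feature choices are
 * assumptions): a driver ORs in everything the hardware claims to
 * support, then lets netdev_fix_features() above drop illegal
 * combinations with a log message before registration.
 */
static void my_init_features(struct net_device *dev)
{
	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	dev->features = netdev_fix_features(dev->features, dev->name);
}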
fc4a7489
PM
4939/**
4940 * netif_stacked_transfer_operstate - transfer operstate
4941 * @rootdev: the root or lower level device to transfer state from
4942 * @dev: the device to transfer operstate to
4943 *
4944 * Transfer operational state from root to device. This is normally
4945 * called when a stacking relationship exists between the root
 4946 * device and the device (a leaf device).
4947 */
4948void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4949 struct net_device *dev)
4950{
4951 if (rootdev->operstate == IF_OPER_DORMANT)
4952 netif_dormant_on(dev);
4953 else
4954 netif_dormant_off(dev);
4955
4956 if (netif_carrier_ok(rootdev)) {
4957 if (!netif_carrier_ok(dev))
4958 netif_carrier_on(dev);
4959 } else {
4960 if (netif_carrier_ok(dev))
4961 netif_carrier_off(dev);
4962 }
4963}
4964EXPORT_SYMBOL(netif_stacked_transfer_operstate);
4965
1da177e4
LT
4966/**
4967 * register_netdevice - register a network device
4968 * @dev: device to register
4969 *
4970 * Take a completed network device structure and add it to the kernel
4971 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4972 * chain. 0 is returned on success. A negative errno code is returned
4973 * on a failure to set up the device, or if the name is a duplicate.
4974 *
4975 * Callers must hold the rtnl semaphore. You may want
4976 * register_netdev() instead of this.
4977 *
4978 * BUGS:
4979 * The locking appears insufficient to guarantee two parallel registers
4980 * will not get the same name.
4981 */
4982
4983int register_netdevice(struct net_device *dev)
4984{
1da177e4 4985 int ret;
d314774c 4986 struct net *net = dev_net(dev);
1da177e4
LT
4987
4988 BUG_ON(dev_boot_phase);
4989 ASSERT_RTNL();
4990
b17a7c17
SH
4991 might_sleep();
4992
1da177e4
LT
4993 /* When net_device's are persistent, this will be fatal. */
4994 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 4995 BUG_ON(!net);
1da177e4 4996
f1f28aa3 4997 spin_lock_init(&dev->addr_list_lock);
cf508b12 4998 netdev_set_addr_lockdep_class(dev);
c773e847 4999 netdev_init_queue_locks(dev);
1da177e4 5000
1da177e4
LT
5001 dev->iflink = -1;
5002
df334545 5003#ifdef CONFIG_RPS
0a9627f2
TH
5004 if (!dev->num_rx_queues) {
5005 /*
5006 * Allocate a single RX queue if driver never called
5007 * alloc_netdev_mq
5008 */
5009
5010 dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
5011 if (!dev->_rx) {
5012 ret = -ENOMEM;
5013 goto out;
5014 }
5015
5016 dev->_rx->first = dev->_rx;
5017 atomic_set(&dev->_rx->count, 1);
5018 dev->num_rx_queues = 1;
5019 }
df334545 5020#endif
1da177e4 5021 /* Init, if this function is available */
d314774c
SH
5022 if (dev->netdev_ops->ndo_init) {
5023 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
5024 if (ret) {
5025 if (ret > 0)
5026 ret = -EIO;
90833aa4 5027 goto out;
1da177e4
LT
5028 }
5029 }
4ec93edb 5030
8ce6cebc 5031 ret = dev_get_valid_name(dev, dev->name, 0);
d9031024 5032 if (ret)
7ce1b0ed 5033 goto err_uninit;
1da177e4 5034
881d966b 5035 dev->ifindex = dev_new_index(net);
1da177e4
LT
5036 if (dev->iflink == -1)
5037 dev->iflink = dev->ifindex;
5038
d212f87b
SH
5039 /* Fix illegal checksum combinations */
5040 if ((dev->features & NETIF_F_HW_CSUM) &&
5041 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5042 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5043 dev->name);
5044 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5045 }
5046
5047 if ((dev->features & NETIF_F_NO_CSUM) &&
5048 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5049 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5050 dev->name);
5051 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5052 }
5053
b63365a2 5054 dev->features = netdev_fix_features(dev->features, dev->name);
1da177e4 5055
e5a4a72d
LB
5056 /* Enable software GSO if SG is supported. */
5057 if (dev->features & NETIF_F_SG)
5058 dev->features |= NETIF_F_GSO;
5059
7ffbe3fd
JB
5060 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5061 ret = notifier_to_errno(ret);
5062 if (ret)
5063 goto err_uninit;
5064
8b41d188 5065 ret = netdev_register_kobject(dev);
b17a7c17 5066 if (ret)
7ce1b0ed 5067 goto err_uninit;
b17a7c17
SH
5068 dev->reg_state = NETREG_REGISTERED;
5069
1da177e4
LT
5070 /*
5071 * Default initial state at registry is that the
5072 * device is present.
5073 */
5074
5075 set_bit(__LINK_STATE_PRESENT, &dev->state);
5076
1da177e4 5077 dev_init_scheduler(dev);
1da177e4 5078 dev_hold(dev);
ce286d32 5079 list_netdevice(dev);
1da177e4
LT
5080
5081 /* Notify protocols, that a new device appeared. */
056925ab 5082 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 5083 ret = notifier_to_errno(ret);
93ee31f1
DL
5084 if (ret) {
5085 rollback_registered(dev);
5086 dev->reg_state = NETREG_UNREGISTERED;
5087 }
d90a909e
EB
5088 /*
5089 * Prevent userspace races by waiting until the network
5090 * device is fully setup before sending notifications.
5091 */
a2835763
PM
5092 if (!dev->rtnl_link_ops ||
5093 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5094 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1da177e4
LT
5095
5096out:
5097 return ret;
7ce1b0ed
HX
5098
5099err_uninit:
d314774c
SH
5100 if (dev->netdev_ops->ndo_uninit)
5101 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 5102 goto out;
1da177e4 5103}
d1b19dff 5104EXPORT_SYMBOL(register_netdevice);
1da177e4 5105
937f1ba5
BH
5106/**
5107 * init_dummy_netdev - init a dummy network device for NAPI
5108 * @dev: device to init
5109 *
 5110 * This takes a network device structure and initializes the minimum
 5111 * number of fields so it can be used to schedule NAPI polls without
5112 * registering a full blown interface. This is to be used by drivers
5113 * that need to tie several hardware interfaces to a single NAPI
5114 * poll scheduler due to HW limitations.
5115 */
5116int init_dummy_netdev(struct net_device *dev)
5117{
5118 /* Clear everything. Note we don't initialize spinlocks
 5119 * as they aren't supposed to be taken by any of the
5120 * NAPI code and this dummy netdev is supposed to be
5121 * only ever used for NAPI polls
5122 */
5123 memset(dev, 0, sizeof(struct net_device));
5124
5125 /* make sure we BUG if trying to hit standard
5126 * register/unregister code path
5127 */
5128 dev->reg_state = NETREG_DUMMY;
5129
5130 /* initialize the ref count */
5131 atomic_set(&dev->refcnt, 1);
5132
5133 /* NAPI wants this */
5134 INIT_LIST_HEAD(&dev->napi_list);
5135
5136 /* a dummy interface is started by default */
5137 set_bit(__LINK_STATE_PRESENT, &dev->state);
5138 set_bit(__LINK_STATE_START, &dev->state);
5139
5140 return 0;
5141}
5142EXPORT_SYMBOL_GPL(init_dummy_netdev);
5143
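/*
 * An illustrative sketch (not from this file) of the intended use:
 * hardware with several interfaces behind one interrupt can hang a
 * single NAPI context off a dummy netdev instead of a real one;
 * my_poll is the hypothetical poll callback sketched earlier.
 */
static struct net_device my_dummy_dev;
static struct napi_struct my_shared_napi;

static void my_shared_napi_init(void)
{
	init_dummy_netdev(&my_dummy_dev);
	netif_napi_add(&my_dummy_dev, &my_shared_napi, my_poll, 64);
	napi_enable(&my_shared_napi);
}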
5144
1da177e4
LT
5145/**
5146 * register_netdev - register a network device
5147 * @dev: device to register
5148 *
5149 * Take a completed network device structure and add it to the kernel
5150 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5151 * chain. 0 is returned on success. A negative errno code is returned
5152 * on a failure to set up the device, or if the name is a duplicate.
5153 *
38b4da38 5154 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
5155 * and expands the device name if you passed a format string to
5156 * alloc_netdev.
5157 */
5158int register_netdev(struct net_device *dev)
5159{
5160 int err;
5161
5162 rtnl_lock();
5163
5164 /*
5165 * If the name is a format string the caller wants us to do a
5166 * name allocation.
5167 */
5168 if (strchr(dev->name, '%')) {
5169 err = dev_alloc_name(dev, dev->name);
5170 if (err < 0)
5171 goto out;
5172 }
4ec93edb 5173
1da177e4
LT
5174 err = register_netdevice(dev);
5175out:
5176 rtnl_unlock();
5177 return err;
5178}
5179EXPORT_SYMBOL(register_netdev);
5180
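/*
 * A module-lifecycle sketch under assumed names: allocate, register,
 * and release in reverse order on the error and exit paths. The "%d"
 * in the name is expanded by register_netdev() as documented above.
 */
static struct net_device *my_dev;

static int __init my_module_init(void)
{
	int err;

	my_dev = alloc_netdev(sizeof(struct my_priv), "myeth%d", ether_setup);
	if (!my_dev)
		return -ENOMEM;
	err = register_netdev(my_dev);
	if (err)
		free_netdev(my_dev);
	return err;
}

static void __exit my_module_exit(void)
{
	unregister_netdev(my_dev);	/* takes the RTNL itself */
	free_netdev(my_dev);
}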
5181/*
5182 * netdev_wait_allrefs - wait until all references are gone.
5183 *
5184 * This is called when unregistering network devices.
5185 *
5186 * Any protocol or device that holds a reference should register
5187 * for netdevice notification, and cleanup and put back the
5188 * reference if they receive an UNREGISTER event.
5189 * We can get stuck here if buggy protocols don't correctly
4ec93edb 5190 * call dev_put.
1da177e4
LT
5191 */
5192static void netdev_wait_allrefs(struct net_device *dev)
5193{
5194 unsigned long rebroadcast_time, warning_time;
5195
e014debe
ED
5196 linkwatch_forget_dev(dev);
5197
1da177e4
LT
5198 rebroadcast_time = warning_time = jiffies;
5199 while (atomic_read(&dev->refcnt) != 0) {
5200 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 5201 rtnl_lock();
1da177e4
LT
5202
5203 /* Rebroadcast unregister notification */
056925ab 5204 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5205 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
395264d5 5206 * should have already handled it the first time */
1da177e4
LT
5207
5208 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5209 &dev->state)) {
5210 /* We must not have linkwatch events
5211 * pending on unregister. If this
5212 * happens, we simply run the queue
5213 * unscheduled, resulting in a noop
5214 * for this device.
5215 */
5216 linkwatch_run_queue();
5217 }
5218
6756ae4b 5219 __rtnl_unlock();
1da177e4
LT
5220
5221 rebroadcast_time = jiffies;
5222 }
5223
5224 msleep(250);
5225
5226 if (time_after(jiffies, warning_time + 10 * HZ)) {
5227 printk(KERN_EMERG "unregister_netdevice: "
5228 "waiting for %s to become free. Usage "
5229 "count = %d\n",
5230 dev->name, atomic_read(&dev->refcnt));
5231 warning_time = jiffies;
5232 }
5233 }
5234}
5235
5236/* The sequence is:
5237 *
5238 * rtnl_lock();
5239 * ...
5240 * register_netdevice(x1);
5241 * register_netdevice(x2);
5242 * ...
5243 * unregister_netdevice(y1);
5244 * unregister_netdevice(y2);
5245 * ...
5246 * rtnl_unlock();
5247 * free_netdev(y1);
5248 * free_netdev(y2);
5249 *
58ec3b4d 5250 * We are invoked by rtnl_unlock().
1da177e4 5251 * This allows us to deal with problems:
b17a7c17 5252 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
5253 * without deadlocking with linkwatch via keventd.
5254 * 2) Since we run with the RTNL semaphore not held, we can sleep
5255 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
5256 *
5257 * We must not return until all unregister events added during
5258 * the interval the lock was held have been completed.
1da177e4 5259 */
1da177e4
LT
5260void netdev_run_todo(void)
5261{
626ab0e6 5262 struct list_head list;
1da177e4 5263
1da177e4 5264 /* Snapshot list, allow later requests */
626ab0e6 5265 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
5266
5267 __rtnl_unlock();
626ab0e6 5268
1da177e4
LT
5269 while (!list_empty(&list)) {
5270 struct net_device *dev
e5e26d75 5271 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
5272 list_del(&dev->todo_list);
5273
b17a7c17
SH
5274 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5275 printk(KERN_ERR "network todo '%s' but state %d\n",
5276 dev->name, dev->reg_state);
5277 dump_stack();
5278 continue;
5279 }
1da177e4 5280
b17a7c17 5281 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 5282
152102c7 5283 on_each_cpu(flush_backlog, dev, 1);
6e583ce5 5284
b17a7c17 5285 netdev_wait_allrefs(dev);
1da177e4 5286
b17a7c17
SH
5287 /* paranoia */
5288 BUG_ON(atomic_read(&dev->refcnt));
547b792c
IJ
5289 WARN_ON(dev->ip_ptr);
5290 WARN_ON(dev->ip6_ptr);
5291 WARN_ON(dev->dn_ptr);
1da177e4 5292
b17a7c17
SH
5293 if (dev->destructor)
5294 dev->destructor(dev);
9093bbb2
SH
5295
5296 /* Free network device */
5297 kobject_put(&dev->dev.kobj);
1da177e4 5298 }
1da177e4
LT
5299}
5300
d83345ad
ED
5301/**
5302 * dev_txq_stats_fold - fold tx_queues stats
5303 * @dev: device to get statistics from
3cfde79c 5304 * @stats: struct rtnl_link_stats64 to hold results
d83345ad
ED
5305 */
5306void dev_txq_stats_fold(const struct net_device *dev,
3cfde79c 5307 struct rtnl_link_stats64 *stats)
d83345ad 5308{
bd27290a 5309 u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
d83345ad
ED
5310 unsigned int i;
5311 struct netdev_queue *txq;
5312
5313 for (i = 0; i < dev->num_tx_queues; i++) {
5314 txq = netdev_get_tx_queue(dev, i);
bd27290a 5315 spin_lock_bh(&txq->_xmit_lock);
d83345ad
ED
5316 tx_bytes += txq->tx_bytes;
5317 tx_packets += txq->tx_packets;
5318 tx_dropped += txq->tx_dropped;
bd27290a 5319 spin_unlock_bh(&txq->_xmit_lock);
d83345ad
ED
5320 }
5321 if (tx_bytes || tx_packets || tx_dropped) {
5322 stats->tx_bytes = tx_bytes;
5323 stats->tx_packets = tx_packets;
5324 stats->tx_dropped = tx_dropped;
5325 }
5326}
5327EXPORT_SYMBOL(dev_txq_stats_fold);
5328
3cfde79c
BH
5329/* Convert net_device_stats to rtnl_link_stats64. They have the same
5330 * fields in the same order, with only the type differing.
5331 */
5332static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5333 const struct net_device_stats *netdev_stats)
5334{
5335#if BITS_PER_LONG == 64
5336 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5337 memcpy(stats64, netdev_stats, sizeof(*stats64));
5338#else
5339 size_t i, n = sizeof(*stats64) / sizeof(u64);
5340 const unsigned long *src = (const unsigned long *)netdev_stats;
5341 u64 *dst = (u64 *)stats64;
5342
5343 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5344 sizeof(*stats64) / sizeof(u64));
5345 for (i = 0; i < n; i++)
5346 dst[i] = src[i];
5347#endif
5348}
5349
eeda3fd6
SH
5350/**
5351 * dev_get_stats - get network device statistics
5352 * @dev: device to get statistics from
28172739 5353 * @storage: place to store stats
eeda3fd6 5354 *
d7753516
BH
5355 * Get network statistics from device. Return @storage.
5356 * The device driver may provide its own method by setting
5357 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5358 * otherwise the internal statistics structure is used.
eeda3fd6 5359 */
d7753516
BH
5360struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5361 struct rtnl_link_stats64 *storage)
7004bf25 5362{
eeda3fd6
SH
5363 const struct net_device_ops *ops = dev->netdev_ops;
5364
28172739
ED
5365 if (ops->ndo_get_stats64) {
5366 memset(storage, 0, sizeof(*storage));
5367 return ops->ndo_get_stats64(dev, storage);
5368 }
5369 if (ops->ndo_get_stats) {
3cfde79c 5370 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
28172739
ED
5371 return storage;
5372 }
3cfde79c
BH
5373 netdev_stats_to_stats64(storage, &dev->stats);
5374 dev_txq_stats_fold(dev, storage);
28172739 5375 return storage;
c45d286e 5376}
eeda3fd6 5377EXPORT_SYMBOL(dev_get_stats);
c45d286e 5378
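/*
 * A driver-side sketch of the preferred ndo_get_stats64 hook consumed
 * by dev_get_stats() above (the counter fields in "my_priv" are
 * assumptions): @storage arrives zeroed, is filled, and is returned.
 */
static struct rtnl_link_stats64 *my_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *storage)
{
	struct my_priv *priv = netdev_priv(dev);

	storage->rx_packets = priv->rx_packets;
	storage->rx_bytes   = priv->rx_bytes;
	storage->tx_packets = priv->tx_packets;
	storage->tx_bytes   = priv->tx_bytes;
	return storage;
}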
dc2b4847 5379static void netdev_init_one_queue(struct net_device *dev,
e8a0464c
DM
5380 struct netdev_queue *queue,
5381 void *_unused)
dc2b4847 5382{
dc2b4847
DM
5383 queue->dev = dev;
5384}
5385
bb949fbd
DM
5386static void netdev_init_queues(struct net_device *dev)
5387{
e8a0464c
DM
5388 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5389 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
c3f26a26 5390 spin_lock_init(&dev->tx_global_lock);
bb949fbd
DM
5391}
5392
1da177e4 5393/**
f25f4e44 5394 * alloc_netdev_mq - allocate network device
1da177e4
LT
5395 * @sizeof_priv: size of private data to allocate space for
5396 * @name: device name format string
5397 * @setup: callback to initialize device
f25f4e44 5398 * @queue_count: the number of subqueues to allocate
1da177e4
LT
5399 *
5400 * Allocates a struct net_device with private data area for driver use
f25f4e44
PWJ
 5401 * and performs basic initialization. Also allocates subqueue structs
5402 * for each queue on the device at the end of the netdevice.
1da177e4 5403 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;
#ifdef CONFIG_RPS
	struct netdev_rx_queue *rx;
	int i;
#endif

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
		goto free_p;
	}

#ifdef CONFIG_RPS
	rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "rx queues.\n");
		goto free_tx;
	}

	atomic_set(&rx->count, queue_count);

	/*
	 * Set a pointer to first element in the array which holds the
	 * reference count.
	 */
	for (i = 0; i < queue_count; i++)
		rx[i].first = rx;
#endif

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	if (dev_addr_init(dev))
		goto free_rx;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

#ifdef CONFIG_RPS
	dev->_rx = rx;
	dev->num_rx_queues = queue_count;
#endif

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
	dev->ethtool_ntuple_list.count = 0;
	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_rx:
#ifdef CONFIG_RPS
	kfree(rx);
free_tx:
#endif
	kfree(tx);
free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);
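
/*
 * Allocation sketch (illustrative, not part of dev.c): a hypothetical
 * Ethernet driver would pair alloc_netdev_mq() with ether_setup() and
 * size the private area via sizeof_priv. Disabled example; the names
 * and queue count are assumptions, not a definitive driver.
 */
#if 0
struct example_priv {
	int example_field;		/* hypothetical driver state */
};

static struct net_device *example_alloc(void)
{
	struct example_priv *priv;
	struct net_device *dev;

	dev = alloc_netdev_mq(sizeof(struct example_priv), "ex%d",
			      ether_setup, 4);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);	/* area sized by sizeof_priv */
	priv->example_field = 0;
	return dev;
}
#endif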

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	/* Clear ethtool n-tuple list */
	ethtool_ntuple_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
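
/*
 * Teardown sketch (illustrative): after a failed register_netdevice()
 * the device is still NETREG_UNINITIALIZED, so free_netdev() releases
 * the memory directly; after a successful unregister it defers to the
 * embedded struct device's release function. Hypothetical helper:
 */
#if 0
static int example_register(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	if (err)
		free_netdev(dev);	/* safe: still UNINITIALIZED */
	return err;
}
#endif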

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
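
/*
 * Typical use (sketch): unhook a receive handler with the non-syncing
 * __dev_remove_pack(), then wait out in-flight receivers before
 * freeing the handler's state. The function name is hypothetical.
 */
#if 0
static void example_remove_handler(struct packet_type *pt)
{
	__dev_remove_pack(pt);	/* unhook from the ptype chains */
	synchronize_net();	/* wait for concurrent receivers */
	/* now safe to free anything pt->func() was using */
}
#endif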

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */

void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
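
/*
 * Batching sketch (illustrative): queue devices on a local list via
 * unregister_netdevice_queue(), then tear them down in one pass to
 * amortize the synchronization in rollback_registered_many(). Assumes
 * the (hypothetical) caller already holds RTNL.
 */
#if 0
static void example_unregister_group(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
}
#endif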

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
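
/*
 * Module-exit sketch (hypothetical driver): unregister_netdev() takes
 * RTNL itself, so an exit path can call it directly and then drop the
 * final reference with free_netdev().
 */
#if 0
static void example_module_exit(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes and releases RTNL */
	free_netdev(dev);		/* final reference drop */
}
#endif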

/**
 * dev_change_net_namespace - move device to different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(dev, pat, 1))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
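
/*
 * Move sketch (illustrative): push a device into another namespace,
 * falling back to a "moved%d" name pattern on a name collision. The
 * helper is hypothetical and assumes the caller holds RTNL and a valid
 * reference on @net.
 */
#if 0
static int example_move(struct net_device *dev, struct net *net)
{
	ASSERT_RTNL();
	return dev_change_net_namespace(dev, net, "moved%d");
}
#endif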

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}


/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
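
/*
 * Aggregation sketch (illustrative): a master device such as a bond
 * would fold each slave's features into its own set, starting from a
 * permissive initial value. The helper and starting set below are
 * assumptions, not the actual bonding code.
 */
#if 0
static unsigned long example_master_features(struct net_device *slaves[],
					     int n, unsigned long mask)
{
	unsigned long all = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_GSO;
	int i;

	for (i = 0; i < n; i++)
		all = netdev_increment_features(all, slaves[i]->features,
						mask);
	return all;
}
#endif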

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 * @buffer: buffer for resulting name
 * @len: size of buffer
 *
 * Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
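
/*
 * Usage sketch (illustrative): watchdog-style diagnostics copy the
 * driver name into a caller-supplied buffer; an empty string comes
 * back when no parent driver is attached. Hypothetical helper:
 */
#if 0
static void example_report_driver(struct net_device *dev)
{
	char drivername[64];

	netdev_drivername(dev, drivername, sizeof(drivername));
	printk(KERN_INFO "%s: driver is '%s'\n", dev->name, drivername);
}
#endif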

static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);
	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
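
/*
 * Logging sketch (illustrative): the generated helpers take a device
 * plus a printf-style format and prefix each message with the device
 * name (and its parent bus device, when one exists).
 */
#if 0
static void example_log(struct net_device *dev, int err)
{
	netdev_err(dev, "example failure, error %d\n", err);
	netdev_info(dev, "link is up\n");
}
#endif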

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);