net-next-2.6.git / net/core/dev.c
(blame view at commit "vlan: Avoid hash table lookup to find group.")
/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16? Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking of protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation: if a packet-mangling protocol handler were first in
 *	the list, it could not sense that the packet is cloned and should
 *	be copied-on-write, so it would modify the clone in place and
 *	subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

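/*
 * Example (annotation, not part of dev.c): a minimal sketch of how a module
 * might use dev_add_pack()/dev_remove_pack(). The ethertype 0x88b5 (an IEEE
 * local-experimental value) and all "example_*" names are hypothetical.
 */
#if 0	/* illustration only */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* Runs from the RX path under rcu_read_lock(); must not sleep. */
	kfree_skb(skb);		/* consume our reference to the buffer */
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
	.type = cpu_to_be16(0x88b5),	/* hashed into ptype_base */
	.func = example_rcv,		/* .dev == NULL: match any device */
};

/* module init: */	dev_add_pack(&example_pt);
/* module exit: */	dev_remove_pack(&example_pt);	/* sleeps: synchronize_net() */
#endif
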
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

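/*
 * Example (annotation, not part of dev.c): given the parser above, a boot
 * command line of the hypothetical form
 *
 *	netdev=5,0x340,0,0,eth0
 *
 * is split by get_options() into ints[] = { 4, 5, 0x340, 0, 0 } with the
 * remainder "eth0", so the saved entry records irq 5 and base address 0x340
 * for the device that later probes as eth0. The concrete values here are
 * illustrative only.
 */
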
/*******************************************************************************

			Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

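/*
 * Example (annotation, not part of dev.c): the two lookup disciplines above
 * as caller code. The device name "eth0" and the use() placeholder are
 * hypothetical.
 */
#if 0	/* illustration only */
	/* Lockless, no refcount: pointer is valid only inside the RCU section. */
	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");
	if (dev)
		use(dev);		/* hypothetical; must not sleep */
	rcu_read_unlock();

	/* Refcounted: pointer stays valid until dev_put(). */
	dev = dev_get_by_name(net, "eth0");
	if (dev) {
		use(dev);		/* may sleep */
		dev_put(dev);
	}
#endif
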
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

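/*
 * Example (annotation, not part of dev.c): a driver registering a device
 * under a wildcard name. The "foo%d" format is hypothetical;
 * dev_alloc_name() picks the lowest free unit, e.g. foo0, then foo1, ...
 */
#if 0	/* illustration only */
	err = dev_alloc_name(dev, "foo%d");
	if (err < 0)
		goto out_free;
	/* dev->name now holds e.g. "foo0"; err is the unit number. */
#endif
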
static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt)
{
	struct net *net;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return dev_alloc_name(dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change the name of a device; format strings such as "eth%d" can
 *	be passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(dev, newname, 1);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load - load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 * Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 * Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 * If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 * Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 * Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 * Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 * Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 * Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 * Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 * ... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

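/*
 * Example (annotation, not part of dev.c): bringing a device up from kernel
 * code. dev_open() asserts the RTNL lock (see ASSERT_RTNL() in __dev_open()),
 * so a caller outside the rtnetlink path takes it explicitly.
 */
#if 0	/* illustration only */
	rtnl_lock();
	err = dev_open(dev);	/* 0 on success, or if already IFF_UP */
	rtnl_unlock();
#endif
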
static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 * Tell people we are going down, so that they can
	 * prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 * Call the device specific close. This cannot fail.
	 * Only if device is UP
	 *
	 * We allow it to be called even after a DETACH hot-plug
	 * event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 * Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 * Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

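/*
 * Example (annotation, not part of dev.c): a minimal netdevice notifier as a
 * subsystem might register one. The "example_*" names are hypothetical; the
 * event values and registration calls are the ones defined above. In this
 * kernel version the notifier's void pointer is the net_device itself.
 */
#if 0	/* illustration only */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		/* dev has just been opened */
		break;
	case NETDEV_GOING_DOWN:
		/* dev is about to be closed; it is still operating */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_notifier = {
	.notifier_call = example_netdev_event,
};

/* init: */	register_netdevice_notifier(&example_notifier);
/* exit: */	unregister_netdevice_notifier(&example_notifier);
#endif
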
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);
	nf_reset(skb);

	if (unlikely(!(dev->flags & IFF_UP) ||
		     (skb->len > (dev->mtu + dev->hard_header_len)))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

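/*
 * Example (annotation, not part of dev.c): the pattern the comment above
 * describes - a pair device (veth-like) injecting a transmitted skb into its
 * peer's receive path. The "example_*" names are hypothetical.
 */
#if 0	/* illustration only */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);	/* hypothetical */

	/* dev_forward_skb() scrubs namespace state, retags the skb for the
	 * peer and hands it to netif_rx(); it frees the skb on drop. */
	dev_forward_skb(peer, skb);
	return NETDEV_TX_OK;
}
#endif
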
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp_set(skb);
#else
	net_timestamp_set(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       ntohs(skb2->protocol),
					       dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}

/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_RPS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

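/*
 * Example (annotation, not part of dev.c): a multiqueue driver shrinking its
 * active queue counts after probing the hardware. The channel count is
 * hypothetical, and note the RX variant above only exists under CONFIG_RPS.
 */
#if 0	/* illustration only */
	/* alloc_etherdev_mq() sized the queue arrays; tell the stack how
	 * many queues are actually usable. After registration this needs
	 * the RTNL lock (see the ASSERT_RTNL() checks above). */
	rtnl_lock();
	err = netif_set_real_num_tx_queues(dev, nr_hw_channels);
	if (!err)
		err = netif_set_real_num_rx_queues(dev, nr_hw_channels);
	rtnl_unlock();
#endif
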
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


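/*
 * Example (annotation, not part of dev.c): dev_kfree_skb_any() is what a TX
 * completion handler uses when it may run in either hardirq or process
 * context. The "example_*" names are hypothetical.
 */
#if 0	/* illustration only */
static void example_tx_complete(struct example_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = example_next_done(ring)) != NULL)
		dev_kfree_skb_any(skb);	/* defers to softirq when in hardirq */
}
#endif
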
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

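/*
 * Example (annotation, not part of dev.c): the usual suspend/resume pairing
 * for the two helpers above, as a PCI driver might write it. Names other
 * than the netif_* and pci_* calls are hypothetical.
 */
#if 0	/* illustration only */
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stop all TX queues if running */
	/* ... quiesce hardware, save state ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... restore hardware state ... */
	netif_device_attach(dev);	/* wake queues, restart watchdog */
	return 0;
}
#endif
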
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	int features = dev->features;

	if (vlan_tx_tag_present(skb))
		features &= dev->vlan_features;

	if (can_checksum_protocol(features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/**
 * skb_set_dev - assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

1da177e4
LT
1745/*
1746 * Invalidate hardware checksum when packet is to be mangled, and
1747 * complete checksum manually on outgoing path.
1748 */
84fa7933 1749int skb_checksum_help(struct sk_buff *skb)
1da177e4 1750{
d3bc23e7 1751 __wsum csum;
663ead3b 1752 int ret = 0, offset;
1da177e4 1753
84fa7933 1754 if (skb->ip_summed == CHECKSUM_COMPLETE)
a430a43d
HX
1755 goto out_set_summed;
1756
1757 if (unlikely(skb_shinfo(skb)->gso_size)) {
a430a43d
HX
1758 /* Let GSO fix up the checksum. */
1759 goto out_set_summed;
1da177e4
LT
1760 }
1761
a030847e
HX
1762 offset = skb->csum_start - skb_headroom(skb);
1763 BUG_ON(offset >= skb_headlen(skb));
1764 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1765
1766 offset += skb->csum_offset;
1767 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1768
1769 if (skb_cloned(skb) &&
1770 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1da177e4
LT
1771 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1772 if (ret)
1773 goto out;
1774 }
1775
a030847e 1776 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
a430a43d 1777out_set_summed:
1da177e4 1778 skb->ip_summed = CHECKSUM_NONE;
4ec93edb 1779out:
1da177e4
LT
1780 return ret;
1781}
d1b19dff 1782EXPORT_SYMBOL(skb_checksum_help);
1da177e4 1783
f6a78bfc
HX
1784/**
1785 * skb_gso_segment - Perform segmentation on skb.
1786 * @skb: buffer to segment
576a30eb 1787 * @features: features for the output path (see dev->features)
f6a78bfc
HX
1788 *
1789 * This function segments the given skb and returns a list of segments.
576a30eb
HX
1790 *
1791 * It may return NULL if the skb requires no segmentation. This is
1792 * only possible when GSO is used for verifying header integrity.
f6a78bfc 1793 */
576a30eb 1794struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
f6a78bfc
HX
1795{
1796 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1797 struct packet_type *ptype;
252e3346 1798 __be16 type = skb->protocol;
a430a43d 1799 int err;
f6a78bfc 1800
7b9c6090
JG
1801 if (type == htons(ETH_P_8021Q)) {
1802 struct vlan_ethhdr *veh;
1803
1804 if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
1805 return ERR_PTR(-EINVAL);
1806
1807 veh = (struct vlan_ethhdr *)skb->data;
1808 type = veh->h_vlan_encapsulated_proto;
1809 }
1810
459a98ed 1811 skb_reset_mac_header(skb);
b0e380b1 1812 skb->mac_len = skb->network_header - skb->mac_header;
f6a78bfc
HX
1813 __skb_pull(skb, skb->mac_len);
1814
67fd1a73
HX
1815 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1816 struct net_device *dev = skb->dev;
1817 struct ethtool_drvinfo info = {};
1818
1819 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1820 dev->ethtool_ops->get_drvinfo(dev, &info);
1821
1822 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1823 "ip_summed=%d",
1824 info.driver, dev ? dev->features : 0L,
1825 skb->sk ? skb->sk->sk_route_caps : 0L,
1826 skb->len, skb->data_len, skb->ip_summed);
1827
a430a43d
HX
1828 if (skb_header_cloned(skb) &&
1829 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1830 return ERR_PTR(err);
1831 }
1832
f6a78bfc 1833 rcu_read_lock();
82d8a867
PE
1834 list_for_each_entry_rcu(ptype,
1835 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
f6a78bfc 1836 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
84fa7933 1837 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
a430a43d
HX
1838 err = ptype->gso_send_check(skb);
1839 segs = ERR_PTR(err);
1840 if (err || skb_gso_ok(skb, features))
1841 break;
d56f90a7
ACM
1842 __skb_push(skb, (skb->data -
1843 skb_network_header(skb)));
a430a43d 1844 }
576a30eb 1845 segs = ptype->gso_segment(skb, features);
f6a78bfc
HX
1846 break;
1847 }
1848 }
1849 rcu_read_unlock();
1850
98e399f8 1851 __skb_push(skb, skb->data - skb_mac_header(skb));
576a30eb 1852
f6a78bfc
HX
1853 return segs;
1854}
f6a78bfc
HX
1855EXPORT_SYMBOL(skb_gso_segment);
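
/*
 * Illustrative sketch (not part of dev.c): walking the list returned by
 * skb_gso_segment(), much as dev_gso_segment() does below.  A NULL
 * return means the skb needed no segmentation; an ERR_PTR signals
 * failure.  foo_xmit_one() is a hypothetical transmit helper.
 */
static int foo_xmit_gso(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, dev->features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* header verification only */
		return foo_xmit_one(skb, dev);

	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		foo_xmit_one(nskb, dev);
	}
	consume_skb(skb);		/* original skb is no longer needed */
	return 0;
}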
1856
fb286bb2
HX
1857/* Take action when hardware reception checksum errors are detected. */
1858#ifdef CONFIG_BUG
1859void netdev_rx_csum_fault(struct net_device *dev)
1860{
1861 if (net_ratelimit()) {
4ec93edb 1862 printk(KERN_ERR "%s: hw csum failure.\n",
246a4212 1863 dev ? dev->name : "<unknown>");
fb286bb2
HX
1864 dump_stack();
1865 }
1866}
1867EXPORT_SYMBOL(netdev_rx_csum_fault);
1868#endif
1869
1da177e4
LT
1870/* Actually, we should eliminate this check as soon as we know that:
1871 * 1. An IOMMU is present and allows mapping of all the memory.
1872 * 2. No high memory really exists on this machine.
1873 */
1874
9092c658 1875static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 1876{
3d3a8533 1877#ifdef CONFIG_HIGHMEM
1da177e4 1878 int i;
5acbbd42
FT
1879 if (!(dev->features & NETIF_F_HIGHDMA)) {
1880 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1881 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1882 return 1;
1883 }
1da177e4 1884
5acbbd42
FT
1885 if (PCI_DMA_BUS_IS_PHYS) {
1886 struct device *pdev = dev->dev.parent;
1da177e4 1887
9092c658
ED
1888 if (!pdev)
1889 return 0;
5acbbd42
FT
1890 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1891 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1892 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1893 return 1;
1894 }
1895 }
3d3a8533 1896#endif
1da177e4
LT
1897 return 0;
1898}
1da177e4 1899
f6a78bfc
HX
1900struct dev_gso_cb {
1901 void (*destructor)(struct sk_buff *skb);
1902};
1903
1904#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1905
1906static void dev_gso_skb_destructor(struct sk_buff *skb)
1907{
1908 struct dev_gso_cb *cb;
1909
1910 do {
1911 struct sk_buff *nskb = skb->next;
1912
1913 skb->next = nskb->next;
1914 nskb->next = NULL;
1915 kfree_skb(nskb);
1916 } while (skb->next);
1917
1918 cb = DEV_GSO_CB(skb);
1919 if (cb->destructor)
1920 cb->destructor(skb);
1921}
1922
1923/**
1924 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1925 * @skb: buffer to segment
1926 *
1927 * This function segments the given skb and stores the list of segments
1928 * in skb->next.
1929 */
1930static int dev_gso_segment(struct sk_buff *skb)
1931{
1932 struct net_device *dev = skb->dev;
1933 struct sk_buff *segs;
576a30eb
HX
1934 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1935 NETIF_F_SG : 0);
1936
1937 segs = skb_gso_segment(skb, features);
1938
1939 /* Verifying header integrity only. */
1940 if (!segs)
1941 return 0;
f6a78bfc 1942
801678c5 1943 if (IS_ERR(segs))
f6a78bfc
HX
1944 return PTR_ERR(segs);
1945
1946 skb->next = segs;
1947 DEV_GSO_CB(skb)->destructor = skb->destructor;
1948 skb->destructor = dev_gso_skb_destructor;
1949
1950 return 0;
1951}
1952
fc6055a5
ED
1953/*
1954 * Try to orphan skb early, right before transmission by the device.
2244d07b
OH
1955 * We cannot orphan the skb if a tx timestamp is requested or the sk
1956 * reference is needed at driver level for other reasons, e.g. see net/can/raw.c.
fc6055a5
ED
1957 */
1958static inline void skb_orphan_try(struct sk_buff *skb)
1959{
87fd308c
ED
1960 struct sock *sk = skb->sk;
1961
2244d07b 1962 if (sk && !skb_shinfo(skb)->tx_flags) {
87fd308c
ED
1963 /* skb_tx_hash() won't be able to get the sk,
1964 * so we copy sk_hash into skb->rxhash.
1965 */
1966 if (!skb->rxhash)
1967 skb->rxhash = sk->sk_hash;
fc6055a5 1968 skb_orphan(skb);
87fd308c 1969 }
fc6055a5
ED
1970}
1971
6afff0ca
JF
1972/*
1973 * Returns true if either:
1974 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
1975 * 2. skb is fragmented and the device does not support SG, or if
1976 * at least one of the fragments is in highmem and the device does not
1977 * support DMA from it.
1978 */
1979static inline int skb_needs_linearize(struct sk_buff *skb,
1980 struct net_device *dev)
1981{
7b9c6090
JG
1982 int features = dev->features;
1983
1984 if (skb->protocol == htons(ETH_P_8021Q) || vlan_tx_tag_present(skb))
1985 features &= dev->vlan_features;
1986
6afff0ca 1987 return skb_is_nonlinear(skb) &&
7b9c6090
JG
1988 ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
1989 (skb_shinfo(skb)->nr_frags && (!(features & NETIF_F_SG) ||
6afff0ca
JF
1990 illegal_highdma(dev, skb))));
1991}
1992
fd2ea0a7
DM
1993int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1994 struct netdev_queue *txq)
f6a78bfc 1995{
00829823 1996 const struct net_device_ops *ops = dev->netdev_ops;
572a9d7b 1997 int rc = NETDEV_TX_OK;
00829823 1998
f6a78bfc 1999 if (likely(!skb->next)) {
9be9a6b9 2000 if (!list_empty(&ptype_all))
f6a78bfc
HX
2001 dev_queue_xmit_nit(skb, dev);
2002
93f154b5
ED
2003 /*
2004 * If the device doesn't need skb->dst, release it right now while
2005 * it's hot in this CPU's cache.
2006 */
adf30907
ED
2007 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2008 skb_dst_drop(skb);
2009
fc6055a5 2010 skb_orphan_try(skb);
9ccb8975 2011
7b9c6090
JG
2012 if (vlan_tx_tag_present(skb) &&
2013 !(dev->features & NETIF_F_HW_VLAN_TX)) {
2014 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2015 if (unlikely(!skb))
2016 goto out;
2017
2018 skb->vlan_tci = 0;
2019 }
2020
9ccb8975
DM
2021 if (netif_needs_gso(dev, skb)) {
2022 if (unlikely(dev_gso_segment(skb)))
2023 goto out_kfree_skb;
2024 if (skb->next)
2025 goto gso;
6afff0ca
JF
2026 } else {
2027 if (skb_needs_linearize(skb, dev) &&
2028 __skb_linearize(skb))
2029 goto out_kfree_skb;
2030
2031 /* If packet is not checksummed and device does not
2032 * support checksumming for this protocol, complete
2033 * checksumming here.
2034 */
2035 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2036 skb_set_transport_header(skb, skb->csum_start -
2037 skb_headroom(skb));
2038 if (!dev_can_checksum(dev, skb) &&
2039 skb_checksum_help(skb))
2040 goto out_kfree_skb;
2041 }
9ccb8975
DM
2042 }
2043
ac45f602 2044 rc = ops->ndo_start_xmit(skb, dev);
ec634fe3 2045 if (rc == NETDEV_TX_OK)
08baf561 2046 txq_trans_update(txq);
ac45f602 2047 return rc;
f6a78bfc
HX
2048 }
2049
576a30eb 2050gso:
f6a78bfc
HX
2051 do {
2052 struct sk_buff *nskb = skb->next;
f6a78bfc
HX
2053
2054 skb->next = nskb->next;
2055 nskb->next = NULL;
068a2de5
KK
2056
2057 /*
2058 * If the device doesn't need nskb->dst, release it right now while
2059 * it's hot in this CPU's cache.
2060 */
2061 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2062 skb_dst_drop(nskb);
2063
00829823 2064 rc = ops->ndo_start_xmit(nskb, dev);
ec634fe3 2065 if (unlikely(rc != NETDEV_TX_OK)) {
572a9d7b
PM
2066 if (rc & ~NETDEV_TX_MASK)
2067 goto out_kfree_gso_skb;
f54d9e8d 2068 nskb->next = skb->next;
f6a78bfc
HX
2069 skb->next = nskb;
2070 return rc;
2071 }
08baf561 2072 txq_trans_update(txq);
fd2ea0a7 2073 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
f54d9e8d 2074 return NETDEV_TX_BUSY;
f6a78bfc 2075 } while (skb->next);
4ec93edb 2076
572a9d7b
PM
2077out_kfree_gso_skb:
2078 if (likely(skb->next == NULL))
2079 skb->destructor = DEV_GSO_CB(skb)->destructor;
f6a78bfc
HX
2080out_kfree_skb:
2081 kfree_skb(skb);
7b9c6090 2082out:
572a9d7b 2083 return rc;
f6a78bfc
HX
2084}
2085
0a9627f2 2086static u32 hashrnd __read_mostly;
b6b2fed1 2087
9247744e 2088u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
8f0f2223 2089{
7019298a 2090 u32 hash;
b6b2fed1 2091
513de11b
DM
2092 if (skb_rx_queue_recorded(skb)) {
2093 hash = skb_get_rx_queue(skb);
d1b19dff 2094 while (unlikely(hash >= dev->real_num_tx_queues))
513de11b
DM
2095 hash -= dev->real_num_tx_queues;
2096 return hash;
2097 }
ec581f6a
ED
2098
2099 if (skb->sk && skb->sk->sk_hash)
7019298a 2100 hash = skb->sk->sk_hash;
ec581f6a 2101 else
87fd308c 2102 hash = (__force u16) skb->protocol ^ skb->rxhash;
0a9627f2 2103 hash = jhash_1word(hash, hashrnd);
b6b2fed1
DM
2104
2105 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
8f0f2223 2106}
9247744e 2107EXPORT_SYMBOL(skb_tx_hash);
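
/*
 * A note on the scaling above (illustrative): rather than the obvious
 * "hash % real_num_tx_queues", the final line maps the 32-bit hash onto
 * [0, real_num_tx_queues) with a multiply and a shift:
 *
 *	queue = ((u64) hash * n) >> 32;
 *
 * hash is uniform in [0, 2^32), so hash * n is uniform in [0, n * 2^32)
 * and its top 32 bits are therefore uniform in [0, n).  E.g. with n = 4,
 * any hash below 0x40000000 selects queue 0.  This avoids an integer
 * division, which is slow on several architectures.
 */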
8f0f2223 2108
ed04642f
ED
2109static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2110{
2111 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2112 if (net_ratelimit()) {
7a161ea9
ED
2113 pr_warning("%s selects TX queue %d, but "
2114 "real number of TX queues is %d\n",
2115 dev->name, queue_index, dev->real_num_tx_queues);
ed04642f
ED
2116 }
2117 return 0;
2118 }
2119 return queue_index;
2120}
2121
e8a0464c
DM
2122static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2123 struct sk_buff *skb)
2124{
b0f77d0e 2125 int queue_index;
deabc772 2126 const struct net_device_ops *ops = dev->netdev_ops;
a4ee3ce3 2127
deabc772
HS
2128 if (ops->ndo_select_queue) {
2129 queue_index = ops->ndo_select_queue(dev, skb);
2130 queue_index = dev_cap_txqueue(dev, queue_index);
2131 } else {
2132 struct sock *sk = skb->sk;
2133 queue_index = sk_tx_queue_get(sk);
2134 if (queue_index < 0) {
a4ee3ce3 2135
a4ee3ce3
KK
2136 queue_index = 0;
2137 if (dev->real_num_tx_queues > 1)
2138 queue_index = skb_tx_hash(dev, skb);
fd2ea0a7 2139
8728c544 2140 if (sk) {
87eb3670 2141 struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
8728c544
ED
2142
2143 if (dst && skb_dst(skb) == dst)
2144 sk_tx_queue_set(sk, queue_index);
2145 }
a4ee3ce3
KK
2146 }
2147 }
eae792b7 2148
fd2ea0a7
DM
2149 skb_set_queue_mapping(skb, queue_index);
2150 return netdev_get_tx_queue(dev, queue_index);
e8a0464c
DM
2151}
2152
bbd8a0d3
KK
2153static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2154 struct net_device *dev,
2155 struct netdev_queue *txq)
2156{
2157 spinlock_t *root_lock = qdisc_lock(q);
79640a4c 2158 bool contended = qdisc_is_running(q);
bbd8a0d3
KK
2159 int rc;
2160
79640a4c
ED
2161 /*
2162 * Heuristic to force contended enqueues to serialize on a
2163 * separate lock before trying to get qdisc main lock.
2164 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2165 * and dequeue packets faster.
2166 */
2167 if (unlikely(contended))
2168 spin_lock(&q->busylock);
2169
bbd8a0d3
KK
2170 spin_lock(root_lock);
2171 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2172 kfree_skb(skb);
2173 rc = NET_XMIT_DROP;
2174 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
bc135b23 2175 qdisc_run_begin(q)) {
bbd8a0d3
KK
2176 /*
2177 * This is a work-conserving queue; there are no old skbs
2178 * waiting to be sent out; and the qdisc is not running -
2179 * xmit the skb directly.
2180 */
7fee226a
ED
2181 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2182 skb_dst_force(skb);
bbd8a0d3 2183 __qdisc_update_bstats(q, skb->len);
79640a4c
ED
2184 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2185 if (unlikely(contended)) {
2186 spin_unlock(&q->busylock);
2187 contended = false;
2188 }
bbd8a0d3 2189 __qdisc_run(q);
79640a4c 2190 } else
bc135b23 2191 qdisc_run_end(q);
bbd8a0d3
KK
2192
2193 rc = NET_XMIT_SUCCESS;
2194 } else {
7fee226a 2195 skb_dst_force(skb);
bbd8a0d3 2196 rc = qdisc_enqueue_root(skb, q);
79640a4c
ED
2197 if (qdisc_run_begin(q)) {
2198 if (unlikely(contended)) {
2199 spin_unlock(&q->busylock);
2200 contended = false;
2201 }
2202 __qdisc_run(q);
2203 }
bbd8a0d3
KK
2204 }
2205 spin_unlock(root_lock);
79640a4c
ED
2206 if (unlikely(contended))
2207 spin_unlock(&q->busylock);
bbd8a0d3
KK
2208 return rc;
2209}
2210
745e20f1
ED
2211static DEFINE_PER_CPU(int, xmit_recursion);
2212#define RECURSION_LIMIT 3
2213
d29f749e
DJ
2214/**
2215 * dev_queue_xmit - transmit a buffer
2216 * @skb: buffer to transmit
2217 *
2218 * Queue a buffer for transmission to a network device. The caller must
2219 * have set the device and priority and built the buffer before calling
2220 * this function. The function can be called from an interrupt.
2221 *
2222 * A negative errno code is returned on a failure. A success does not
2223 * guarantee the frame will be transmitted as it may be dropped due
2224 * to congestion or traffic shaping.
2225 *
2226 * -----------------------------------------------------------------------------------
2227 * I notice this method can also return errors from the queue disciplines,
2228 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2229 * be positive.
2230 *
2231 * Regardless of the return value, the skb is consumed, so it is currently
2232 * difficult to retry a send to this method. (You can bump the ref count
2233 * before sending to hold a reference for retry if you are careful.)
2234 *
2235 * When calling this method, interrupts MUST be enabled. This is because
2236 * the BH enable code must have IRQs enabled so that it will not deadlock.
2237 * --BLG
2238 */
1da177e4
LT
2239int dev_queue_xmit(struct sk_buff *skb)
2240{
2241 struct net_device *dev = skb->dev;
dc2b4847 2242 struct netdev_queue *txq;
1da177e4
LT
2243 struct Qdisc *q;
2244 int rc = -ENOMEM;
2245
4ec93edb
YH
2246 /* Disable soft irqs for various locks below. Also
2247 * stops preemption for RCU.
1da177e4 2248 */
4ec93edb 2249 rcu_read_lock_bh();
1da177e4 2250
eae792b7 2251 txq = dev_pick_tx(dev, skb);
a898def2 2252 q = rcu_dereference_bh(txq->qdisc);
37437bb2 2253
1da177e4 2254#ifdef CONFIG_NET_CLS_ACT
d1b19dff 2255 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1da177e4
LT
2256#endif
2257 if (q->enqueue) {
bbd8a0d3 2258 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 2259 goto out;
1da177e4
LT
2260 }
2261
2262 /* The device has no queue. Common case for software devices:
2263 loopback, all the sorts of tunnels...
2264
932ff279
HX
2265 Really, it is unlikely that netif_tx_lock protection is necessary
2266 here. (E.g. loopback and IP tunnels are clean, ignoring statistics
1da177e4
LT
2267 counters.)
2268 However, it is possible that they rely on the protection
2269 we provide here.
2270
2271 Check this and take the lock anyway; it is not prone to deadlocks.
2272 Either way, the noqueue qdisc case is even simpler 8)
2273 */
2274 if (dev->flags & IFF_UP) {
2275 int cpu = smp_processor_id(); /* ok because BHs are off */
2276
c773e847 2277 if (txq->xmit_lock_owner != cpu) {
1da177e4 2278
745e20f1
ED
2279 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2280 goto recursion_alert;
2281
c773e847 2282 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 2283
fd2ea0a7 2284 if (!netif_tx_queue_stopped(txq)) {
745e20f1 2285 __this_cpu_inc(xmit_recursion);
572a9d7b 2286 rc = dev_hard_start_xmit(skb, dev, txq);
745e20f1 2287 __this_cpu_dec(xmit_recursion);
572a9d7b 2288 if (dev_xmit_complete(rc)) {
c773e847 2289 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
2290 goto out;
2291 }
2292 }
c773e847 2293 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
2294 if (net_ratelimit())
2295 printk(KERN_CRIT "Virtual device %s asks to "
2296 "queue packet!\n", dev->name);
2297 } else {
2298 /* Recursion has been detected! It is possible,
745e20f1
ED
2299 * unfortunately
2300 */
2301recursion_alert:
1da177e4
LT
2302 if (net_ratelimit())
2303 printk(KERN_CRIT "Dead loop on virtual device "
2304 "%s, fix it urgently!\n", dev->name);
2305 }
2306 }
2307
2308 rc = -ENETDOWN;
d4828d85 2309 rcu_read_unlock_bh();
1da177e4 2310
1da177e4
LT
2311 kfree_skb(skb);
2312 return rc;
2313out:
d4828d85 2314 rcu_read_unlock_bh();
1da177e4
LT
2315 return rc;
2316}
d1b19dff 2317EXPORT_SYMBOL(dev_queue_xmit);
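
/*
 * Illustrative sketch (not part of dev.c): building and sending a raw
 * Ethernet frame through dev_queue_xmit() from kernel code.  The
 * ethertype 0x88b5 (IEEE 802 local experimental) and the foo_* name
 * are arbitrary choices for the example.
 */
static int foo_send_frame(struct net_device *dev, const u8 *dst_mac,
			  const void *payload, size_t len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	memcpy(skb_put(skb, len), payload, len);

	skb->dev = dev;
	skb->protocol = htons(0x88b5);
	if (dev_hard_header(skb, dev, 0x88b5, dst_mac,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}
	/* The skb is consumed whether transmission succeeds or not. */
	return dev_queue_xmit(skb);
}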
1da177e4
LT
2318
2319
2320/*=======================================================================
2321 Receiver routines
2322 =======================================================================*/
2323
6b2bedc3 2324int netdev_max_backlog __read_mostly = 1000;
3b098e2d 2325int netdev_tstamp_prequeue __read_mostly = 1;
6b2bedc3
SH
2326int netdev_budget __read_mostly = 300;
2327int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4 2328
eecfd7c4
ED
2329/* Called with irq disabled */
2330static inline void ____napi_schedule(struct softnet_data *sd,
2331 struct napi_struct *napi)
2332{
2333 list_add_tail(&napi->poll_list, &sd->poll_list);
2334 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2335}
2336
0a9627f2 2337/*
bfb564e7
KK
2338 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2339 * and src/dst port numbers. Returns a non-zero hash number on success
2340 * and 0 on failure.
0a9627f2 2341 */
bfb564e7 2342__u32 __skb_get_rxhash(struct sk_buff *skb)
0a9627f2 2343{
12fcdefb 2344 int nhoff, hash = 0, poff;
0a9627f2
TH
2345 struct ipv6hdr *ip6;
2346 struct iphdr *ip;
0a9627f2 2347 u8 ip_proto;
8c52d509
CG
2348 u32 addr1, addr2, ihl;
2349 union {
2350 u32 v32;
2351 u16 v16[2];
2352 } ports;
0a9627f2 2353
bfb564e7 2354 nhoff = skb_network_offset(skb);
0a9627f2
TH
2355
2356 switch (skb->protocol) {
2357 case __constant_htons(ETH_P_IP):
bfb564e7 2358 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
0a9627f2
TH
2359 goto done;
2360
1003489e 2361 ip = (struct iphdr *) (skb->data + nhoff);
dbe5775b
CG
2362 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2363 ip_proto = 0;
2364 else
2365 ip_proto = ip->protocol;
b249dcb8
ED
2366 addr1 = (__force u32) ip->saddr;
2367 addr2 = (__force u32) ip->daddr;
0a9627f2
TH
2368 ihl = ip->ihl;
2369 break;
2370 case __constant_htons(ETH_P_IPV6):
bfb564e7 2371 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
0a9627f2
TH
2372 goto done;
2373
1003489e 2374 ip6 = (struct ipv6hdr *) (skb->data + nhoff);
0a9627f2 2375 ip_proto = ip6->nexthdr;
b249dcb8
ED
2376 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2377 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
0a9627f2
TH
2378 ihl = (40 >> 2);
2379 break;
2380 default:
2381 goto done;
2382 }
bfb564e7 2383
12fcdefb
CG
2384 ports.v32 = 0;
2385 poff = proto_ports_offset(ip_proto);
2386 if (poff >= 0) {
2387 nhoff += ihl * 4 + poff;
2388 if (pskb_may_pull(skb, nhoff + 4)) {
2389 ports.v32 = * (__force u32 *) (skb->data + nhoff);
8c52d509
CG
2390 if (ports.v16[1] < ports.v16[0])
2391 swap(ports.v16[0], ports.v16[1]);
b249dcb8 2392 }
0a9627f2
TH
2393 }
2394
b249dcb8
ED
2395 /* get a consistent hash (same value on both flow directions) */
2396 if (addr2 < addr1)
2397 swap(addr1, addr2);
0a9627f2 2398
bfb564e7
KK
2399 hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2400 if (!hash)
2401 hash = 1;
2402
2403done:
2404 return hash;
2405}
2406EXPORT_SYMBOL(__skb_get_rxhash);
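
/*
 * Illustrative sketch (not part of dev.c): a driver whose hardware
 * computes an RSS hash can store it in skb->rxhash on receive, so that
 * skb_get_rxhash() (a wrapper around the function above) returns it
 * directly and the software fallback never runs.  The foo_rx_desc
 * layout is hypothetical.
 */
static void foo_set_rxhash(struct foo_rx_desc *desc, struct sk_buff *skb)
{
	/* A zero hash is simply recomputed in software later. */
	skb->rxhash = le32_to_cpu(desc->rss_hash);
}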
2407
2408#ifdef CONFIG_RPS
2409
2410/* One global table that all flow-based protocols share. */
2411struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2412EXPORT_SYMBOL(rps_sock_flow_table);
2413
2414/*
2415 * get_rps_cpu is called from netif_receive_skb and returns the target
2416 * CPU from the RPS map of the receiving queue for a given skb.
2417 * rcu_read_lock must be held on entry.
2418 */
2419static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2420 struct rps_dev_flow **rflowp)
2421{
2422 struct netdev_rx_queue *rxqueue;
6febfca9 2423 struct rps_map *map = NULL;
bfb564e7
KK
2424 struct rps_dev_flow_table *flow_table;
2425 struct rps_sock_flow_table *sock_flow_table;
2426 int cpu = -1;
2427 u16 tcpu;
2428
2429 if (skb_rx_queue_recorded(skb)) {
2430 u16 index = skb_get_rx_queue(skb);
62fe0b40
BH
2431 if (unlikely(index >= dev->real_num_rx_queues)) {
2432 WARN_ONCE(dev->real_num_rx_queues > 1,
2433 "%s received packet on queue %u, but number "
2434 "of RX queues is %u\n",
2435 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
2436 goto done;
2437 }
2438 rxqueue = dev->_rx + index;
2439 } else
2440 rxqueue = dev->_rx;
2441
6febfca9
CG
2442 if (rxqueue->rps_map) {
2443 map = rcu_dereference(rxqueue->rps_map);
2444 if (map && map->len == 1) {
2445 tcpu = map->cpus[0];
2446 if (cpu_online(tcpu))
2447 cpu = tcpu;
2448 goto done;
2449 }
2450 } else if (!rxqueue->rps_flow_table) {
bfb564e7 2451 goto done;
6febfca9 2452 }
bfb564e7 2453
2d47b459 2454 skb_reset_network_header(skb);
bfb564e7
KK
2455 if (!skb_get_rxhash(skb))
2456 goto done;
2457
fec5e652
TH
2458 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2459 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2460 if (flow_table && sock_flow_table) {
2461 u16 next_cpu;
2462 struct rps_dev_flow *rflow;
2463
2464 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2465 tcpu = rflow->cpu;
2466
2467 next_cpu = sock_flow_table->ents[skb->rxhash &
2468 sock_flow_table->mask];
2469
2470 /*
2471 * If the desired CPU (where last recvmsg was done) is
2472 * different from current CPU (one in the rx-queue flow
2473 * table entry), switch if one of the following holds:
2474 * - Current CPU is unset (equal to RPS_NO_CPU).
2475 * - Current CPU is offline.
2476 * - The current CPU's queue tail has advanced beyond the
2477 * last packet that was enqueued using this table entry.
2478 * This guarantees that all previous packets for the flow
2479 * have been dequeued, thus preserving in-order delivery.
2480 */
2481 if (unlikely(tcpu != next_cpu) &&
2482 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2483 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2484 rflow->last_qtail)) >= 0)) {
2485 tcpu = rflow->cpu = next_cpu;
2486 if (tcpu != RPS_NO_CPU)
2487 rflow->last_qtail = per_cpu(softnet_data,
2488 tcpu).input_queue_head;
2489 }
2490 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2491 *rflowp = rflow;
2492 cpu = tcpu;
2493 goto done;
2494 }
2495 }
2496
0a9627f2 2497 if (map) {
fec5e652 2498 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
0a9627f2
TH
2499
2500 if (cpu_online(tcpu)) {
2501 cpu = tcpu;
2502 goto done;
2503 }
2504 }
2505
2506done:
0a9627f2
TH
2507 return cpu;
2508}
2509
0a9627f2 2510/* Called from hardirq (IPI) context */
e36fa2f7 2511static void rps_trigger_softirq(void *data)
0a9627f2 2512{
e36fa2f7
ED
2513 struct softnet_data *sd = data;
2514
eecfd7c4 2515 ____napi_schedule(sd, &sd->backlog);
dee42870 2516 sd->received_rps++;
0a9627f2 2517}
e36fa2f7 2518
fec5e652 2519#endif /* CONFIG_RPS */
0a9627f2 2520
e36fa2f7
ED
2521/*
2522 * Check if this softnet_data structure belongs to another CPU.
2523 * If yes, queue it to our IPI list and return 1.
2524 * If no, return 0.
2525 */
2526static int rps_ipi_queued(struct softnet_data *sd)
2527{
2528#ifdef CONFIG_RPS
2529 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2530
2531 if (sd != mysd) {
2532 sd->rps_ipi_next = mysd->rps_ipi_list;
2533 mysd->rps_ipi_list = sd;
2534
2535 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2536 return 1;
2537 }
2538#endif /* CONFIG_RPS */
2539 return 0;
2540}
2541
0a9627f2
TH
2542/*
2543 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2544 * queue (may be a remote CPU queue).
2545 */
fec5e652
TH
2546static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2547 unsigned int *qtail)
0a9627f2 2548{
e36fa2f7 2549 struct softnet_data *sd;
0a9627f2
TH
2550 unsigned long flags;
2551
e36fa2f7 2552 sd = &per_cpu(softnet_data, cpu);
0a9627f2
TH
2553
2554 local_irq_save(flags);
0a9627f2 2555
e36fa2f7 2556 rps_lock(sd);
6e7676c1
CG
2557 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2558 if (skb_queue_len(&sd->input_pkt_queue)) {
0a9627f2 2559enqueue:
e36fa2f7 2560 __skb_queue_tail(&sd->input_pkt_queue, skb);
76cc8b13 2561 input_queue_tail_incr_save(sd, qtail);
e36fa2f7 2562 rps_unlock(sd);
152102c7 2563 local_irq_restore(flags);
0a9627f2
TH
2564 return NET_RX_SUCCESS;
2565 }
2566
ebda37c2
ED
2567 /* Schedule NAPI for backlog device
2568 * We can use a non-atomic operation since we own the queue lock.
2569 */
2570 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
e36fa2f7 2571 if (!rps_ipi_queued(sd))
eecfd7c4 2572 ____napi_schedule(sd, &sd->backlog);
0a9627f2
TH
2573 }
2574 goto enqueue;
2575 }
2576
dee42870 2577 sd->dropped++;
e36fa2f7 2578 rps_unlock(sd);
0a9627f2 2579
0a9627f2
TH
2580 local_irq_restore(flags);
2581
caf586e5 2582 atomic_long_inc(&skb->dev->rx_dropped);
0a9627f2
TH
2583 kfree_skb(skb);
2584 return NET_RX_DROP;
2585}
1da177e4 2586
1da177e4
LT
2587/**
2588 * netif_rx - post buffer to the network code
2589 * @skb: buffer to post
2590 *
2591 * This function receives a packet from a device driver and queues it for
2592 * the upper (protocol) levels to process. It always succeeds. The buffer
2593 * may be dropped during processing for congestion control or by the
2594 * protocol layers.
2595 *
2596 * return values:
2597 * NET_RX_SUCCESS (no congestion)
1da177e4
LT
2598 * NET_RX_DROP (packet was dropped)
2599 *
2600 */
2601
2602int netif_rx(struct sk_buff *skb)
2603{
b0e28f1e 2604 int ret;
1da177e4
LT
2605
2606 /* if netpoll wants it, pretend we never saw it */
2607 if (netpoll_rx(skb))
2608 return NET_RX_DROP;
2609
3b098e2d
ED
2610 if (netdev_tstamp_prequeue)
2611 net_timestamp_check(skb);
1da177e4 2612
df334545 2613#ifdef CONFIG_RPS
b0e28f1e 2614 {
fec5e652 2615 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
2616 int cpu;
2617
cece1945 2618 preempt_disable();
b0e28f1e 2619 rcu_read_lock();
fec5e652
TH
2620
2621 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
2622 if (cpu < 0)
2623 cpu = smp_processor_id();
fec5e652
TH
2624
2625 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2626
b0e28f1e 2627 rcu_read_unlock();
cece1945 2628 preempt_enable();
b0e28f1e 2629 }
1e94d72f 2630#else
fec5e652
TH
2631 {
2632 unsigned int qtail;
2633 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2634 put_cpu();
2635 }
1e94d72f 2636#endif
b0e28f1e 2637 return ret;
1da177e4 2638}
d1b19dff 2639EXPORT_SYMBOL(netif_rx);
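
/*
 * Illustrative sketch (not part of dev.c): the classic non-NAPI receive
 * path -- an interrupt handler pulls one frame off the hardware, wraps
 * it in an skb and queues it with netif_rx().  The foo_* helpers are
 * hypothetical.
 */
static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	unsigned int len = foo_rx_frame_len(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return IRQ_HANDLED;
	}
	foo_copy_rx_frame(dev, skb_put(skb, len));
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);			/* queued; processed in softirq */
	return IRQ_HANDLED;
}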
1da177e4
LT
2640
2641int netif_rx_ni(struct sk_buff *skb)
2642{
2643 int err;
2644
2645 preempt_disable();
2646 err = netif_rx(skb);
2647 if (local_softirq_pending())
2648 do_softirq();
2649 preempt_enable();
2650
2651 return err;
2652}
1da177e4
LT
2653EXPORT_SYMBOL(netif_rx_ni);
2654
1da177e4
LT
2655static void net_tx_action(struct softirq_action *h)
2656{
2657 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2658
2659 if (sd->completion_queue) {
2660 struct sk_buff *clist;
2661
2662 local_irq_disable();
2663 clist = sd->completion_queue;
2664 sd->completion_queue = NULL;
2665 local_irq_enable();
2666
2667 while (clist) {
2668 struct sk_buff *skb = clist;
2669 clist = clist->next;
2670
547b792c 2671 WARN_ON(atomic_read(&skb->users));
1da177e4
LT
2672 __kfree_skb(skb);
2673 }
2674 }
2675
2676 if (sd->output_queue) {
37437bb2 2677 struct Qdisc *head;
1da177e4
LT
2678
2679 local_irq_disable();
2680 head = sd->output_queue;
2681 sd->output_queue = NULL;
a9cbd588 2682 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
2683 local_irq_enable();
2684
2685 while (head) {
37437bb2
DM
2686 struct Qdisc *q = head;
2687 spinlock_t *root_lock;
2688
1da177e4
LT
2689 head = head->next_sched;
2690
5fb66229 2691 root_lock = qdisc_lock(q);
37437bb2 2692 if (spin_trylock(root_lock)) {
def82a1d
JP
2693 smp_mb__before_clear_bit();
2694 clear_bit(__QDISC_STATE_SCHED,
2695 &q->state);
37437bb2
DM
2696 qdisc_run(q);
2697 spin_unlock(root_lock);
1da177e4 2698 } else {
195648bb 2699 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 2700 &q->state)) {
195648bb 2701 __netif_reschedule(q);
e8a83e10
JP
2702 } else {
2703 smp_mb__before_clear_bit();
2704 clear_bit(__QDISC_STATE_SCHED,
2705 &q->state);
2706 }
1da177e4
LT
2707 }
2708 }
2709 }
2710}
2711
6f05f629
SH
2712static inline int deliver_skb(struct sk_buff *skb,
2713 struct packet_type *pt_prev,
2714 struct net_device *orig_dev)
1da177e4
LT
2715{
2716 atomic_inc(&skb->users);
f2ccd8fa 2717 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4
LT
2718}
2719
ab95bfe0
JP
2720#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
2721 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
da678292
MM
2722/* This hook is defined here for ATM LANE */
2723int (*br_fdb_test_addr_hook)(struct net_device *dev,
2724 unsigned char *addr) __read_mostly;
4fb019a0 2725EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 2726#endif
1da177e4 2727
1da177e4
LT
2728#ifdef CONFIG_NET_CLS_ACT
2729/* TODO: Maybe we should just force sch_ingress to be compiled in
2730 * whenever CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2731 * instructions (a compare and two extra stores) when it is off
2732 * but CONFIG_NET_CLS_ACT is on.
4ec93edb 2733 * NOTE: This doesn't remove any functionality; if you don't have
1da177e4
LT
2734 * the ingress scheduler, you just can't add policies on ingress.
2735 *
2736 */
24824a09 2737static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
1da177e4 2738{
1da177e4 2739 struct net_device *dev = skb->dev;
f697c3e8 2740 u32 ttl = G_TC_RTTL(skb->tc_verd);
555353cf
DM
2741 int result = TC_ACT_OK;
2742 struct Qdisc *q;
4ec93edb 2743
de384830
SH
2744 if (unlikely(MAX_RED_LOOP < ttl++)) {
2745 if (net_ratelimit())
2746 pr_warning( "Redir loop detected Dropping packet (%d->%d)\n",
2747 skb->skb_iif, dev->ifindex);
f697c3e8
HX
2748 return TC_ACT_SHOT;
2749 }
1da177e4 2750
f697c3e8
HX
2751 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2752 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
1da177e4 2753
83874000 2754 q = rxq->qdisc;
8d50b53d 2755 if (q != &noop_qdisc) {
83874000 2756 spin_lock(qdisc_lock(q));
a9312ae8
DM
2757 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2758 result = qdisc_enqueue_root(skb, q);
83874000
DM
2759 spin_unlock(qdisc_lock(q));
2760 }
f697c3e8
HX
2761
2762 return result;
2763}
86e65da9 2764
f697c3e8
HX
2765static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2766 struct packet_type **pt_prev,
2767 int *ret, struct net_device *orig_dev)
2768{
24824a09
ED
2769 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
2770
2771 if (!rxq || rxq->qdisc == &noop_qdisc)
f697c3e8 2772 goto out;
1da177e4 2773
f697c3e8
HX
2774 if (*pt_prev) {
2775 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2776 *pt_prev = NULL;
1da177e4
LT
2777 }
2778
24824a09 2779 switch (ing_filter(skb, rxq)) {
f697c3e8
HX
2780 case TC_ACT_SHOT:
2781 case TC_ACT_STOLEN:
2782 kfree_skb(skb);
2783 return NULL;
2784 }
2785
2786out:
2787 skb->tc_verd = 0;
2788 return skb;
1da177e4
LT
2789}
2790#endif
2791
bc1d0411
PM
2792/*
2793 * netif_nit_deliver - deliver received packets to network taps
2794 * @skb: buffer
2795 *
2796 * This function is used to deliver incoming packets to network
2797 * taps. It should be used when the normal netif_receive_skb path
2798 * is bypassed, for example because of VLAN acceleration.
2799 */
2800void netif_nit_deliver(struct sk_buff *skb)
2801{
2802 struct packet_type *ptype;
2803
2804 if (list_empty(&ptype_all))
2805 return;
2806
2807 skb_reset_network_header(skb);
2808 skb_reset_transport_header(skb);
2809 skb->mac_len = skb->network_header - skb->mac_header;
2810
2811 rcu_read_lock();
2812 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2813 if (!ptype->dev || ptype->dev == skb->dev)
2814 deliver_skb(skb, ptype, skb->dev);
2815 }
2816 rcu_read_unlock();
2817}
2818
ab95bfe0
JP
2819/**
2820 * netdev_rx_handler_register - register receive handler
2821 * @dev: device to register a handler for
2822 * @rx_handler: receive handler to register
93e2c32b 2823 * @rx_handler_data: data pointer that is used by rx handler
ab95bfe0
JP
2824 *
2825 * Register a receive handler for a device. This handler will then be
2826 * called from __netif_receive_skb. A negative errno code is returned
2827 * on a failure.
2828 *
2829 * The caller must hold the rtnl_mutex.
2830 */
2831int netdev_rx_handler_register(struct net_device *dev,
93e2c32b
JP
2832 rx_handler_func_t *rx_handler,
2833 void *rx_handler_data)
ab95bfe0
JP
2834{
2835 ASSERT_RTNL();
2836
2837 if (dev->rx_handler)
2838 return -EBUSY;
2839
93e2c32b 2840 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
ab95bfe0
JP
2841 rcu_assign_pointer(dev->rx_handler, rx_handler);
2842
2843 return 0;
2844}
2845EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
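
/*
 * Illustrative sketch (not part of dev.c): claiming a port device the
 * way bridge and macvlan do.  The handler either steals the skb
 * (returns NULL) or hands it back for normal delivery.  The foo_*
 * names are hypothetical; rtnl_lock() satisfies the locking rule in
 * the comment above.
 */
static struct sk_buff *foo_handle_frame(struct sk_buff *skb)
{
	if (foo_port_wants(skb)) {
		foo_deliver(skb);
		return NULL;	/* consumed: __netif_receive_skb stops here */
	}
	return skb;		/* continue normal processing */
}

static int foo_add_port(struct net_device *port_dev, void *port_priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(port_dev, foo_handle_frame,
					 port_priv);
	rtnl_unlock();
	return err;
}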
2846
2847/**
2848 * netdev_rx_handler_unregister - unregister receive handler
2849 * @dev: device to unregister a handler from
2850 *
2851 * Unregister a receive handler from a device.
2852 *
2853 * The caller must hold the rtnl_mutex.
2854 */
2855void netdev_rx_handler_unregister(struct net_device *dev)
2856{
2857
2858 ASSERT_RTNL();
2859 rcu_assign_pointer(dev->rx_handler, NULL);
93e2c32b 2860 rcu_assign_pointer(dev->rx_handler_data, NULL);
ab95bfe0
JP
2861}
2862EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
2863
acbbc071
ED
2864static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2865 struct net_device *master)
2866{
2867 if (skb->pkt_type == PACKET_HOST) {
2868 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2869
2870 memcpy(dest, master->dev_addr, ETH_ALEN);
2871 }
2872}
2873
2874/* On bonding slaves other than the currently active slave, suppress
2875 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2876 * ARP on active-backup slaves with arp_validate enabled.
2877 */
2878int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2879{
2880 struct net_device *dev = skb->dev;
2881
2882 if (master->priv_flags & IFF_MASTER_ARPMON)
2883 dev->last_rx = jiffies;
2884
f350a0a8
JP
2885 if ((master->priv_flags & IFF_MASTER_ALB) &&
2886 (master->priv_flags & IFF_BRIDGE_PORT)) {
acbbc071
ED
2887 /* Do address unmangle. The local destination address
2888 * will always be the one the master has. Provides the right
2889 * functionality in a bridge.
2890 */
2891 skb_bond_set_mac_by_master(skb, master);
2892 }
2893
2894 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2895 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2896 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2897 return 0;
2898
2899 if (master->priv_flags & IFF_MASTER_ALB) {
2900 if (skb->pkt_type != PACKET_BROADCAST &&
2901 skb->pkt_type != PACKET_MULTICAST)
2902 return 0;
2903 }
2904 if (master->priv_flags & IFF_MASTER_8023AD &&
2905 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2906 return 0;
2907
2908 return 1;
2909 }
2910 return 0;
2911}
2912EXPORT_SYMBOL(__skb_bond_should_drop);
2913
10f744d2 2914static int __netif_receive_skb(struct sk_buff *skb)
1da177e4
LT
2915{
2916 struct packet_type *ptype, *pt_prev;
ab95bfe0 2917 rx_handler_func_t *rx_handler;
f2ccd8fa 2918 struct net_device *orig_dev;
0641e4fb 2919 struct net_device *master;
0d7a3681 2920 struct net_device *null_or_orig;
2df4a0fa 2921 struct net_device *orig_or_bond;
1da177e4 2922 int ret = NET_RX_DROP;
252e3346 2923 __be16 type;
1da177e4 2924
3b098e2d
ED
2925 if (!netdev_tstamp_prequeue)
2926 net_timestamp_check(skb);
81bbb3d4 2927
05532121
CG
2928 if (vlan_tx_tag_present(skb))
2929 vlan_hwaccel_do_receive(skb);
9b22ea56 2930
1da177e4 2931 /* if we've gotten here through NAPI, check netpoll */
bea3348e 2932 if (netpoll_receive_skb(skb))
1da177e4
LT
2933 return NET_RX_DROP;
2934
8964be4a
ED
2935 if (!skb->skb_iif)
2936 skb->skb_iif = skb->dev->ifindex;
86e65da9 2937
597a264b
JF
2938 /*
2939 * bonding note: skbs received on inactive slaves should only
2940 * be delivered to pkt handlers that are exact matches. Also
2941 * the deliver_no_wcard flag will be set. If packet handlers
2942 * are sensitive to duplicate packets these skbs will need to
2943 * be dropped at the handler. The vlan accel path may have
2944 * already set the deliver_no_wcard flag.
2945 */
0d7a3681 2946 null_or_orig = NULL;
cc9bd5ce 2947 orig_dev = skb->dev;
0641e4fb 2948 master = ACCESS_ONCE(orig_dev->master);
597a264b
JF
2949 if (skb->deliver_no_wcard)
2950 null_or_orig = orig_dev;
2951 else if (master) {
2952 if (skb_bond_should_drop(skb, master)) {
2953 skb->deliver_no_wcard = 1;
0d7a3681 2954 null_or_orig = orig_dev; /* deliver only exact match */
597a264b 2955 } else
0641e4fb 2956 skb->dev = master;
cc9bd5ce 2957 }
8f903c70 2958
27f39c73 2959 __this_cpu_inc(softnet_data.processed);
c1d2bbe1 2960 skb_reset_network_header(skb);
badff6d0 2961 skb_reset_transport_header(skb);
b0e380b1 2962 skb->mac_len = skb->network_header - skb->mac_header;
1da177e4
LT
2963
2964 pt_prev = NULL;
2965
2966 rcu_read_lock();
2967
2968#ifdef CONFIG_NET_CLS_ACT
2969 if (skb->tc_verd & TC_NCLS) {
2970 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2971 goto ncls;
2972 }
2973#endif
2974
2975 list_for_each_entry_rcu(ptype, &ptype_all, list) {
f982307f
JE
2976 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2977 ptype->dev == orig_dev) {
4ec93edb 2978 if (pt_prev)
f2ccd8fa 2979 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
2980 pt_prev = ptype;
2981 }
2982 }
2983
2984#ifdef CONFIG_NET_CLS_ACT
f697c3e8
HX
2985 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2986 if (!skb)
1da177e4 2987 goto out;
1da177e4
LT
2988ncls:
2989#endif
2990
ab95bfe0
JP
2991 /* Handle special case of bridge or macvlan */
2992 rx_handler = rcu_dereference(skb->dev->rx_handler);
2993 if (rx_handler) {
2994 if (pt_prev) {
2995 ret = deliver_skb(skb, pt_prev, orig_dev);
2996 pt_prev = NULL;
2997 }
2998 skb = rx_handler(skb);
2999 if (!skb)
3000 goto out;
3001 }
1da177e4 3002
1f3c8804
AG
3003 /*
3004 * Make sure frames received on VLAN interfaces stacked on
3005 * bonding interfaces still make their way to any base bonding
3006 * device that may have registered for a specific ptype. The
3007 * handler may have to adjust skb->dev and orig_dev.
1f3c8804 3008 */
2df4a0fa 3009 orig_or_bond = orig_dev;
1f3c8804
AG
3010 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
3011 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
2df4a0fa 3012 orig_or_bond = vlan_dev_real_dev(skb->dev);
1f3c8804
AG
3013 }
3014
1da177e4 3015 type = skb->protocol;
82d8a867
PE
3016 list_for_each_entry_rcu(ptype,
3017 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1f3c8804 3018 if (ptype->type == type && (ptype->dev == null_or_orig ||
ca8d9ea3 3019 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2df4a0fa 3020 ptype->dev == orig_or_bond)) {
4ec93edb 3021 if (pt_prev)
f2ccd8fa 3022 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
3023 pt_prev = ptype;
3024 }
3025 }
3026
3027 if (pt_prev) {
f2ccd8fa 3028 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4 3029 } else {
caf586e5 3030 atomic_long_inc(&skb->dev->rx_dropped);
1da177e4
LT
3031 kfree_skb(skb);
3032 /* Jamal, now you will not be able to escape explaining
3033 * to me how you were going to use this. :-)
3034 */
3035 ret = NET_RX_DROP;
3036 }
3037
3038out:
3039 rcu_read_unlock();
3040 return ret;
3041}
0a9627f2
TH
3042
3043/**
3044 * netif_receive_skb - process receive buffer from network
3045 * @skb: buffer to process
3046 *
3047 * netif_receive_skb() is the main receive data processing function.
3048 * It always succeeds. The buffer may be dropped during processing
3049 * for congestion control or by the protocol layers.
3050 *
3051 * This function may only be called from softirq context and interrupts
3052 * should be enabled.
3053 *
3054 * Return values (usually ignored):
3055 * NET_RX_SUCCESS: no congestion
3056 * NET_RX_DROP: packet was dropped
3057 */
3058int netif_receive_skb(struct sk_buff *skb)
3059{
3b098e2d
ED
3060 if (netdev_tstamp_prequeue)
3061 net_timestamp_check(skb);
3062
c1f19b51
RC
3063 if (skb_defer_rx_timestamp(skb))
3064 return NET_RX_SUCCESS;
3065
df334545 3066#ifdef CONFIG_RPS
3b098e2d
ED
3067 {
3068 struct rps_dev_flow voidflow, *rflow = &voidflow;
3069 int cpu, ret;
fec5e652 3070
3b098e2d
ED
3071 rcu_read_lock();
3072
3073 cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 3074
3b098e2d
ED
3075 if (cpu >= 0) {
3076 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3077 rcu_read_unlock();
3078 } else {
3079 rcu_read_unlock();
3080 ret = __netif_receive_skb(skb);
3081 }
0a9627f2 3082
3b098e2d 3083 return ret;
fec5e652 3084 }
1e94d72f
TH
3085#else
3086 return __netif_receive_skb(skb);
3087#endif
0a9627f2 3088}
d1b19dff 3089EXPORT_SYMBOL(netif_receive_skb);
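
/*
 * Illustrative sketch (not part of dev.c): a NAPI ->poll() routine
 * delivering frames with netif_receive_skb(), which must be called
 * from softirq context -- exactly what ->poll() provides.  The
 * foo_priv layout and helpers are hypothetical.
 */
struct foo_priv {
	struct net_device *dev;
	struct napi_struct napi;
	/* ... ring state ... */
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = foo_next_rx_skb(priv)) != NULL) {
		skb->protocol = eth_type_trans(skb, priv->dev);
		netif_receive_skb(skb);
		work++;
	}
	if (work < budget) {		/* ring drained: stop polling */
		napi_complete(napi);
		foo_enable_rx_irq(priv);
	}
	return work;
}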
1da177e4 3090
88751275
ED
3091/* Network device is going away; flush any packets still pending.
3092 * Called with irqs disabled.
3093 */
152102c7 3094static void flush_backlog(void *arg)
6e583ce5 3095{
152102c7 3096 struct net_device *dev = arg;
e36fa2f7 3097 struct softnet_data *sd = &__get_cpu_var(softnet_data);
6e583ce5
SH
3098 struct sk_buff *skb, *tmp;
3099
e36fa2f7 3100 rps_lock(sd);
6e7676c1 3101 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6e583ce5 3102 if (skb->dev == dev) {
e36fa2f7 3103 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 3104 kfree_skb(skb);
76cc8b13 3105 input_queue_head_incr(sd);
6e583ce5 3106 }
6e7676c1 3107 }
e36fa2f7 3108 rps_unlock(sd);
6e7676c1
CG
3109
3110 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3111 if (skb->dev == dev) {
3112 __skb_unlink(skb, &sd->process_queue);
3113 kfree_skb(skb);
76cc8b13 3114 input_queue_head_incr(sd);
6e7676c1
CG
3115 }
3116 }
6e583ce5
SH
3117}
3118
d565b0a1
HX
3119static int napi_gro_complete(struct sk_buff *skb)
3120{
3121 struct packet_type *ptype;
3122 __be16 type = skb->protocol;
3123 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3124 int err = -ENOENT;
3125
fc59f9a3
HX
3126 if (NAPI_GRO_CB(skb)->count == 1) {
3127 skb_shinfo(skb)->gso_size = 0;
d565b0a1 3128 goto out;
fc59f9a3 3129 }
d565b0a1
HX
3130
3131 rcu_read_lock();
3132 list_for_each_entry_rcu(ptype, head, list) {
3133 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3134 continue;
3135
3136 err = ptype->gro_complete(skb);
3137 break;
3138 }
3139 rcu_read_unlock();
3140
3141 if (err) {
3142 WARN_ON(&ptype->list == head);
3143 kfree_skb(skb);
3144 return NET_RX_SUCCESS;
3145 }
3146
3147out:
d565b0a1
HX
3148 return netif_receive_skb(skb);
3149}
3150
86cac58b 3151inline void napi_gro_flush(struct napi_struct *napi)
d565b0a1
HX
3152{
3153 struct sk_buff *skb, *next;
3154
3155 for (skb = napi->gro_list; skb; skb = next) {
3156 next = skb->next;
3157 skb->next = NULL;
3158 napi_gro_complete(skb);
3159 }
3160
4ae5544f 3161 napi->gro_count = 0;
d565b0a1
HX
3162 napi->gro_list = NULL;
3163}
86cac58b 3164EXPORT_SYMBOL(napi_gro_flush);
d565b0a1 3165
5b252f0c 3166enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
3167{
3168 struct sk_buff **pp = NULL;
3169 struct packet_type *ptype;
3170 __be16 type = skb->protocol;
3171 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
0da2afd5 3172 int same_flow;
d565b0a1 3173 int mac_len;
5b252f0c 3174 enum gro_result ret;
d565b0a1 3175
ce9e76c8 3176 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
d565b0a1
HX
3177 goto normal;
3178
21dc3301 3179 if (skb_is_gso(skb) || skb_has_frag_list(skb))
f17f5c91
HX
3180 goto normal;
3181
d565b0a1
HX
3182 rcu_read_lock();
3183 list_for_each_entry_rcu(ptype, head, list) {
d565b0a1
HX
3184 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3185 continue;
3186
86911732 3187 skb_set_network_header(skb, skb_gro_offset(skb));
d565b0a1
HX
3188 mac_len = skb->network_header - skb->mac_header;
3189 skb->mac_len = mac_len;
3190 NAPI_GRO_CB(skb)->same_flow = 0;
3191 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 3192 NAPI_GRO_CB(skb)->free = 0;
d565b0a1 3193
d565b0a1
HX
3194 pp = ptype->gro_receive(&napi->gro_list, skb);
3195 break;
3196 }
3197 rcu_read_unlock();
3198
3199 if (&ptype->list == head)
3200 goto normal;
3201
0da2afd5 3202 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 3203 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 3204
d565b0a1
HX
3205 if (pp) {
3206 struct sk_buff *nskb = *pp;
3207
3208 *pp = nskb->next;
3209 nskb->next = NULL;
3210 napi_gro_complete(nskb);
4ae5544f 3211 napi->gro_count--;
d565b0a1
HX
3212 }
3213
0da2afd5 3214 if (same_flow)
d565b0a1
HX
3215 goto ok;
3216
4ae5544f 3217 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
d565b0a1 3218 goto normal;
d565b0a1 3219
4ae5544f 3220 napi->gro_count++;
d565b0a1 3221 NAPI_GRO_CB(skb)->count = 1;
86911732 3222 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
3223 skb->next = napi->gro_list;
3224 napi->gro_list = skb;
5d0d9be8 3225 ret = GRO_HELD;
d565b0a1 3226
ad0f9904 3227pull:
cb18978c
HX
3228 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3229 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3230
3231 BUG_ON(skb->end - skb->tail < grow);
3232
3233 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3234
3235 skb->tail += grow;
3236 skb->data_len -= grow;
3237
3238 skb_shinfo(skb)->frags[0].page_offset += grow;
3239 skb_shinfo(skb)->frags[0].size -= grow;
3240
3241 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3242 put_page(skb_shinfo(skb)->frags[0].page);
3243 memmove(skb_shinfo(skb)->frags,
3244 skb_shinfo(skb)->frags + 1,
e5093aec 3245 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
cb18978c 3246 }
ad0f9904
HX
3247 }
3248
d565b0a1 3249ok:
5d0d9be8 3250 return ret;
d565b0a1
HX
3251
3252normal:
ad0f9904
HX
3253 ret = GRO_NORMAL;
3254 goto pull;
5d38a079 3255}
96e93eab
HX
3256EXPORT_SYMBOL(dev_gro_receive);
3257
40d0802b 3258static inline gro_result_t
5b252f0c 3259__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
96e93eab
HX
3260{
3261 struct sk_buff *p;
3262
3263 for (p = napi->gro_list; p; p = p->next) {
40d0802b
ED
3264 unsigned long diffs;
3265
3266 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3267 diffs |= compare_ether_header(skb_mac_header(p),
f64f9e71 3268 skb_gro_mac_header(skb));
40d0802b 3269 NAPI_GRO_CB(p)->same_flow = !diffs;
96e93eab
HX
3270 NAPI_GRO_CB(p)->flush = 0;
3271 }
3272
3273 return dev_gro_receive(napi, skb);
3274}
5d38a079 3275
c7c4b3b6 3276gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 3277{
5d0d9be8
HX
3278 switch (ret) {
3279 case GRO_NORMAL:
c7c4b3b6
BH
3280 if (netif_receive_skb(skb))
3281 ret = GRO_DROP;
3282 break;
5d38a079 3283
5d0d9be8 3284 case GRO_DROP:
5d0d9be8 3285 case GRO_MERGED_FREE:
5d38a079
HX
3286 kfree_skb(skb);
3287 break;
5b252f0c
BH
3288
3289 case GRO_HELD:
3290 case GRO_MERGED:
3291 break;
5d38a079
HX
3292 }
3293
c7c4b3b6 3294 return ret;
5d0d9be8
HX
3295}
3296EXPORT_SYMBOL(napi_skb_finish);
3297
78a478d0
HX
3298void skb_gro_reset_offset(struct sk_buff *skb)
3299{
3300 NAPI_GRO_CB(skb)->data_offset = 0;
3301 NAPI_GRO_CB(skb)->frag0 = NULL;
7489594c 3302 NAPI_GRO_CB(skb)->frag0_len = 0;
78a478d0 3303
78d3fd0b 3304 if (skb->mac_header == skb->tail &&
7489594c 3305 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
78a478d0
HX
3306 NAPI_GRO_CB(skb)->frag0 =
3307 page_address(skb_shinfo(skb)->frags[0].page) +
3308 skb_shinfo(skb)->frags[0].page_offset;
7489594c
HX
3309 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3310 }
78a478d0
HX
3311}
3312EXPORT_SYMBOL(skb_gro_reset_offset);
3313
c7c4b3b6 3314gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 3315{
86911732
HX
3316 skb_gro_reset_offset(skb);
3317
5d0d9be8 3318 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
d565b0a1
HX
3319}
3320EXPORT_SYMBOL(napi_gro_receive);
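
/*
 * Illustrative sketch (not part of dev.c): a NAPI driver opts into GRO
 * simply by handing receive skbs to napi_gro_receive() instead of
 * netif_receive_skb(); dev_gro_receive() above falls back to the
 * normal path when NETIF_F_GRO is off.  This reuses the hypothetical
 * foo_priv from the sketch after netif_receive_skb().
 */
static void foo_rx_one(struct foo_priv *priv, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, priv->dev);
	napi_gro_receive(&priv->napi, skb);
}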
3321
96e93eab
HX
3322void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3323{
96e93eab
HX
3324 __skb_pull(skb, skb_headlen(skb));
3325 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3326
3327 napi->skb = skb;
3328}
3329EXPORT_SYMBOL(napi_reuse_skb);
3330
76620aaf 3331struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 3332{
5d38a079 3333 struct sk_buff *skb = napi->skb;
5d38a079
HX
3334
3335 if (!skb) {
89d71a66
ED
3336 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3337 if (skb)
3338 napi->skb = skb;
80595d59 3339 }
96e93eab
HX
3340 return skb;
3341}
76620aaf 3342EXPORT_SYMBOL(napi_get_frags);
96e93eab 3343
c7c4b3b6
BH
3344gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3345 gro_result_t ret)
96e93eab 3346{
5d0d9be8
HX
3347 switch (ret) {
3348 case GRO_NORMAL:
86911732 3349 case GRO_HELD:
e76b69cc 3350 skb->protocol = eth_type_trans(skb, skb->dev);
86911732 3351
c7c4b3b6
BH
3352 if (ret == GRO_HELD)
3353 skb_gro_pull(skb, -ETH_HLEN);
3354 else if (netif_receive_skb(skb))
3355 ret = GRO_DROP;
86911732 3356 break;
5d38a079 3357
5d0d9be8 3358 case GRO_DROP:
5d0d9be8
HX
3359 case GRO_MERGED_FREE:
3360 napi_reuse_skb(napi, skb);
3361 break;
5b252f0c
BH
3362
3363 case GRO_MERGED:
3364 break;
5d0d9be8 3365 }
5d38a079 3366
c7c4b3b6 3367 return ret;
5d38a079 3368}
5d0d9be8
HX
3369EXPORT_SYMBOL(napi_frags_finish);
3370
76620aaf
HX
3371struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3372{
3373 struct sk_buff *skb = napi->skb;
3374 struct ethhdr *eth;
a5b1cf28
HX
3375 unsigned int hlen;
3376 unsigned int off;
76620aaf
HX
3377
3378 napi->skb = NULL;
3379
3380 skb_reset_mac_header(skb);
3381 skb_gro_reset_offset(skb);
3382
a5b1cf28
HX
3383 off = skb_gro_offset(skb);
3384 hlen = off + sizeof(*eth);
3385 eth = skb_gro_header_fast(skb, off);
3386 if (skb_gro_header_hard(skb, hlen)) {
3387 eth = skb_gro_header_slow(skb, hlen, off);
3388 if (unlikely(!eth)) {
3389 napi_reuse_skb(napi, skb);
3390 skb = NULL;
3391 goto out;
3392 }
76620aaf
HX
3393 }
3394
3395 skb_gro_pull(skb, sizeof(*eth));
3396
3397 /*
3398 * This works because the only protocols we care about don't require
3399 * special handling. We'll fix it up properly at the end.
3400 */
3401 skb->protocol = eth->h_proto;
3402
3403out:
3404 return skb;
3405}
3406EXPORT_SYMBOL(napi_frags_skb);
3407
c7c4b3b6 3408gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 3409{
76620aaf 3410 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
3411
3412 if (!skb)
c7c4b3b6 3413 return GRO_DROP;
5d0d9be8
HX
3414
3415 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3416}
5d38a079
HX
3417EXPORT_SYMBOL(napi_gro_frags);
3418
e326bed2
ED
3419/*
3420 * net_rps_action sends any pending IPIs for RPS.
3421 * Note: called with local irq disabled, but exits with local irq enabled.
3422 */
3423static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3424{
3425#ifdef CONFIG_RPS
3426 struct softnet_data *remsd = sd->rps_ipi_list;
3427
3428 if (remsd) {
3429 sd->rps_ipi_list = NULL;
3430
3431 local_irq_enable();
3432
3433 /* Send pending IPIs to kick RPS processing on remote CPUs. */
3434 while (remsd) {
3435 struct softnet_data *next = remsd->rps_ipi_next;
3436
3437 if (cpu_online(remsd->cpu))
3438 __smp_call_function_single(remsd->cpu,
3439 &remsd->csd, 0);
3440 remsd = next;
3441 }
3442 } else
3443#endif
3444 local_irq_enable();
3445}
3446
bea3348e 3447static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
3448{
3449 int work = 0;
eecfd7c4 3450 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 3451
e326bed2
ED
3452#ifdef CONFIG_RPS
3453 /* Check if we have pending IPIs; it's better to send them now
3454 * than to wait for net_rx_action() to end.
3455 */
3456 if (sd->rps_ipi_list) {
3457 local_irq_disable();
3458 net_rps_action_and_irq_enable(sd);
3459 }
3460#endif
bea3348e 3461 napi->weight = weight_p;
6e7676c1
CG
3462 local_irq_disable();
3463 while (work < quota) {
1da177e4 3464 struct sk_buff *skb;
6e7676c1
CG
3465 unsigned int qlen;
3466
3467 while ((skb = __skb_dequeue(&sd->process_queue))) {
3468 local_irq_enable();
3469 __netif_receive_skb(skb);
6e7676c1 3470 local_irq_disable();
76cc8b13
TH
3471 input_queue_head_incr(sd);
3472 if (++work >= quota) {
3473 local_irq_enable();
3474 return work;
3475 }
6e7676c1 3476 }
1da177e4 3477
e36fa2f7 3478 rps_lock(sd);
6e7676c1 3479 qlen = skb_queue_len(&sd->input_pkt_queue);
76cc8b13 3480 if (qlen)
6e7676c1
CG
3481 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3482 &sd->process_queue);
76cc8b13 3483
6e7676c1 3484 if (qlen < quota - work) {
eecfd7c4
ED
3485 /*
3486 * Inline a custom version of __napi_complete().
3487 * Only the current CPU owns and manipulates this napi,
3488 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
3489 * We can use a plain write instead of clear_bit(),
3490 * and we don't need an smp_mb() memory barrier.
3491 */
3492 list_del(&napi->poll_list);
3493 napi->state = 0;
3494
6e7676c1 3495 quota = work + qlen;
bea3348e 3496 }
e36fa2f7 3497 rps_unlock(sd);
6e7676c1
CG
3498 }
3499 local_irq_enable();
1da177e4 3500
bea3348e
SH
3501 return work;
3502}
1da177e4 3503
bea3348e
SH
3504/**
3505 * __napi_schedule - schedule for receive
c4ea43c5 3506 * @n: entry to schedule
bea3348e
SH
3507 *
3508 * The entry's receive function will be scheduled to run
3509 */
b5606c2d 3510void __napi_schedule(struct napi_struct *n)
bea3348e
SH
3511{
3512 unsigned long flags;
1da177e4 3513
bea3348e 3514 local_irq_save(flags);
eecfd7c4 3515 ____napi_schedule(&__get_cpu_var(softnet_data), n);
bea3348e 3516 local_irq_restore(flags);
1da177e4 3517}
bea3348e
SH
3518EXPORT_SYMBOL(__napi_schedule);
3519
d565b0a1
HX
3520void __napi_complete(struct napi_struct *n)
3521{
3522 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3523 BUG_ON(n->gro_list);
3524
3525 list_del(&n->poll_list);
3526 smp_mb__before_clear_bit();
3527 clear_bit(NAPI_STATE_SCHED, &n->state);
3528}
3529EXPORT_SYMBOL(__napi_complete);
3530
3531void napi_complete(struct napi_struct *n)
3532{
3533 unsigned long flags;
3534
3535 /*
3536 * don't let napi dequeue from the cpu poll list
3537 * just in case it's running on a different CPU
3538 */
3539 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3540 return;
3541
3542 napi_gro_flush(n);
3543 local_irq_save(flags);
3544 __napi_complete(n);
3545 local_irq_restore(flags);
3546}
3547EXPORT_SYMBOL(napi_complete);
3548
3549void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3550 int (*poll)(struct napi_struct *, int), int weight)
3551{
3552 INIT_LIST_HEAD(&napi->poll_list);
4ae5544f 3553 napi->gro_count = 0;
d565b0a1 3554 napi->gro_list = NULL;
5d38a079 3555 napi->skb = NULL;
d565b0a1
HX
3556 napi->poll = poll;
3557 napi->weight = weight;
3558 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 3559 napi->dev = dev;
5d38a079 3560#ifdef CONFIG_NETPOLL
d565b0a1
HX
3561 spin_lock_init(&napi->poll_lock);
3562 napi->poll_owner = -1;
3563#endif
3564 set_bit(NAPI_STATE_SCHED, &napi->state);
3565}
3566EXPORT_SYMBOL(netif_napi_add);
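
/*
 * Illustrative sketch (not part of dev.c): wiring a NAPI instance up in
 * a hypothetical driver.  Setup registers the poll callback (a ->poll()
 * body is sketched after netif_receive_skb() above); the RX interrupt
 * masks further interrupts and schedules the poll, and napi_complete()
 * in ->poll() eventually re-enables them.
 */
static irqreturn_t foo_rx_interrupt(int irq, void *data)
{
	struct foo_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		foo_disable_rx_irq(priv);
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static void foo_setup_napi(struct net_device *dev, struct foo_priv *priv)
{
	netif_napi_add(dev, &priv->napi, foo_poll, 64);	/* weight 64 */
	napi_enable(&priv->napi);	/* clears the SCHED bit set above */
}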
3567
3568void netif_napi_del(struct napi_struct *napi)
3569{
3570 struct sk_buff *skb, *next;
3571
d7b06636 3572 list_del_init(&napi->dev_list);
76620aaf 3573 napi_free_frags(napi);
d565b0a1
HX
3574
3575 for (skb = napi->gro_list; skb; skb = next) {
3576 next = skb->next;
3577 skb->next = NULL;
3578 kfree_skb(skb);
3579 }
3580
3581 napi->gro_list = NULL;
4ae5544f 3582 napi->gro_count = 0;
d565b0a1
HX
3583}
3584EXPORT_SYMBOL(netif_napi_del);
3585
1da177e4
LT
3586static void net_rx_action(struct softirq_action *h)
3587{
e326bed2 3588 struct softnet_data *sd = &__get_cpu_var(softnet_data);
24f8b238 3589 unsigned long time_limit = jiffies + 2;
51b0bded 3590 int budget = netdev_budget;
53fb95d3
MM
3591 void *have;
3592
1da177e4
LT
3593 local_irq_disable();
3594
e326bed2 3595 while (!list_empty(&sd->poll_list)) {
bea3348e
SH
3596 struct napi_struct *n;
3597 int work, weight;
1da177e4 3598
bea3348e 3599 /* If the softirq window is exhausted then punt.
24f8b238
SH
3600 * Allow this to run for 2 jiffies, which allows
3601 * an average latency of 1.5/HZ.
bea3348e 3602 */
24f8b238 3603 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
1da177e4
LT
3604 goto softnet_break;
3605
3606 local_irq_enable();
3607
bea3348e
SH
3608 /* Even though interrupts have been re-enabled, this
3609 * access is safe because interrupts can only add new
3610 * entries to the tail of this list, and only ->poll()
3611 * calls can remove this head entry from the list.
3612 */
e326bed2 3613 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
1da177e4 3614
bea3348e
SH
3615 have = netpoll_poll_lock(n);
3616
3617 weight = n->weight;
3618
0a7606c1
DM
3619 /* This NAPI_STATE_SCHED test is for avoiding a race
3620 * with netpoll's poll_napi(). Only the entity which
3621 * obtains the lock and sees NAPI_STATE_SCHED set will
3622 * actually make the ->poll() call. Therefore we avoid
3623 * accidentally calling ->poll() when NAPI is not scheduled.
3624 */
3625 work = 0;
4ea7e386 3626 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
0a7606c1 3627 work = n->poll(n, weight);
4ea7e386
NH
3628 trace_napi_poll(n);
3629 }
bea3348e
SH
3630
3631 WARN_ON_ONCE(work > weight);
3632
3633 budget -= work;
3634
3635 local_irq_disable();
3636
3637 /* Drivers must not modify the NAPI state if they
3638 * consume the entire weight. In such cases this code
3639 * still "owns" the NAPI instance and therefore can
3640 * move the instance around on the list at-will.
3641 */
fed17f30 3642 if (unlikely(work == weight)) {
ff780cd8
HX
3643 if (unlikely(napi_disable_pending(n))) {
3644 local_irq_enable();
3645 napi_complete(n);
3646 local_irq_disable();
3647 } else
e326bed2 3648 list_move_tail(&n->poll_list, &sd->poll_list);
fed17f30 3649 }
bea3348e
SH
3650
3651 netpoll_poll_unlock(have);
1da177e4
LT
3652 }
3653out:
e326bed2 3654 net_rps_action_and_irq_enable(sd);
0a9627f2 3655
db217334
CL
3656#ifdef CONFIG_NET_DMA
3657 /*
3658 * There may not be any more sk_buffs coming right now, so push
3659 * any pending DMA copies to hardware
3660 */
2ba05622 3661 dma_issue_pending_all();
db217334 3662#endif
bea3348e 3663
1da177e4
LT
3664 return;
3665
3666softnet_break:
dee42870 3667 sd->time_squeeze++;
1da177e4
LT
3668 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3669 goto out;
3670}
3671
d1b19dff 3672static gifconf_func_t *gifconf_list[NPROTO];
1da177e4
LT
3673
3674/**
3675 * register_gifconf - register a SIOCGIF handler
3676 * @family: Address family
3677 * @gifconf: Function handler
3678 *
3679 * Register protocol dependent address dumping routines. The handler
3680 * that is passed must not be freed or reused until it has been replaced
3681 * by another handler.
3682 */
d1b19dff 3683int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
1da177e4
LT
3684{
3685 if (family >= NPROTO)
3686 return -EINVAL;
3687 gifconf_list[family] = gifconf;
3688 return 0;
3689}
d1b19dff 3690EXPORT_SYMBOL(register_gifconf);
1da177e4
LT
3691
3692
3693/*
3694 * Map an interface index to its name (SIOCGIFNAME)
3695 */
3696
3697/*
3698 * We need this ioctl for efficient implementation of the
3699 * if_indextoname() function required by the IPv6 API. Without
3700 * it, we would have to search all the interfaces to find a
3701 * match. --pb
3702 */
3703
881d966b 3704static int dev_ifname(struct net *net, struct ifreq __user *arg)
1da177e4
LT
3705{
3706 struct net_device *dev;
3707 struct ifreq ifr;
3708
3709 /*
3710 * Fetch the caller's info block.
3711 */
3712
3713 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3714 return -EFAULT;
3715
fb699dfd
ED
3716 rcu_read_lock();
3717 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
1da177e4 3718 if (!dev) {
fb699dfd 3719 rcu_read_unlock();
1da177e4
LT
3720 return -ENODEV;
3721 }
3722
3723 strcpy(ifr.ifr_name, dev->name);
fb699dfd 3724 rcu_read_unlock();
1da177e4
LT
3725
3726 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3727 return -EFAULT;
3728 return 0;
3729}
3730
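/*
 * Editor's illustrative sketch (not part of dev.c): the userspace side of
 * the SIOCGIFNAME ioctl served by dev_ifname() above, i.e. roughly what
 * if_indextoname() does under the hood.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int print_ifname(int ifindex)
{
	struct ifreq ifr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = ifindex;
	ret = ioctl(fd, SIOCGIFNAME, &ifr);
	if (ret == 0)
		printf("ifindex %d is %s\n", ifindex, ifr.ifr_name);
	close(fd);
	return ret;
}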
3731/*
3732 * Perform a SIOCGIFCONF call. This structure will change
3733 * size eventually, and there is nothing I can do about it.
3734 * Thus we will need a 'compatibility mode'.
3735 */
3736
881d966b 3737static int dev_ifconf(struct net *net, char __user *arg)
1da177e4
LT
3738{
3739 struct ifconf ifc;
3740 struct net_device *dev;
3741 char __user *pos;
3742 int len;
3743 int total;
3744 int i;
3745
3746 /*
3747 * Fetch the caller's info block.
3748 */
3749
3750 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3751 return -EFAULT;
3752
3753 pos = ifc.ifc_buf;
3754 len = ifc.ifc_len;
3755
3756 /*
3757 * Loop over the interfaces, and write an info block for each.
3758 */
3759
3760 total = 0;
881d966b 3761 for_each_netdev(net, dev) {
1da177e4
LT
3762 for (i = 0; i < NPROTO; i++) {
3763 if (gifconf_list[i]) {
3764 int done;
3765 if (!pos)
3766 done = gifconf_list[i](dev, NULL, 0);
3767 else
3768 done = gifconf_list[i](dev, pos + total,
3769 len - total);
3770 if (done < 0)
3771 return -EFAULT;
3772 total += done;
3773 }
3774 }
4ec93edb 3775 }
1da177e4
LT
3776
3777 /*
3778 * All done. Write the updated control block back to the caller.
3779 */
3780 ifc.ifc_len = total;
3781
3782 /*
3783 * Both BSD and Solaris return 0 here, so we do too.
3784 */
3785 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3786}
3787
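/*
 * Editor's illustrative sketch (not part of dev.c): enumerating interfaces
 * through the SIOCGIFCONF path implemented by dev_ifconf() above. On an
 * AF_INET socket this traditionally reports interfaces with an IPv4
 * address; the kernel rewrites ifc_len to the bytes actually filled in.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static void list_interfaces(int fd)
{
	struct ifreq reqs[32];
	struct ifconf ifc;
	int i;

	ifc.ifc_len = sizeof(reqs);
	ifc.ifc_req = reqs;
	if (ioctl(fd, SIOCGIFCONF, &ifc) == 0)
		for (i = 0; i < ifc.ifc_len / (int)sizeof(struct ifreq); i++)
			printf("%s\n", reqs[i].ifr_name);
}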
3788#ifdef CONFIG_PROC_FS
3789/*
3790 * This is invoked by the /proc filesystem handler to display a device
3791 * in detail.
3792 */
7562f876 3793void *dev_seq_start(struct seq_file *seq, loff_t *pos)
c6d14c84 3794 __acquires(RCU)
1da177e4 3795{
e372c414 3796 struct net *net = seq_file_net(seq);
7562f876 3797 loff_t off;
1da177e4 3798 struct net_device *dev;
1da177e4 3799
c6d14c84 3800 rcu_read_lock();
7562f876
PE
3801 if (!*pos)
3802 return SEQ_START_TOKEN;
1da177e4 3803
7562f876 3804 off = 1;
c6d14c84 3805 for_each_netdev_rcu(net, dev)
7562f876
PE
3806 if (off++ == *pos)
3807 return dev;
1da177e4 3808
7562f876 3809 return NULL;
1da177e4
LT
3810}
3811
3812void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3813{
c6d14c84
ED
3814 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3815 first_net_device(seq_file_net(seq)) :
3816 next_net_device((struct net_device *)v);
3817
1da177e4 3818 ++*pos;
c6d14c84 3819 return rcu_dereference(dev);
1da177e4
LT
3820}
3821
3822void dev_seq_stop(struct seq_file *seq, void *v)
c6d14c84 3823 __releases(RCU)
1da177e4 3824{
c6d14c84 3825 rcu_read_unlock();
1da177e4
LT
3826}
3827
3828static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3829{
28172739
ED
3830 struct rtnl_link_stats64 temp;
3831 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
1da177e4 3832
be1f3c2c
BH
3833 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
3834 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
5a1b5898
RR
3835 dev->name, stats->rx_bytes, stats->rx_packets,
3836 stats->rx_errors,
3837 stats->rx_dropped + stats->rx_missed_errors,
3838 stats->rx_fifo_errors,
3839 stats->rx_length_errors + stats->rx_over_errors +
3840 stats->rx_crc_errors + stats->rx_frame_errors,
3841 stats->rx_compressed, stats->multicast,
3842 stats->tx_bytes, stats->tx_packets,
3843 stats->tx_errors, stats->tx_dropped,
3844 stats->tx_fifo_errors, stats->collisions,
3845 stats->tx_carrier_errors +
3846 stats->tx_aborted_errors +
3847 stats->tx_window_errors +
3848 stats->tx_heartbeat_errors,
3849 stats->tx_compressed);
1da177e4
LT
3850}
3851
3852/*
3853 * Called from the PROCfs module. This now uses the new arbitrary sized
3854 * /proc/net interface to create /proc/net/dev
3855 */
3856static int dev_seq_show(struct seq_file *seq, void *v)
3857{
3858 if (v == SEQ_START_TOKEN)
3859 seq_puts(seq, "Inter-| Receive "
3860 " | Transmit\n"
3861 " face |bytes packets errs drop fifo frame "
3862 "compressed multicast|bytes packets errs "
3863 "drop fifo colls carrier compressed\n");
3864 else
3865 dev_seq_printf_stats(seq, v);
3866 return 0;
3867}
3868
dee42870 3869static struct softnet_data *softnet_get_online(loff_t *pos)
1da177e4 3870{
dee42870 3871 struct softnet_data *sd = NULL;
1da177e4 3872
0c0b0aca 3873 while (*pos < nr_cpu_ids)
4ec93edb 3874 if (cpu_online(*pos)) {
dee42870 3875 sd = &per_cpu(softnet_data, *pos);
1da177e4
LT
3876 break;
3877 } else
3878 ++*pos;
dee42870 3879 return sd;
1da177e4
LT
3880}
3881
3882static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3883{
3884 return softnet_get_online(pos);
3885}
3886
3887static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3888{
3889 ++*pos;
3890 return softnet_get_online(pos);
3891}
3892
3893static void softnet_seq_stop(struct seq_file *seq, void *v)
3894{
3895}
3896
3897static int softnet_seq_show(struct seq_file *seq, void *v)
3898{
dee42870 3899 struct softnet_data *sd = v;
1da177e4 3900
0a9627f2 3901 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
dee42870 3902 sd->processed, sd->dropped, sd->time_squeeze, 0,
c1ebcdb8 3903 0, 0, 0, 0, /* was fastroute */
dee42870 3904 sd->cpu_collision, sd->received_rps);
1da177e4
LT
3905 return 0;
3906}
3907
f690808e 3908static const struct seq_operations dev_seq_ops = {
1da177e4
LT
3909 .start = dev_seq_start,
3910 .next = dev_seq_next,
3911 .stop = dev_seq_stop,
3912 .show = dev_seq_show,
3913};
3914
3915static int dev_seq_open(struct inode *inode, struct file *file)
3916{
e372c414
DL
3917 return seq_open_net(inode, file, &dev_seq_ops,
3918 sizeof(struct seq_net_private));
1da177e4
LT
3919}
3920
9a32144e 3921static const struct file_operations dev_seq_fops = {
1da177e4
LT
3922 .owner = THIS_MODULE,
3923 .open = dev_seq_open,
3924 .read = seq_read,
3925 .llseek = seq_lseek,
e372c414 3926 .release = seq_release_net,
1da177e4
LT
3927};
3928
f690808e 3929static const struct seq_operations softnet_seq_ops = {
1da177e4
LT
3930 .start = softnet_seq_start,
3931 .next = softnet_seq_next,
3932 .stop = softnet_seq_stop,
3933 .show = softnet_seq_show,
3934};
3935
3936static int softnet_seq_open(struct inode *inode, struct file *file)
3937{
3938 return seq_open(file, &softnet_seq_ops);
3939}
3940
9a32144e 3941static const struct file_operations softnet_seq_fops = {
1da177e4
LT
3942 .owner = THIS_MODULE,
3943 .open = softnet_seq_open,
3944 .read = seq_read,
3945 .llseek = seq_lseek,
3946 .release = seq_release,
3947};
3948
0e1256ff
SH
3949static void *ptype_get_idx(loff_t pos)
3950{
3951 struct packet_type *pt = NULL;
3952 loff_t i = 0;
3953 int t;
3954
3955 list_for_each_entry_rcu(pt, &ptype_all, list) {
3956 if (i == pos)
3957 return pt;
3958 ++i;
3959 }
3960
82d8a867 3961 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
0e1256ff
SH
3962 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3963 if (i == pos)
3964 return pt;
3965 ++i;
3966 }
3967 }
3968 return NULL;
3969}
3970
3971static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
72348a42 3972 __acquires(RCU)
0e1256ff
SH
3973{
3974 rcu_read_lock();
3975 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3976}
3977
3978static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3979{
3980 struct packet_type *pt;
3981 struct list_head *nxt;
3982 int hash;
3983
3984 ++*pos;
3985 if (v == SEQ_START_TOKEN)
3986 return ptype_get_idx(0);
3987
3988 pt = v;
3989 nxt = pt->list.next;
3990 if (pt->type == htons(ETH_P_ALL)) {
3991 if (nxt != &ptype_all)
3992 goto found;
3993 hash = 0;
3994 nxt = ptype_base[0].next;
3995 } else
82d8a867 3996 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
0e1256ff
SH
3997
3998 while (nxt == &ptype_base[hash]) {
82d8a867 3999 if (++hash >= PTYPE_HASH_SIZE)
0e1256ff
SH
4000 return NULL;
4001 nxt = ptype_base[hash].next;
4002 }
4003found:
4004 return list_entry(nxt, struct packet_type, list);
4005}
4006
4007static void ptype_seq_stop(struct seq_file *seq, void *v)
72348a42 4008 __releases(RCU)
0e1256ff
SH
4009{
4010 rcu_read_unlock();
4011}
4012
0e1256ff
SH
4013static int ptype_seq_show(struct seq_file *seq, void *v)
4014{
4015 struct packet_type *pt = v;
4016
4017 if (v == SEQ_START_TOKEN)
4018 seq_puts(seq, "Type Device Function\n");
c346dca1 4019 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
0e1256ff
SH
4020 if (pt->type == htons(ETH_P_ALL))
4021 seq_puts(seq, "ALL ");
4022 else
4023 seq_printf(seq, "%04x", ntohs(pt->type));
4024
908cd2da
AD
4025 seq_printf(seq, " %-8s %pF\n",
4026 pt->dev ? pt->dev->name : "", pt->func);
0e1256ff
SH
4027 }
4028
4029 return 0;
4030}
4031
4032static const struct seq_operations ptype_seq_ops = {
4033 .start = ptype_seq_start,
4034 .next = ptype_seq_next,
4035 .stop = ptype_seq_stop,
4036 .show = ptype_seq_show,
4037};
4038
4039static int ptype_seq_open(struct inode *inode, struct file *file)
4040{
2feb27db
PE
4041 return seq_open_net(inode, file, &ptype_seq_ops,
4042 sizeof(struct seq_net_private));
0e1256ff
SH
4043}
4044
4045static const struct file_operations ptype_seq_fops = {
4046 .owner = THIS_MODULE,
4047 .open = ptype_seq_open,
4048 .read = seq_read,
4049 .llseek = seq_lseek,
2feb27db 4050 .release = seq_release_net,
0e1256ff
SH
4051};
4052
4053
4665079c 4054static int __net_init dev_proc_net_init(struct net *net)
1da177e4
LT
4055{
4056 int rc = -ENOMEM;
4057
881d966b 4058 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
1da177e4 4059 goto out;
881d966b 4060 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
1da177e4 4061 goto out_dev;
881d966b 4062 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
457c4cbc 4063 goto out_softnet;
0e1256ff 4064
881d966b 4065 if (wext_proc_init(net))
457c4cbc 4066 goto out_ptype;
1da177e4
LT
4067 rc = 0;
4068out:
4069 return rc;
457c4cbc 4070out_ptype:
881d966b 4071 proc_net_remove(net, "ptype");
1da177e4 4072out_softnet:
881d966b 4073 proc_net_remove(net, "softnet_stat");
1da177e4 4074out_dev:
881d966b 4075 proc_net_remove(net, "dev");
1da177e4
LT
4076 goto out;
4077}
881d966b 4078
4665079c 4079static void __net_exit dev_proc_net_exit(struct net *net)
881d966b
EB
4080{
4081 wext_proc_exit(net);
4082
4083 proc_net_remove(net, "ptype");
4084 proc_net_remove(net, "softnet_stat");
4085 proc_net_remove(net, "dev");
4086}
4087
022cbae6 4088static struct pernet_operations __net_initdata dev_proc_ops = {
881d966b
EB
4089 .init = dev_proc_net_init,
4090 .exit = dev_proc_net_exit,
4091};
4092
4093static int __init dev_proc_init(void)
4094{
4095 return register_pernet_subsys(&dev_proc_ops);
4096}
1da177e4
LT
4097#else
4098#define dev_proc_init() 0
4099#endif /* CONFIG_PROC_FS */
4100
4101
4102/**
4103 * netdev_set_master - set up master/slave pair
4104 * @slave: slave device
4105 * @master: new master device
4106 *
4107 * Changes the master device of the slave. Pass %NULL to break the
4108 * bonding. The caller must hold the RTNL semaphore. On a failure
4109 * a negative errno code is returned. On success the reference counts
4110 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
4111 * function returns zero.
4112 */
4113int netdev_set_master(struct net_device *slave, struct net_device *master)
4114{
4115 struct net_device *old = slave->master;
4116
4117 ASSERT_RTNL();
4118
4119 if (master) {
4120 if (old)
4121 return -EBUSY;
4122 dev_hold(master);
4123 }
4124
4125 slave->master = master;
4ec93edb 4126
283f2fe8
ED
4127 if (old) {
4128 synchronize_net();
1da177e4 4129 dev_put(old);
283f2fe8 4130 }
1da177e4
LT
4131 if (master)
4132 slave->flags |= IFF_SLAVE;
4133 else
4134 slave->flags &= ~IFF_SLAVE;
4135
4136 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4137 return 0;
4138}
d1b19dff 4139EXPORT_SYMBOL(netdev_set_master);
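/*
 * Editor's illustrative sketch (not part of dev.c): how a bonding-style
 * driver might enslave a device with netdev_set_master(). Error handling
 * is trimmed and both devices are assumed to be held by the caller.
 */
static int my_enslave(struct net_device *bond, struct net_device *slave)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave, bond);	/* -EBUSY if already enslaved */
	rtnl_unlock();
	return err;
}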
1da177e4 4140
b6c40d68
PM
4141static void dev_change_rx_flags(struct net_device *dev, int flags)
4142{
d314774c
SH
4143 const struct net_device_ops *ops = dev->netdev_ops;
4144
4145 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4146 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
4147}
4148
dad9b335 4149static int __dev_set_promiscuity(struct net_device *dev, int inc)
1da177e4
LT
4150{
4151 unsigned short old_flags = dev->flags;
8192b0c4
DH
4152 uid_t uid;
4153 gid_t gid;
1da177e4 4154
24023451
PM
4155 ASSERT_RTNL();
4156
dad9b335
WC
4157 dev->flags |= IFF_PROMISC;
4158 dev->promiscuity += inc;
4159 if (dev->promiscuity == 0) {
4160 /*
4161 * Avoid overflow.
4162 * If inc causes overflow, leave promisc untouched and return an error.
4163 */
4164 if (inc < 0)
4165 dev->flags &= ~IFF_PROMISC;
4166 else {
4167 dev->promiscuity -= inc;
4168 printk(KERN_WARNING "%s: promiscuity touches roof, "
4169 "set promiscuity failed, promiscuity feature "
4170 "of device might be broken.\n", dev->name);
4171 return -EOVERFLOW;
4172 }
4173 }
52609c0b 4174 if (dev->flags != old_flags) {
1da177e4
LT
4175 printk(KERN_INFO "device %s %s promiscuous mode\n",
4176 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4ec93edb 4177 "left");
8192b0c4
DH
4178 if (audit_enabled) {
4179 current_uid_gid(&uid, &gid);
7759db82
KHK
4180 audit_log(current->audit_context, GFP_ATOMIC,
4181 AUDIT_ANOM_PROMISCUOUS,
4182 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4183 dev->name, (dev->flags & IFF_PROMISC),
4184 (old_flags & IFF_PROMISC),
4185 audit_get_loginuid(current),
8192b0c4 4186 uid, gid,
7759db82 4187 audit_get_sessionid(current));
8192b0c4 4188 }
24023451 4189
b6c40d68 4190 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 4191 }
dad9b335 4192 return 0;
1da177e4
LT
4193}
4194
4417da66
PM
4195/**
4196 * dev_set_promiscuity - update promiscuity count on a device
4197 * @dev: device
4198 * @inc: modifier
4199 *
4200 * Add or remove promiscuity from a device. While the count in the device
4201 * remains above zero the interface remains promiscuous. Once it hits zero
4202 * the device reverts back to normal filtering operation. A negative inc
4203 * value is used to drop promiscuity on the device.
dad9b335 4204 * Return 0 if successful or a negative errno code on error.
4417da66 4205 */
dad9b335 4206int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66
PM
4207{
4208 unsigned short old_flags = dev->flags;
dad9b335 4209 int err;
4417da66 4210
dad9b335 4211 err = __dev_set_promiscuity(dev, inc);
4b5a698e 4212 if (err < 0)
dad9b335 4213 return err;
4417da66
PM
4214 if (dev->flags != old_flags)
4215 dev_set_rx_mode(dev);
dad9b335 4216 return err;
4417da66 4217}
d1b19dff 4218EXPORT_SYMBOL(dev_set_promiscuity);
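/*
 * Editor's illustrative sketch (not part of dev.c): because promiscuity is
 * a counter, paired +1/-1 calls from independent users nest correctly.
 * Both calls must run under RTNL; the capture wrappers are hypothetical.
 */
static int my_capture_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* enter promiscuous mode */
}

static void my_capture_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);	/* promisc ends only at count zero */
}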
4417da66 4219
1da177e4
LT
4220/**
4221 * dev_set_allmulti - update allmulti count on a device
4222 * @dev: device
4223 * @inc: modifier
4224 *
4225 * Add or remove reception of all multicast frames to a device. While the
4226 * count in the device remains above zero the interface keeps listening
4227 * to all multicast frames. Once it hits zero the device reverts back to normal
4228 * filtering operation. A negative @inc value is used to drop the counter
4229 * when releasing a resource needing all multicasts.
dad9b335 4230 * Return 0 if successful or a negative errno code on error.
1da177e4
LT
4231 */
4232
dad9b335 4233int dev_set_allmulti(struct net_device *dev, int inc)
1da177e4
LT
4234{
4235 unsigned short old_flags = dev->flags;
4236
24023451
PM
4237 ASSERT_RTNL();
4238
1da177e4 4239 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
4240 dev->allmulti += inc;
4241 if (dev->allmulti == 0) {
4242 /*
4243 * Avoid overflow.
4244 * If inc causes overflow, leave allmulti untouched and return an error.
4245 */
4246 if (inc < 0)
4247 dev->flags &= ~IFF_ALLMULTI;
4248 else {
4249 dev->allmulti -= inc;
4250 printk(KERN_WARNING "%s: allmulti touches roof, "
4251 "set allmulti failed, allmulti feature of "
4252 "device might be broken.\n", dev->name);
4253 return -EOVERFLOW;
4254 }
4255 }
24023451 4256 if (dev->flags ^ old_flags) {
b6c40d68 4257 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 4258 dev_set_rx_mode(dev);
24023451 4259 }
dad9b335 4260 return 0;
4417da66 4261}
d1b19dff 4262EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
4263
4264/*
4265 * Upload unicast and multicast address lists to device and
4266 * configure RX filtering. When the device doesn't support unicast
53ccaae1 4267 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
4268 * are present.
4269 */
4270void __dev_set_rx_mode(struct net_device *dev)
4271{
d314774c
SH
4272 const struct net_device_ops *ops = dev->netdev_ops;
4273
4417da66
PM
4274 /* dev_open will call this function so the list will stay sane. */
4275 if (!(dev->flags&IFF_UP))
4276 return;
4277
4278 if (!netif_device_present(dev))
40b77c94 4279 return;
4417da66 4280
d314774c
SH
4281 if (ops->ndo_set_rx_mode)
4282 ops->ndo_set_rx_mode(dev);
4417da66
PM
4283 else {
4284 /* Unicast address changes may only happen under the rtnl,
4285 * therefore calling __dev_set_promiscuity here is safe.
4286 */
32e7bfc4 4287 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4417da66
PM
4288 __dev_set_promiscuity(dev, 1);
4289 dev->uc_promisc = 1;
32e7bfc4 4290 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4417da66
PM
4291 __dev_set_promiscuity(dev, -1);
4292 dev->uc_promisc = 0;
4293 }
4294
d314774c
SH
4295 if (ops->ndo_set_multicast_list)
4296 ops->ndo_set_multicast_list(dev);
4417da66
PM
4297 }
4298}
4299
4300void dev_set_rx_mode(struct net_device *dev)
4301{
b9e40857 4302 netif_addr_lock_bh(dev);
4417da66 4303 __dev_set_rx_mode(dev);
b9e40857 4304 netif_addr_unlock_bh(dev);
1da177e4
LT
4305}
4306
f0db275a
SH
4307/**
4308 * dev_get_flags - get flags reported to userspace
4309 * @dev: device
4310 *
4311 * Get the combination of flag bits exported through APIs to userspace.
4312 */
1da177e4
LT
4313unsigned dev_get_flags(const struct net_device *dev)
4314{
4315 unsigned flags;
4316
4317 flags = (dev->flags & ~(IFF_PROMISC |
4318 IFF_ALLMULTI |
b00055aa
SR
4319 IFF_RUNNING |
4320 IFF_LOWER_UP |
4321 IFF_DORMANT)) |
1da177e4
LT
4322 (dev->gflags & (IFF_PROMISC |
4323 IFF_ALLMULTI));
4324
b00055aa
SR
4325 if (netif_running(dev)) {
4326 if (netif_oper_up(dev))
4327 flags |= IFF_RUNNING;
4328 if (netif_carrier_ok(dev))
4329 flags |= IFF_LOWER_UP;
4330 if (netif_dormant(dev))
4331 flags |= IFF_DORMANT;
4332 }
1da177e4
LT
4333
4334 return flags;
4335}
d1b19dff 4336EXPORT_SYMBOL(dev_get_flags);
1da177e4 4337
bd380811 4338int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 4339{
1da177e4 4340 int old_flags = dev->flags;
bd380811 4341 int ret;
1da177e4 4342
24023451
PM
4343 ASSERT_RTNL();
4344
1da177e4
LT
4345 /*
4346 * Set the flags on our device.
4347 */
4348
4349 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4350 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4351 IFF_AUTOMEDIA)) |
4352 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4353 IFF_ALLMULTI));
4354
4355 /*
4356 * Load in the correct multicast list now the flags have changed.
4357 */
4358
b6c40d68
PM
4359 if ((old_flags ^ flags) & IFF_MULTICAST)
4360 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 4361
4417da66 4362 dev_set_rx_mode(dev);
1da177e4
LT
4363
4364 /*
4365 * Have we downed the interface? We handle IFF_UP ourselves
4366 * according to user attempts to set it, rather than blindly
4367 * setting it.
4368 */
4369
4370 ret = 0;
4371 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
bd380811 4372 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4
LT
4373
4374 if (!ret)
4417da66 4375 dev_set_rx_mode(dev);
1da177e4
LT
4376 }
4377
1da177e4 4378 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff
ED
4379 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4380
1da177e4
LT
4381 dev->gflags ^= IFF_PROMISC;
4382 dev_set_promiscuity(dev, inc);
4383 }
4384
4385 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4386 is important. Some (broken) drivers set IFF_PROMISC when
4387 IFF_ALLMULTI is requested, without asking us and without reporting it.
4388 */
4389 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
4390 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4391
1da177e4
LT
4392 dev->gflags ^= IFF_ALLMULTI;
4393 dev_set_allmulti(dev, inc);
4394 }
4395
bd380811
PM
4396 return ret;
4397}
4398
4399void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4400{
4401 unsigned int changes = dev->flags ^ old_flags;
4402
4403 if (changes & IFF_UP) {
4404 if (dev->flags & IFF_UP)
4405 call_netdevice_notifiers(NETDEV_UP, dev);
4406 else
4407 call_netdevice_notifiers(NETDEV_DOWN, dev);
4408 }
4409
4410 if (dev->flags & IFF_UP &&
4411 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4412 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4413}
4414
4415/**
4416 * dev_change_flags - change device settings
4417 * @dev: device
4418 * @flags: device state flags
4419 *
4420 * Change settings on device based state flags. The flags are
4421 * in the userspace exported format.
4422 */
4423int dev_change_flags(struct net_device *dev, unsigned flags)
4424{
4425 int ret, changes;
4426 int old_flags = dev->flags;
4427
4428 ret = __dev_change_flags(dev, flags);
4429 if (ret < 0)
4430 return ret;
4431
4432 changes = old_flags ^ dev->flags;
7c355f53
TG
4433 if (changes)
4434 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
1da177e4 4435
bd380811 4436 __dev_notify_flags(dev, old_flags);
1da177e4
LT
4437 return ret;
4438}
d1b19dff 4439EXPORT_SYMBOL(dev_change_flags);
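/*
 * Editor's illustrative sketch (not part of dev.c): bringing an interface
 * up from kernel code with dev_change_flags(), which also emits
 * RTM_NEWLINK and the NETDEV_UP notifier via __dev_notify_flags().
 */
static int my_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}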
1da177e4 4440
f0db275a
SH
4441/**
4442 * dev_set_mtu - Change maximum transfer unit
4443 * @dev: device
4444 * @new_mtu: new transfer unit
4445 *
4446 * Change the maximum transfer size of the network device.
4447 */
1da177e4
LT
4448int dev_set_mtu(struct net_device *dev, int new_mtu)
4449{
d314774c 4450 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4451 int err;
4452
4453 if (new_mtu == dev->mtu)
4454 return 0;
4455
4456 /* MTU must be positive. */
4457 if (new_mtu < 0)
4458 return -EINVAL;
4459
4460 if (!netif_device_present(dev))
4461 return -ENODEV;
4462
4463 err = 0;
d314774c
SH
4464 if (ops->ndo_change_mtu)
4465 err = ops->ndo_change_mtu(dev, new_mtu);
1da177e4
LT
4466 else
4467 dev->mtu = new_mtu;
d314774c 4468
1da177e4 4469 if (!err && dev->flags & IFF_UP)
056925ab 4470 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
1da177e4
LT
4471 return err;
4472}
d1b19dff 4473EXPORT_SYMBOL(dev_set_mtu);
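/*
 * Editor's illustrative sketch (not part of dev.c): switching a device to
 * a jumbo MTU under RTNL. dev_set_mtu() leaves the range check to the
 * driver's ndo_change_mtu(), so a driver that rejects 9000 returns its
 * own errno here.
 */
static int my_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);	/* NETDEV_CHANGEMTU fires on success */
	rtnl_unlock();
	return err;
}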
1da177e4 4474
f0db275a
SH
4475/**
4476 * dev_set_mac_address - Change Media Access Control Address
4477 * @dev: device
4478 * @sa: new address
4479 *
4480 * Change the hardware (MAC) address of the device
4481 */
1da177e4
LT
4482int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4483{
d314774c 4484 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4485 int err;
4486
d314774c 4487 if (!ops->ndo_set_mac_address)
1da177e4
LT
4488 return -EOPNOTSUPP;
4489 if (sa->sa_family != dev->type)
4490 return -EINVAL;
4491 if (!netif_device_present(dev))
4492 return -ENODEV;
d314774c 4493 err = ops->ndo_set_mac_address(dev, sa);
1da177e4 4494 if (!err)
056925ab 4495 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
4496 return err;
4497}
d1b19dff 4498EXPORT_SYMBOL(dev_set_mac_address);
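/*
 * Editor's illustrative sketch (not part of dev.c): programming a new MAC
 * through dev_set_mac_address(). sa_family must match dev->type, and the
 * caller is assumed to hold RTNL.
 */
static int my_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;	/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, mac, dev->addr_len);	/* ETH_ALEN bytes for ethernet */
	return dev_set_mac_address(dev, &sa);
}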
1da177e4
LT
4499
4500/*
3710becf 4501 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
1da177e4 4502 */
14e3e079 4503static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
1da177e4
LT
4504{
4505 int err;
3710becf 4506 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
1da177e4
LT
4507
4508 if (!dev)
4509 return -ENODEV;
4510
4511 switch (cmd) {
d1b19dff
ED
4512 case SIOCGIFFLAGS: /* Get interface flags */
4513 ifr->ifr_flags = (short) dev_get_flags(dev);
4514 return 0;
1da177e4 4515
d1b19dff
ED
4516 case SIOCGIFMETRIC: /* Get the metric on the interface
4517 (currently unused) */
4518 ifr->ifr_metric = 0;
4519 return 0;
1da177e4 4520
d1b19dff
ED
4521 case SIOCGIFMTU: /* Get the MTU of a device */
4522 ifr->ifr_mtu = dev->mtu;
4523 return 0;
1da177e4 4524
d1b19dff
ED
4525 case SIOCGIFHWADDR:
4526 if (!dev->addr_len)
4527 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4528 else
4529 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4530 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4531 ifr->ifr_hwaddr.sa_family = dev->type;
4532 return 0;
1da177e4 4533
d1b19dff
ED
4534 case SIOCGIFSLAVE:
4535 err = -EINVAL;
4536 break;
14e3e079 4537
d1b19dff
ED
4538 case SIOCGIFMAP:
4539 ifr->ifr_map.mem_start = dev->mem_start;
4540 ifr->ifr_map.mem_end = dev->mem_end;
4541 ifr->ifr_map.base_addr = dev->base_addr;
4542 ifr->ifr_map.irq = dev->irq;
4543 ifr->ifr_map.dma = dev->dma;
4544 ifr->ifr_map.port = dev->if_port;
4545 return 0;
14e3e079 4546
d1b19dff
ED
4547 case SIOCGIFINDEX:
4548 ifr->ifr_ifindex = dev->ifindex;
4549 return 0;
14e3e079 4550
d1b19dff
ED
4551 case SIOCGIFTXQLEN:
4552 ifr->ifr_qlen = dev->tx_queue_len;
4553 return 0;
14e3e079 4554
d1b19dff
ED
4555 default:
4556 /* dev_ioctl() should ensure this case
4557 * is never reached
4558 */
4559 WARN_ON(1);
4560 err = -EINVAL;
4561 break;
14e3e079
JG
4562
4563 }
4564 return err;
4565}
4566
4567/*
4568 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4569 */
4570static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4571{
4572 int err;
4573 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5f2f6da7 4574 const struct net_device_ops *ops;
14e3e079
JG
4575
4576 if (!dev)
4577 return -ENODEV;
4578
5f2f6da7
JP
4579 ops = dev->netdev_ops;
4580
14e3e079 4581 switch (cmd) {
d1b19dff
ED
4582 case SIOCSIFFLAGS: /* Set interface flags */
4583 return dev_change_flags(dev, ifr->ifr_flags);
14e3e079 4584
d1b19dff
ED
4585 case SIOCSIFMETRIC: /* Set the metric on the interface
4586 (currently unused) */
4587 return -EOPNOTSUPP;
14e3e079 4588
d1b19dff
ED
4589 case SIOCSIFMTU: /* Set the MTU of a device */
4590 return dev_set_mtu(dev, ifr->ifr_mtu);
1da177e4 4591
d1b19dff
ED
4592 case SIOCSIFHWADDR:
4593 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
1da177e4 4594
d1b19dff
ED
4595 case SIOCSIFHWBROADCAST:
4596 if (ifr->ifr_hwaddr.sa_family != dev->type)
4597 return -EINVAL;
4598 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4599 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4600 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4601 return 0;
1da177e4 4602
d1b19dff
ED
4603 case SIOCSIFMAP:
4604 if (ops->ndo_set_config) {
1da177e4
LT
4605 if (!netif_device_present(dev))
4606 return -ENODEV;
d1b19dff
ED
4607 return ops->ndo_set_config(dev, &ifr->ifr_map);
4608 }
4609 return -EOPNOTSUPP;
1da177e4 4610
d1b19dff
ED
4611 case SIOCADDMULTI:
4612 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4613 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4614 return -EINVAL;
4615 if (!netif_device_present(dev))
4616 return -ENODEV;
22bedad3 4617 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
d1b19dff
ED
4618
4619 case SIOCDELMULTI:
4620 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4621 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4622 return -EINVAL;
4623 if (!netif_device_present(dev))
4624 return -ENODEV;
22bedad3 4625 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
1da177e4 4626
d1b19dff
ED
4627 case SIOCSIFTXQLEN:
4628 if (ifr->ifr_qlen < 0)
4629 return -EINVAL;
4630 dev->tx_queue_len = ifr->ifr_qlen;
4631 return 0;
1da177e4 4632
d1b19dff
ED
4633 case SIOCSIFNAME:
4634 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4635 return dev_change_name(dev, ifr->ifr_newname);
1da177e4 4636
d1b19dff
ED
4637 /*
4638 * Unknown or private ioctl
4639 */
4640 default:
4641 if ((cmd >= SIOCDEVPRIVATE &&
4642 cmd <= SIOCDEVPRIVATE + 15) ||
4643 cmd == SIOCBONDENSLAVE ||
4644 cmd == SIOCBONDRELEASE ||
4645 cmd == SIOCBONDSETHWADDR ||
4646 cmd == SIOCBONDSLAVEINFOQUERY ||
4647 cmd == SIOCBONDINFOQUERY ||
4648 cmd == SIOCBONDCHANGEACTIVE ||
4649 cmd == SIOCGMIIPHY ||
4650 cmd == SIOCGMIIREG ||
4651 cmd == SIOCSMIIREG ||
4652 cmd == SIOCBRADDIF ||
4653 cmd == SIOCBRDELIF ||
4654 cmd == SIOCSHWTSTAMP ||
4655 cmd == SIOCWANDEV) {
4656 err = -EOPNOTSUPP;
4657 if (ops->ndo_do_ioctl) {
4658 if (netif_device_present(dev))
4659 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4660 else
4661 err = -ENODEV;
4662 }
4663 } else
4664 err = -EINVAL;
1da177e4
LT
4665
4666 }
4667 return err;
4668}
4669
4670/*
4671 * This function handles all "interface"-type I/O control requests. The actual
4672 * 'doing' part of this is dev_ifsioc above.
4673 */
4674
4675/**
4676 * dev_ioctl - network device ioctl
c4ea43c5 4677 * @net: the applicable net namespace
1da177e4
LT
4678 * @cmd: command to issue
4679 * @arg: pointer to a struct ifreq in user space
4680 *
4681 * Issue ioctl functions to devices. This is normally called by the
4682 * user space syscall interfaces but can sometimes be useful for
4683 * other purposes. The return value is the return from the syscall if
4684 * positive or a negative errno code on error.
4685 */
4686
881d966b 4687int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1da177e4
LT
4688{
4689 struct ifreq ifr;
4690 int ret;
4691 char *colon;
4692
4693 /* One special case: SIOCGIFCONF takes an ifconf argument
4694 and requires a shared lock, because it sleeps writing
4695 to user space.
4696 */
4697
4698 if (cmd == SIOCGIFCONF) {
6756ae4b 4699 rtnl_lock();
881d966b 4700 ret = dev_ifconf(net, (char __user *) arg);
6756ae4b 4701 rtnl_unlock();
1da177e4
LT
4702 return ret;
4703 }
4704 if (cmd == SIOCGIFNAME)
881d966b 4705 return dev_ifname(net, (struct ifreq __user *)arg);
1da177e4
LT
4706
4707 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4708 return -EFAULT;
4709
4710 ifr.ifr_name[IFNAMSIZ-1] = 0;
4711
4712 colon = strchr(ifr.ifr_name, ':');
4713 if (colon)
4714 *colon = 0;
4715
4716 /*
4717 * See which interface the caller is talking about.
4718 */
4719
4720 switch (cmd) {
d1b19dff
ED
4721 /*
4722 * These ioctl calls:
4723 * - can be done by all.
4724 * - atomic and do not require locking.
4725 * - return a value
4726 */
4727 case SIOCGIFFLAGS:
4728 case SIOCGIFMETRIC:
4729 case SIOCGIFMTU:
4730 case SIOCGIFHWADDR:
4731 case SIOCGIFSLAVE:
4732 case SIOCGIFMAP:
4733 case SIOCGIFINDEX:
4734 case SIOCGIFTXQLEN:
4735 dev_load(net, ifr.ifr_name);
3710becf 4736 rcu_read_lock();
d1b19dff 4737 ret = dev_ifsioc_locked(net, &ifr, cmd);
3710becf 4738 rcu_read_unlock();
d1b19dff
ED
4739 if (!ret) {
4740 if (colon)
4741 *colon = ':';
4742 if (copy_to_user(arg, &ifr,
4743 sizeof(struct ifreq)))
4744 ret = -EFAULT;
4745 }
4746 return ret;
1da177e4 4747
d1b19dff
ED
4748 case SIOCETHTOOL:
4749 dev_load(net, ifr.ifr_name);
4750 rtnl_lock();
4751 ret = dev_ethtool(net, &ifr);
4752 rtnl_unlock();
4753 if (!ret) {
4754 if (colon)
4755 *colon = ':';
4756 if (copy_to_user(arg, &ifr,
4757 sizeof(struct ifreq)))
4758 ret = -EFAULT;
4759 }
4760 return ret;
1da177e4 4761
d1b19dff
ED
4762 /*
4763 * These ioctl calls:
4764 * - require superuser power.
4765 * - require strict serialization.
4766 * - return a value
4767 */
4768 case SIOCGMIIPHY:
4769 case SIOCGMIIREG:
4770 case SIOCSIFNAME:
4771 if (!capable(CAP_NET_ADMIN))
4772 return -EPERM;
4773 dev_load(net, ifr.ifr_name);
4774 rtnl_lock();
4775 ret = dev_ifsioc(net, &ifr, cmd);
4776 rtnl_unlock();
4777 if (!ret) {
4778 if (colon)
4779 *colon = ':';
4780 if (copy_to_user(arg, &ifr,
4781 sizeof(struct ifreq)))
4782 ret = -EFAULT;
4783 }
4784 return ret;
1da177e4 4785
d1b19dff
ED
4786 /*
4787 * These ioctl calls:
4788 * - require superuser power.
4789 * - require strict serialization.
4790 * - do not return a value
4791 */
4792 case SIOCSIFFLAGS:
4793 case SIOCSIFMETRIC:
4794 case SIOCSIFMTU:
4795 case SIOCSIFMAP:
4796 case SIOCSIFHWADDR:
4797 case SIOCSIFSLAVE:
4798 case SIOCADDMULTI:
4799 case SIOCDELMULTI:
4800 case SIOCSIFHWBROADCAST:
4801 case SIOCSIFTXQLEN:
4802 case SIOCSMIIREG:
4803 case SIOCBONDENSLAVE:
4804 case SIOCBONDRELEASE:
4805 case SIOCBONDSETHWADDR:
4806 case SIOCBONDCHANGEACTIVE:
4807 case SIOCBRADDIF:
4808 case SIOCBRDELIF:
4809 case SIOCSHWTSTAMP:
4810 if (!capable(CAP_NET_ADMIN))
4811 return -EPERM;
4812 /* fall through */
4813 case SIOCBONDSLAVEINFOQUERY:
4814 case SIOCBONDINFOQUERY:
4815 dev_load(net, ifr.ifr_name);
4816 rtnl_lock();
4817 ret = dev_ifsioc(net, &ifr, cmd);
4818 rtnl_unlock();
4819 return ret;
4820
4821 case SIOCGIFMEM:
4822 /* Get the per device memory space. We can add this but
4823 * currently do not support it */
4824 case SIOCSIFMEM:
4825 /* Set the per device memory buffer space.
4826 * Not applicable in our case */
4827 case SIOCSIFLINK:
4828 return -EINVAL;
4829
4830 /*
4831 * Unknown or private ioctl.
4832 */
4833 default:
4834 if (cmd == SIOCWANDEV ||
4835 (cmd >= SIOCDEVPRIVATE &&
4836 cmd <= SIOCDEVPRIVATE + 15)) {
881d966b 4837 dev_load(net, ifr.ifr_name);
1da177e4 4838 rtnl_lock();
881d966b 4839 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4 4840 rtnl_unlock();
d1b19dff
ED
4841 if (!ret && copy_to_user(arg, &ifr,
4842 sizeof(struct ifreq)))
4843 ret = -EFAULT;
1da177e4 4844 return ret;
d1b19dff
ED
4845 }
4846 /* Take care of Wireless Extensions */
4847 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4848 return wext_handle_ioctl(net, &ifr, cmd, arg);
4849 return -EINVAL;
1da177e4
LT
4850 }
4851}
4852
4853
4854/**
4855 * dev_new_index - allocate an ifindex
c4ea43c5 4856 * @net: the applicable net namespace
1da177e4
LT
4857 *
4858 * Returns a suitable unique value for a new device interface
4859 * number. The caller must hold the rtnl semaphore or the
4860 * dev_base_lock to be sure it remains unique.
4861 */
881d966b 4862static int dev_new_index(struct net *net)
1da177e4
LT
4863{
4864 static int ifindex;
4865 for (;;) {
4866 if (++ifindex <= 0)
4867 ifindex = 1;
881d966b 4868 if (!__dev_get_by_index(net, ifindex))
1da177e4
LT
4869 return ifindex;
4870 }
4871}
4872
1da177e4 4873/* Delayed registration/unregisteration */
3b5b34fd 4874static LIST_HEAD(net_todo_list);
1da177e4 4875
6f05f629 4876static void net_set_todo(struct net_device *dev)
1da177e4 4877{
1da177e4 4878 list_add_tail(&dev->todo_list, &net_todo_list);
1da177e4
LT
4879}
4880
9b5e383c 4881static void rollback_registered_many(struct list_head *head)
93ee31f1 4882{
e93737b0 4883 struct net_device *dev, *tmp;
9b5e383c 4884
93ee31f1
DL
4885 BUG_ON(dev_boot_phase);
4886 ASSERT_RTNL();
4887
e93737b0 4888 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 4889 /* Some devices call this without having registered,
e93737b0
KK
4890 * to unwind a failed initialization. Remove those
4891 * devices and proceed with the remaining.
9b5e383c
ED
4892 */
4893 if (dev->reg_state == NETREG_UNINITIALIZED) {
4894 pr_debug("unregister_netdevice: device %s/%p never "
4895 "was registered\n", dev->name, dev);
93ee31f1 4896
9b5e383c 4897 WARN_ON(1);
e93737b0
KK
4898 list_del(&dev->unreg_list);
4899 continue;
9b5e383c 4900 }
93ee31f1 4901
9b5e383c 4902 BUG_ON(dev->reg_state != NETREG_REGISTERED);
93ee31f1 4903
9b5e383c
ED
4904 /* If device is running, close it first. */
4905 dev_close(dev);
93ee31f1 4906
9b5e383c
ED
4907 /* And unlink it from device chain. */
4908 unlist_netdevice(dev);
93ee31f1 4909
9b5e383c
ED
4910 dev->reg_state = NETREG_UNREGISTERING;
4911 }
93ee31f1
DL
4912
4913 synchronize_net();
4914
9b5e383c
ED
4915 list_for_each_entry(dev, head, unreg_list) {
4916 /* Shutdown queueing discipline. */
4917 dev_shutdown(dev);
93ee31f1
DL
4918
4919
9b5e383c
ED
4920 /* Notify protocols that we are about to destroy
4921 this device. They should clean up all of their state.
4922 */
4923 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 4924
a2835763
PM
4925 if (!dev->rtnl_link_ops ||
4926 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4927 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4928
9b5e383c
ED
4929 /*
4930 * Flush the unicast and multicast chains
4931 */
a748ee24 4932 dev_uc_flush(dev);
22bedad3 4933 dev_mc_flush(dev);
93ee31f1 4934
9b5e383c
ED
4935 if (dev->netdev_ops->ndo_uninit)
4936 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 4937
9b5e383c
ED
4938 /* Notifier chain MUST detach us from master device. */
4939 WARN_ON(dev->master);
93ee31f1 4940
9b5e383c
ED
4941 /* Remove entries from kobject tree */
4942 netdev_unregister_kobject(dev);
4943 }
93ee31f1 4944
a5ee1551 4945 /* Process any work delayed until the end of the batch */
e5e26d75 4946 dev = list_first_entry(head, struct net_device, unreg_list);
a5ee1551 4947 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
93ee31f1 4948
ef885afb 4949 rcu_barrier();
395264d5 4950
a5ee1551 4951 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
4952 dev_put(dev);
4953}
4954
4955static void rollback_registered(struct net_device *dev)
4956{
4957 LIST_HEAD(single);
4958
4959 list_add(&dev->unreg_list, &single);
4960 rollback_registered_many(&single);
93ee31f1
DL
4961}
4962
b63365a2
HX
4963unsigned long netdev_fix_features(unsigned long features, const char *name)
4964{
4965 /* Fix illegal SG+CSUM combinations. */
4966 if ((features & NETIF_F_SG) &&
4967 !(features & NETIF_F_ALL_CSUM)) {
4968 if (name)
4969 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4970 "checksum feature.\n", name);
4971 features &= ~NETIF_F_SG;
4972 }
4973
4974 /* TSO requires that SG is present as well. */
4975 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4976 if (name)
4977 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4978 "SG feature.\n", name);
4979 features &= ~NETIF_F_TSO;
4980 }
4981
4982 if (features & NETIF_F_UFO) {
4983 if (!(features & NETIF_F_GEN_CSUM)) {
4984 if (name)
4985 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4986 "since no NETIF_F_HW_CSUM feature.\n",
4987 name);
4988 features &= ~NETIF_F_UFO;
4989 }
4990
4991 if (!(features & NETIF_F_SG)) {
4992 if (name)
4993 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4994 "since no NETIF_F_SG feature.\n", name);
4995 features &= ~NETIF_F_UFO;
4996 }
4997 }
4998
4999 return features;
5000}
5001EXPORT_SYMBOL(netdev_fix_features);
5002
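/*
 * Editor's illustrative sketch (not part of dev.c): a driver sanitising
 * its advertised feature set, mirroring the register_netdevice() call
 * below. Passing a name makes the dropped-feature notices printable.
 */
static void my_fix_features(struct net_device *dev)
{
	/* e.g. drops NETIF_F_TSO when NETIF_F_SG is absent */
	dev->features = netdev_fix_features(dev->features, dev->name);
}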
fc4a7489
PM
5003/**
5004 * netif_stacked_transfer_operstate - transfer operstate
5005 * @rootdev: the root or lower level device to transfer state from
5006 * @dev: the device to transfer operstate to
5007 *
5008 * Transfer operational state from root to device. This is normally
5009 * called when a stacking relationship exists between the root
5010 * device and the device (a leaf device).
5011 */
5012void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5013 struct net_device *dev)
5014{
5015 if (rootdev->operstate == IF_OPER_DORMANT)
5016 netif_dormant_on(dev);
5017 else
5018 netif_dormant_off(dev);
5019
5020 if (netif_carrier_ok(rootdev)) {
5021 if (!netif_carrier_ok(dev))
5022 netif_carrier_on(dev);
5023 } else {
5024 if (netif_carrier_ok(dev))
5025 netif_carrier_off(dev);
5026 }
5027}
5028EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5029
1b4bf461
ED
5030static int netif_alloc_rx_queues(struct net_device *dev)
5031{
5032#ifdef CONFIG_RPS
5033 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 5034 struct netdev_rx_queue *rx;
1b4bf461 5035
bd25fa7b 5036 BUG_ON(count < 1);
1b4bf461 5037
bd25fa7b
TH
5038 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5039 if (!rx) {
5040 pr_err("netdev: Unable to allocate %u rx queues.\n", count);
5041 return -ENOMEM;
1b4bf461 5042 }
bd25fa7b
TH
5043 dev->_rx = rx;
5044
5045 /*
5046 * Set a pointer to the first element in the array, which holds the
5047 * reference count.
5048 */
5049 for (i = 0; i < count; i++)
5050 rx[i].first = rx;
1b4bf461
ED
5051#endif
5052 return 0;
5053}
5054
e6484930
TH
5055static int netif_alloc_netdev_queues(struct net_device *dev)
5056{
5057 unsigned int count = dev->num_tx_queues;
5058 struct netdev_queue *tx;
5059
5060 BUG_ON(count < 1);
5061
5062 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5063 if (!tx) {
5064 pr_err("netdev: Unable to allocate %u tx queues.\n",
5065 count);
5066 return -ENOMEM;
5067 }
5068 dev->_tx = tx;
5069 return 0;
5070}
5071
5072static void netdev_init_one_queue(struct net_device *dev,
5073 struct netdev_queue *queue,
5074 void *_unused)
5075{
5076 queue->dev = dev;
5077
5078 /* Initialize queue lock */
5079 spin_lock_init(&queue->_xmit_lock);
5080 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5081 queue->xmit_lock_owner = -1;
5082}
5083
5084static void netdev_init_queues(struct net_device *dev)
5085{
5086 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5087 spin_lock_init(&dev->tx_global_lock);
5088}
5089
1da177e4
LT
5090/**
5091 * register_netdevice - register a network device
5092 * @dev: device to register
5093 *
5094 * Take a completed network device structure and add it to the kernel
5095 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5096 * chain. 0 is returned on success. A negative errno code is returned
5097 * on a failure to set up the device, or if the name is a duplicate.
5098 *
5099 * Callers must hold the rtnl semaphore. You may want
5100 * register_netdev() instead of this.
5101 *
5102 * BUGS:
5103 * The locking appears insufficient to guarantee two parallel registers
5104 * will not get the same name.
5105 */
5106
5107int register_netdevice(struct net_device *dev)
5108{
1da177e4 5109 int ret;
d314774c 5110 struct net *net = dev_net(dev);
1da177e4
LT
5111
5112 BUG_ON(dev_boot_phase);
5113 ASSERT_RTNL();
5114
b17a7c17
SH
5115 might_sleep();
5116
1da177e4
LT
5117 /* When net_device's are persistent, this will be fatal. */
5118 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 5119 BUG_ON(!net);
1da177e4 5120
f1f28aa3 5121 spin_lock_init(&dev->addr_list_lock);
cf508b12 5122 netdev_set_addr_lockdep_class(dev);
1da177e4 5123
1da177e4
LT
5124 dev->iflink = -1;
5125
1b4bf461
ED
5126 ret = netif_alloc_rx_queues(dev);
5127 if (ret)
5128 goto out;
0a9627f2 5129
e6484930
TH
5130 ret = netif_alloc_netdev_queues(dev);
5131 if (ret)
5132 goto out;
5133
5134 netdev_init_queues(dev);
5135
1da177e4 5136 /* Init, if this function is available */
d314774c
SH
5137 if (dev->netdev_ops->ndo_init) {
5138 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
5139 if (ret) {
5140 if (ret > 0)
5141 ret = -EIO;
90833aa4 5142 goto out;
1da177e4
LT
5143 }
5144 }
4ec93edb 5145
8ce6cebc 5146 ret = dev_get_valid_name(dev, dev->name, 0);
d9031024 5147 if (ret)
7ce1b0ed 5148 goto err_uninit;
1da177e4 5149
881d966b 5150 dev->ifindex = dev_new_index(net);
1da177e4
LT
5151 if (dev->iflink == -1)
5152 dev->iflink = dev->ifindex;
5153
d212f87b
SH
5154 /* Fix illegal checksum combinations */
5155 if ((dev->features & NETIF_F_HW_CSUM) &&
5156 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5157 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
5158 dev->name);
5159 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5160 }
5161
5162 if ((dev->features & NETIF_F_NO_CSUM) &&
5163 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5164 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
5165 dev->name);
5166 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
5167 }
5168
b63365a2 5169 dev->features = netdev_fix_features(dev->features, dev->name);
1da177e4 5170
e5a4a72d
LB
5171 /* Enable software GSO if SG is supported. */
5172 if (dev->features & NETIF_F_SG)
5173 dev->features |= NETIF_F_GSO;
5174
c5256c51
ED
5175 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5176 * vlan_dev_init() will do the dev->features check, so these features
5177 * are enabled only if supported by underlying device.
16c3ea78 5178 */
c5256c51 5179 dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
16c3ea78 5180
7ffbe3fd
JB
5181 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5182 ret = notifier_to_errno(ret);
5183 if (ret)
5184 goto err_uninit;
5185
8b41d188 5186 ret = netdev_register_kobject(dev);
b17a7c17 5187 if (ret)
7ce1b0ed 5188 goto err_uninit;
b17a7c17
SH
5189 dev->reg_state = NETREG_REGISTERED;
5190
1da177e4
LT
5191 /*
5192 * Default initial state at registration is that the
5193 * device is present.
5194 */
5195
5196 set_bit(__LINK_STATE_PRESENT, &dev->state);
5197
1da177e4 5198 dev_init_scheduler(dev);
1da177e4 5199 dev_hold(dev);
ce286d32 5200 list_netdevice(dev);
1da177e4
LT
5201
5202 /* Notify protocols that a new device appeared. */
056925ab 5203 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 5204 ret = notifier_to_errno(ret);
93ee31f1
DL
5205 if (ret) {
5206 rollback_registered(dev);
5207 dev->reg_state = NETREG_UNREGISTERED;
5208 }
d90a909e
EB
5209 /*
5210 * Prevent userspace races by waiting until the network
5211 * device is fully setup before sending notifications.
5212 */
a2835763
PM
5213 if (!dev->rtnl_link_ops ||
5214 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5215 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1da177e4
LT
5216
5217out:
5218 return ret;
7ce1b0ed
HX
5219
5220err_uninit:
d314774c
SH
5221 if (dev->netdev_ops->ndo_uninit)
5222 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 5223 goto out;
1da177e4 5224}
d1b19dff 5225EXPORT_SYMBOL(register_netdevice);
1da177e4 5226
937f1ba5
BH
5227/**
5228 * init_dummy_netdev - init a dummy network device for NAPI
5229 * @dev: device to init
5230 *
5231 * This takes a network device structure and initializes the minimum
5232 * set of fields so it can be used to schedule NAPI polls without
5233 * registering a full blown interface. This is to be used by drivers
5234 * that need to tie several hardware interfaces to a single NAPI
5235 * poll scheduler due to HW limitations.
5236 */
5237int init_dummy_netdev(struct net_device *dev)
5238{
5239 /* Clear everything. Note we don't initialize spinlocks
5240 * as they aren't supposed to be taken by any of the
5241 * NAPI code and this dummy netdev is supposed to be
5242 * only ever used for NAPI polls.
5243 */
5244 memset(dev, 0, sizeof(struct net_device));
5245
5246 /* make sure we BUG if trying to hit standard
5247 * register/unregister code path
5248 */
5249 dev->reg_state = NETREG_DUMMY;
5250
937f1ba5
BH
5251 /* NAPI wants this */
5252 INIT_LIST_HEAD(&dev->napi_list);
5253
5254 /* a dummy interface is started by default */
5255 set_bit(__LINK_STATE_PRESENT, &dev->state);
5256 set_bit(__LINK_STATE_START, &dev->state);
5257
29b4433d
ED
5258 /* Note: We don't allocate pcpu_refcnt for dummy devices,
5259 * because users of this 'device' don't need to change
5260 * its refcount.
5261 */
5262
937f1ba5
BH
5263 return 0;
5264}
5265EXPORT_SYMBOL_GPL(init_dummy_netdev);
5266
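/*
 * Editor's illustrative sketch (not part of dev.c): the pattern
 * init_dummy_netdev() exists for, namely one NAPI context shared by
 * hardware units that have no real netdev. my_hw and my_poll (from the
 * earlier sketch) are hypothetical.
 */
static struct net_device my_dummy_dev;

static void my_shared_napi_setup(struct my_hw *hw)
{
	init_dummy_netdev(&my_dummy_dev);
	netif_napi_add(&my_dummy_dev, &hw->napi, my_poll, 64);
	napi_enable(&hw->napi);
}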
5267
1da177e4
LT
5268/**
5269 * register_netdev - register a network device
5270 * @dev: device to register
5271 *
5272 * Take a completed network device structure and add it to the kernel
5273 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5274 * chain. 0 is returned on success. A negative errno code is returned
5275 * on a failure to set up the device, or if the name is a duplicate.
5276 *
38b4da38 5277 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
5278 * and expands the device name if you passed a format string to
5279 * alloc_netdev.
5280 */
5281int register_netdev(struct net_device *dev)
5282{
5283 int err;
5284
5285 rtnl_lock();
5286
5287 /*
5288 * If the name is a format string the caller wants us to do a
5289 * name allocation.
5290 */
5291 if (strchr(dev->name, '%')) {
5292 err = dev_alloc_name(dev, dev->name);
5293 if (err < 0)
5294 goto out;
5295 }
4ec93edb 5296
1da177e4
LT
5297 err = register_netdevice(dev);
5298out:
5299 rtnl_unlock();
5300 return err;
5301}
5302EXPORT_SYMBOL(register_netdev);
5303
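/*
 * Editor's illustrative sketch (not part of dev.c): the classic probe-time
 * sequence around register_netdev(). "eth%d" asks the core to pick the
 * unit number; my_priv is hypothetical.
 */
static int my_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct my_priv), "eth%d", ether_setup);
	if (!dev)
		return -ENOMEM;
	err = register_netdev(dev);	/* takes rtnl, expands "eth%d" */
	if (err)
		free_netdev(dev);
	return err;
}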
29b4433d
ED
5304int netdev_refcnt_read(const struct net_device *dev)
5305{
5306 int i, refcnt = 0;
5307
5308 for_each_possible_cpu(i)
5309 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5310 return refcnt;
5311}
5312EXPORT_SYMBOL(netdev_refcnt_read);
5313
1da177e4
LT
5314/*
5315 * netdev_wait_allrefs - wait until all references are gone.
5316 *
5317 * This is called when unregistering network devices.
5318 *
5319 * Any protocol or device that holds a reference should register
5320 * for netdevice notification, and cleanup and put back the
5321 * reference if they receive an UNREGISTER event.
5322 * We can get stuck here if buggy protocols don't correctly
4ec93edb 5323 * call dev_put.
1da177e4
LT
5324 */
5325static void netdev_wait_allrefs(struct net_device *dev)
5326{
5327 unsigned long rebroadcast_time, warning_time;
29b4433d 5328 int refcnt;
1da177e4 5329
e014debe
ED
5330 linkwatch_forget_dev(dev);
5331
1da177e4 5332 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
5333 refcnt = netdev_refcnt_read(dev);
5334
5335 while (refcnt != 0) {
1da177e4 5336 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 5337 rtnl_lock();
1da177e4
LT
5338
5339 /* Rebroadcast unregister notification */
056925ab 5340 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5341 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
395264d5 5342 * should have already handled it the first time */
1da177e4
LT
5343
5344 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5345 &dev->state)) {
5346 /* We must not have linkwatch events
5347 * pending on unregister. If this
5348 * happens, we simply run the queue
5349 * unscheduled, resulting in a noop
5350 * for this device.
5351 */
5352 linkwatch_run_queue();
5353 }
5354
6756ae4b 5355 __rtnl_unlock();
1da177e4
LT
5356
5357 rebroadcast_time = jiffies;
5358 }
5359
5360 msleep(250);
5361
29b4433d
ED
5362 refcnt = netdev_refcnt_read(dev);
5363
1da177e4
LT
5364 if (time_after(jiffies, warning_time + 10 * HZ)) {
5365 printk(KERN_EMERG "unregister_netdevice: "
5366 "waiting for %s to become free. Usage "
5367 "count = %d\n",
29b4433d 5368 dev->name, refcnt);
1da177e4
LT
5369 warning_time = jiffies;
5370 }
5371 }
5372}
5373
5374/* The sequence is:
5375 *
5376 * rtnl_lock();
5377 * ...
5378 * register_netdevice(x1);
5379 * register_netdevice(x2);
5380 * ...
5381 * unregister_netdevice(y1);
5382 * unregister_netdevice(y2);
5383 * ...
5384 * rtnl_unlock();
5385 * free_netdev(y1);
5386 * free_netdev(y2);
5387 *
58ec3b4d 5388 * We are invoked by rtnl_unlock().
1da177e4 5389 * This allows us to deal with problems:
b17a7c17 5390 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
5391 * without deadlocking with linkwatch via keventd.
5392 * 2) Since we run with the RTNL semaphore not held, we can sleep
5393 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
5394 *
5395 * We must not return until all unregister events added during
5396 * the interval the lock was held have been completed.
1da177e4 5397 */
1da177e4
LT
5398void netdev_run_todo(void)
5399{
626ab0e6 5400 struct list_head list;
1da177e4 5401
1da177e4 5402 /* Snapshot list, allow later requests */
626ab0e6 5403 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
5404
5405 __rtnl_unlock();
626ab0e6 5406
1da177e4
LT
5407 while (!list_empty(&list)) {
5408 struct net_device *dev
e5e26d75 5409 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
5410 list_del(&dev->todo_list);
5411
b17a7c17
SH
5412 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5413 printk(KERN_ERR "network todo '%s' but state %d\n",
5414 dev->name, dev->reg_state);
5415 dump_stack();
5416 continue;
5417 }
1da177e4 5418
b17a7c17 5419 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 5420
152102c7 5421 on_each_cpu(flush_backlog, dev, 1);
6e583ce5 5422
b17a7c17 5423 netdev_wait_allrefs(dev);
1da177e4 5424
b17a7c17 5425 /* paranoia */
29b4433d 5426 BUG_ON(netdev_refcnt_read(dev));
95ae6b22 5427 WARN_ON(rcu_dereference_raw(dev->ip_ptr));
547b792c
IJ
5428 WARN_ON(dev->ip6_ptr);
5429 WARN_ON(dev->dn_ptr);
1da177e4 5430
b17a7c17
SH
5431 if (dev->destructor)
5432 dev->destructor(dev);
9093bbb2
SH
5433
5434 /* Free network device */
5435 kobject_put(&dev->dev.kobj);
1da177e4 5436 }
1da177e4
LT
5437}
5438
d83345ad
ED
5439/**
5440 * dev_txq_stats_fold - fold tx_queues stats
5441 * @dev: device to get statistics from
3cfde79c 5442 * @stats: struct rtnl_link_stats64 to hold results
d83345ad
ED
5443 */
5444void dev_txq_stats_fold(const struct net_device *dev,
3cfde79c 5445 struct rtnl_link_stats64 *stats)
d83345ad 5446{
bd27290a 5447 u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
d83345ad
ED
5448 unsigned int i;
5449 struct netdev_queue *txq;
5450
5451 for (i = 0; i < dev->num_tx_queues; i++) {
5452 txq = netdev_get_tx_queue(dev, i);
bd27290a 5453 spin_lock_bh(&txq->_xmit_lock);
d83345ad
ED
5454 tx_bytes += txq->tx_bytes;
5455 tx_packets += txq->tx_packets;
5456 tx_dropped += txq->tx_dropped;
bd27290a 5457 spin_unlock_bh(&txq->_xmit_lock);
d83345ad
ED
5458 }
5459 if (tx_bytes || tx_packets || tx_dropped) {
5460 stats->tx_bytes = tx_bytes;
5461 stats->tx_packets = tx_packets;
5462 stats->tx_dropped = tx_dropped;
5463 }
5464}
5465EXPORT_SYMBOL(dev_txq_stats_fold);
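/*
 * Usage sketch (illustrative, not part of dev.c): fold the per-queue
 * tx counters of a device into a zeroed rtnl_link_stats64 and report
 * the totals.
 */
static void example_fold_txq_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	memset(&stats, 0, sizeof(stats));
	dev_txq_stats_fold(dev, &stats);
	pr_info("%s: %llu tx packets, %llu tx bytes\n", dev->name,
		(unsigned long long)stats.tx_packets,
		(unsigned long long)stats.tx_bytes);
}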
5466
3cfde79c
BH
5467/* Convert net_device_stats to rtnl_link_stats64. They have the same
5468 * fields in the same order, with only the type differing.
5469 */
5470static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5471 const struct net_device_stats *netdev_stats)
5472{
5473#if BITS_PER_LONG == 64
5474 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5475 memcpy(stats64, netdev_stats, sizeof(*stats64));
5476#else
5477 size_t i, n = sizeof(*stats64) / sizeof(u64);
5478 const unsigned long *src = (const unsigned long *)netdev_stats;
5479 u64 *dst = (u64 *)stats64;
5480
5481 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5482 sizeof(*stats64) / sizeof(u64));
5483 for (i = 0; i < n; i++)
5484 dst[i] = src[i];
5485#endif
5486}
5487
eeda3fd6
SH
5488/**
5489 * dev_get_stats - get network device statistics
5490 * @dev: device to get statistics from
28172739 5491 * @storage: place to store stats
eeda3fd6 5492 *
d7753516
BH
5493 * Get network statistics from device. Return @storage.
5494 * The device driver may provide its own method by setting
 5496 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
5496 * otherwise the internal statistics structure is used.
eeda3fd6 5497 */
d7753516
BH
5498struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5499 struct rtnl_link_stats64 *storage)
7004bf25 5500{
eeda3fd6
SH
5501 const struct net_device_ops *ops = dev->netdev_ops;
5502
28172739
ED
5503 if (ops->ndo_get_stats64) {
5504 memset(storage, 0, sizeof(*storage));
caf586e5
ED
5505 ops->ndo_get_stats64(dev, storage);
5506 } else if (ops->ndo_get_stats) {
3cfde79c 5507 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
5508 } else {
5509 netdev_stats_to_stats64(storage, &dev->stats);
5510 dev_txq_stats_fold(dev, storage);
28172739 5511 }
caf586e5 5512 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
28172739 5513 return storage;
c45d286e 5514}
eeda3fd6 5515EXPORT_SYMBOL(dev_get_stats);
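/*
 * Usage sketch (illustrative, not part of dev.c): dev_get_stats()
 * fills and returns the caller-supplied storage, so it composes
 * inline; the caller must hold a reference on @dev.
 */
static void example_show_drops(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	pr_info("%s: rx_dropped=%llu\n", dev->name,
		(unsigned long long)stats->rx_dropped);
}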
c45d286e 5516
24824a09
ED
5517struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5518{
5519 struct netdev_queue *queue = dev_ingress_queue(dev);
5520
5521#ifdef CONFIG_NET_CLS_ACT
5522 if (queue)
5523 return queue;
5524 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5525 if (!queue)
5526 return NULL;
5527 netdev_init_one_queue(dev, queue, NULL);
24824a09
ED
5528 queue->qdisc = &noop_qdisc;
5529 queue->qdisc_sleeping = &noop_qdisc;
5530 rcu_assign_pointer(dev->ingress_queue, queue);
5531#endif
5532 return queue;
5533}
5534
1da177e4 5535/**
f25f4e44 5536 * alloc_netdev_mq - allocate network device
1da177e4
LT
5537 * @sizeof_priv: size of private data to allocate space for
5538 * @name: device name format string
5539 * @setup: callback to initialize device
f25f4e44 5540 * @queue_count: the number of subqueues to allocate
1da177e4
LT
5541 *
5542 * Allocates a struct net_device with private data area for driver use
f25f4e44
PWJ
 5543 * and performs basic initialization. Also allocates subqueue structs
5544 * for each queue on the device at the end of the netdevice.
1da177e4 5545 */
f25f4e44
PWJ
5546struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5547 void (*setup)(struct net_device *), unsigned int queue_count)
1da177e4 5548{
1da177e4 5549 struct net_device *dev;
7943986c 5550 size_t alloc_size;
1ce8e7b5 5551 struct net_device *p;
1da177e4 5552
b6fe17d6
SH
5553 BUG_ON(strlen(name) >= sizeof(dev->name));
5554
55513fb4
TH
5555 if (queue_count < 1) {
5556 pr_err("alloc_netdev: Unable to allocate device "
5557 "with zero queues.\n");
5558 return NULL;
5559 }
5560
fd2ea0a7 5561 alloc_size = sizeof(struct net_device);
d1643d24
AD
5562 if (sizeof_priv) {
5563 /* ensure 32-byte alignment of private area */
1ce8e7b5 5564 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
5565 alloc_size += sizeof_priv;
5566 }
5567 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 5568 alloc_size += NETDEV_ALIGN - 1;
1da177e4 5569
31380de9 5570 p = kzalloc(alloc_size, GFP_KERNEL);
1da177e4 5571 if (!p) {
b6fe17d6 5572 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
1da177e4
LT
5573 return NULL;
5574 }
1da177e4 5575
1ce8e7b5 5576 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 5577 dev->padded = (char *)dev - (char *)p;
ab9c73cc 5578
29b4433d
ED
5579 dev->pcpu_refcnt = alloc_percpu(int);
5580 if (!dev->pcpu_refcnt)
e6484930 5581 goto free_p;
ab9c73cc 5582
29b4433d
ED
5583 if (dev_addr_init(dev))
5584 goto free_pcpu;
5585
22bedad3 5586 dev_mc_init(dev);
a748ee24 5587 dev_uc_init(dev);
ccffad25 5588
c346dca1 5589 dev_net_set(dev, &init_net);
1da177e4 5590
e8a0464c 5591 dev->num_tx_queues = queue_count;
fd2ea0a7 5592 dev->real_num_tx_queues = queue_count;
e8a0464c 5593
df334545 5594#ifdef CONFIG_RPS
0a9627f2 5595 dev->num_rx_queues = queue_count;
62fe0b40 5596 dev->real_num_rx_queues = queue_count;
df334545 5597#endif
0a9627f2 5598
82cc1a7a 5599 dev->gso_max_size = GSO_MAX_SIZE;
1da177e4 5600
15682bc4
PWJ
5601 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5602 dev->ethtool_ntuple_list.count = 0;
d565b0a1 5603 INIT_LIST_HEAD(&dev->napi_list);
9fdce099 5604 INIT_LIST_HEAD(&dev->unreg_list);
e014debe 5605 INIT_LIST_HEAD(&dev->link_watch_list);
93f154b5 5606 dev->priv_flags = IFF_XMIT_DST_RELEASE;
1da177e4
LT
5607 setup(dev);
5608 strcpy(dev->name, name);
5609 return dev;
ab9c73cc 5610
29b4433d
ED
5611free_pcpu:
5612 free_percpu(dev->pcpu_refcnt);
ab9c73cc
JP
5613free_p:
5614 kfree(p);
5615 return NULL;
1da177e4 5616}
f25f4e44 5617EXPORT_SYMBOL(alloc_netdev_mq);
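/*
 * Allocation sketch (illustrative, not part of dev.c): an Ethernet-
 * style device with four tx queues.  ether_setup() is the usual setup
 * callback for Ethernet devices; struct example_priv is hypothetical
 * driver state, reachable later via netdev_priv().
 */
struct example_priv {
	spinlock_t lock;
};

static struct net_device *example_alloc(void)
{
	return alloc_netdev_mq(sizeof(struct example_priv), "ex%d",
			       ether_setup, 4);
}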
1da177e4
LT
5618
5619/**
5620 * free_netdev - free network device
5621 * @dev: device
5622 *
4ec93edb
YH
5623 * This function does the last stage of destroying an allocated device
5624 * interface. The reference to the device object is released.
1da177e4
LT
5625 * If this is the last reference then it will be freed.
5626 */
5627void free_netdev(struct net_device *dev)
5628{
d565b0a1
HX
5629 struct napi_struct *p, *n;
5630
f3005d7f
DL
5631 release_net(dev_net(dev));
5632
e8a0464c
DM
5633 kfree(dev->_tx);
5634
24824a09
ED
5635 kfree(rcu_dereference_raw(dev->ingress_queue));
5636
f001fde5
JP
5637 /* Flush device addresses */
5638 dev_addr_flush(dev);
5639
15682bc4
PWJ
5640 /* Clear ethtool n-tuple list */
5641 ethtool_ntuple_flush(dev);
5642
d565b0a1
HX
5643 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5644 netif_napi_del(p);
5645
29b4433d
ED
5646 free_percpu(dev->pcpu_refcnt);
5647 dev->pcpu_refcnt = NULL;
5648
3041a069 5649 /* Compatibility with error handling in drivers */
1da177e4
LT
5650 if (dev->reg_state == NETREG_UNINITIALIZED) {
5651 kfree((char *)dev - dev->padded);
5652 return;
5653 }
5654
5655 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5656 dev->reg_state = NETREG_RELEASED;
5657
43cb76d9
GKH
5658 /* will free via device release */
5659 put_device(&dev->dev);
1da177e4 5660}
d1b19dff 5661EXPORT_SYMBOL(free_netdev);
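/*
 * Error-path sketch (illustrative, not part of dev.c): free_netdev()
 * copes with a device that was never registered (reg_state is still
 * NETREG_UNINITIALIZED) by freeing it immediately, so a probe routine
 * can fail cleanly.  example_alloc() is the hypothetical helper from
 * the allocation sketch above.
 */
static int example_probe(void)
{
	struct net_device *dev = example_alloc();
	int err;

	if (!dev)
		return -ENOMEM;
	err = register_netdev(dev);
	if (err)
		free_netdev(dev);	/* never registered: freed at once */
	return err;
}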
4ec93edb 5662
f0db275a
SH
5663/**
5664 * synchronize_net - Synchronize with packet receive processing
5665 *
5666 * Wait for packets currently being received to be done.
5667 * Does not block later packets from starting.
5668 */
4ec93edb 5669void synchronize_net(void)
1da177e4
LT
5670{
5671 might_sleep();
fbd568a3 5672 synchronize_rcu();
1da177e4 5673}
d1b19dff 5674EXPORT_SYMBOL(synchronize_net);
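/*
 * Usage sketch (illustrative, not part of dev.c): the pattern
 * dev_remove_pack() itself uses -- unhook the handler, then
 * synchronize_net(), after which no receive path can still be
 * executing inside it.
 */
static void example_remove_tap(struct packet_type *pt)
{
	__dev_remove_pack(pt);
	synchronize_net();
	/* pt and any private data it references may now be freed */
}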
1da177e4
LT
5675
5676/**
44a0873d 5677 * unregister_netdevice_queue - remove device from the kernel
1da177e4 5678 * @dev: device
44a0873d 5679 * @head: list
6ebfbc06 5680 *
1da177e4 5681 * This function shuts down a device interface and removes it
d59b54b1 5682 * from the kernel tables.
44a0873d 5683 * If @head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
5684 *
5685 * Callers must hold the rtnl semaphore. You may want
5686 * unregister_netdev() instead of this.
5687 */
5688
44a0873d 5689void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 5690{
a6620712
HX
5691 ASSERT_RTNL();
5692
44a0873d 5693 if (head) {
9fdce099 5694 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
5695 } else {
5696 rollback_registered(dev);
5697 /* Finish processing unregister after unlock */
5698 net_set_todo(dev);
5699 }
1da177e4 5700}
44a0873d 5701EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 5702
9b5e383c
ED
5703/**
5704 * unregister_netdevice_many - unregister many devices
5705 * @head: list of devices
9b5e383c
ED
5706 */
5707void unregister_netdevice_many(struct list_head *head)
5708{
5709 struct net_device *dev;
5710
5711 if (!list_empty(head)) {
5712 rollback_registered_many(head);
5713 list_for_each_entry(dev, head, unreg_list)
5714 net_set_todo(dev);
5715 }
5716}
63c8099d 5717EXPORT_SYMBOL(unregister_netdevice_many);
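/*
 * Batching sketch (illustrative, not part of dev.c): queueing several
 * devices and unregistering them together amortizes the notifier and
 * RCU synchronization costs.  The caller holds the rtnl semaphore.
 */
static void example_kill_group(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
}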
9b5e383c 5718
1da177e4
LT
5719/**
5720 * unregister_netdev - remove device from the kernel
5721 * @dev: device
5722 *
5723 * This function shuts down a device interface and removes it
d59b54b1 5724 * from the kernel tables.
1da177e4
LT
5725 *
5726 * This is just a wrapper for unregister_netdevice that takes
5727 * the rtnl semaphore. In general you want to use this and not
5728 * unregister_netdevice.
5729 */
5730void unregister_netdev(struct net_device *dev)
5731{
5732 rtnl_lock();
5733 unregister_netdevice(dev);
5734 rtnl_unlock();
5735}
1da177e4
LT
5736EXPORT_SYMBOL(unregister_netdev);
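/*
 * Teardown sketch (illustrative, not part of dev.c): the usual
 * module-exit pairing for a driver that used register_netdev().
 */
static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes and drops the rtnl semaphore */
	free_netdev(dev);	/* safe once unregistration has completed */
}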
5737
ce286d32
EB
5738/**
 5739 * dev_change_net_namespace - move device to different network namespace
5740 * @dev: device
5741 * @net: network namespace
5742 * @pat: If not NULL name pattern to try if the current device name
5743 * is already taken in the destination network namespace.
5744 *
5745 * This function shuts down a device interface and moves it
5746 * to a new network namespace. On success 0 is returned, on
 5747 * failure a negative errno code is returned.
5748 *
5749 * Callers must hold the rtnl semaphore.
5750 */
5751
5752int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5753{
ce286d32
EB
5754 int err;
5755
5756 ASSERT_RTNL();
5757
5758 /* Don't allow namespace local devices to be moved. */
5759 err = -EINVAL;
5760 if (dev->features & NETIF_F_NETNS_LOCAL)
5761 goto out;
5762
 5763	/* Ensure the device has been registered */
5764 err = -EINVAL;
5765 if (dev->reg_state != NETREG_REGISTERED)
5766 goto out;
5767
 5768	/* Get out if there is nothing to do */
5769 err = 0;
878628fb 5770 if (net_eq(dev_net(dev), net))
ce286d32
EB
5771 goto out;
5772
5773 /* Pick the destination device name, and ensure
5774 * we can use it in the destination network namespace.
5775 */
5776 err = -EEXIST;
d9031024 5777 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
5778 /* We get here if we can't use the current device name */
5779 if (!pat)
5780 goto out;
8ce6cebc 5781 if (dev_get_valid_name(dev, pat, 1))
ce286d32
EB
5782 goto out;
5783 }
5784
5785 /*
 5786	 * And now a mini version of register_netdevice and unregister_netdevice.
5787 */
5788
5789 /* If device is running close it first. */
9b772652 5790 dev_close(dev);
ce286d32
EB
5791
5792 /* And unlink it from device chain */
5793 err = -ENODEV;
5794 unlist_netdevice(dev);
5795
5796 synchronize_net();
5797
5798 /* Shutdown queueing discipline. */
5799 dev_shutdown(dev);
5800
 5801	/* Notify protocols that we are about to destroy
 5802	   this device. They should clean up all of their state.
3b27e105
DL
5803
5804 Note that dev->reg_state stays at NETREG_REGISTERED.
5805 This is wanted because this way 8021q and macvlan know
5806 the device is just moving and can keep their slaves up.
ce286d32
EB
5807 */
5808 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5809 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
ce286d32
EB
5810
5811 /*
5812 * Flush the unicast and multicast chains
5813 */
a748ee24 5814 dev_uc_flush(dev);
22bedad3 5815 dev_mc_flush(dev);
ce286d32
EB
5816
5817 /* Actually switch the network namespace */
c346dca1 5818 dev_net_set(dev, net);
ce286d32 5819
ce286d32
EB
5820 /* If there is an ifindex conflict assign a new one */
5821 if (__dev_get_by_index(net, dev->ifindex)) {
5822 int iflink = (dev->iflink == dev->ifindex);
5823 dev->ifindex = dev_new_index(net);
5824 if (iflink)
5825 dev->iflink = dev->ifindex;
5826 }
5827
8b41d188 5828 /* Fixup kobjects */
a1b3f594 5829 err = device_rename(&dev->dev, dev->name);
8b41d188 5830 WARN_ON(err);
ce286d32
EB
5831
5832 /* Add the device back in the hashes */
5833 list_netdevice(dev);
5834
5835 /* Notify protocols, that a new device appeared. */
5836 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5837
d90a909e
EB
5838 /*
5839 * Prevent userspace races by waiting until the network
 5840	 * device is fully set up before sending notifications.
5841 */
5842 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5843
ce286d32
EB
5844 synchronize_net();
5845 err = 0;
5846out:
5847 return err;
5848}
463d0183 5849EXPORT_SYMBOL_GPL(dev_change_net_namespace);
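/*
 * Usage sketch (illustrative, not part of dev.c): move a device into
 * another namespace, falling back to a "dev%d" name on collision --
 * the same pattern default_device_exit() uses below.  The caller
 * holds the rtnl semaphore.
 */
static int example_move(struct net_device *dev, struct net *net)
{
	return dev_change_net_namespace(dev, net, "dev%d");
}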
ce286d32 5850
1da177e4
LT
5851static int dev_cpu_callback(struct notifier_block *nfb,
5852 unsigned long action,
5853 void *ocpu)
5854{
5855 struct sk_buff **list_skb;
1da177e4
LT
5856 struct sk_buff *skb;
5857 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5858 struct softnet_data *sd, *oldsd;
5859
8bb78442 5860 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
5861 return NOTIFY_OK;
5862
5863 local_irq_disable();
5864 cpu = smp_processor_id();
5865 sd = &per_cpu(softnet_data, cpu);
5866 oldsd = &per_cpu(softnet_data, oldcpu);
5867
5868 /* Find end of our completion_queue. */
5869 list_skb = &sd->completion_queue;
5870 while (*list_skb)
5871 list_skb = &(*list_skb)->next;
5872 /* Append completion queue from offline CPU. */
5873 *list_skb = oldsd->completion_queue;
5874 oldsd->completion_queue = NULL;
5875
1da177e4 5876 /* Append output queue from offline CPU. */
a9cbd588
CG
5877 if (oldsd->output_queue) {
5878 *sd->output_queue_tailp = oldsd->output_queue;
5879 sd->output_queue_tailp = oldsd->output_queue_tailp;
5880 oldsd->output_queue = NULL;
5881 oldsd->output_queue_tailp = &oldsd->output_queue;
5882 }
1da177e4
LT
5883
5884 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5885 local_irq_enable();
5886
5887 /* Process offline CPU's input_pkt_queue */
76cc8b13 5888 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
1da177e4 5889 netif_rx(skb);
76cc8b13 5890 input_queue_head_incr(oldsd);
fec5e652 5891 }
76cc8b13 5892 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6e7676c1 5893 netif_rx(skb);
76cc8b13
TH
5894 input_queue_head_incr(oldsd);
5895 }
1da177e4
LT
5896
5897 return NOTIFY_OK;
5898}
1da177e4
LT
5899
5900
7f353bf2 5901/**
b63365a2
HX
5902 * netdev_increment_features - increment feature set by one
5903 * @all: current feature set
5904 * @one: new feature set
5905 * @mask: mask feature set
7f353bf2
HX
5906 *
5907 * Computes a new feature set after adding a device with feature set
b63365a2
HX
5908 * @one to the master device with current feature set @all. Will not
5909 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 5910 */
b63365a2
HX
5911unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5912 unsigned long mask)
5913{
5914 /* If device needs checksumming, downgrade to it. */
d1b19dff 5915 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
b63365a2
HX
5916 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5917 else if (mask & NETIF_F_ALL_CSUM) {
5918 /* If one device supports v4/v6 checksumming, set for all. */
5919 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5920 !(all & NETIF_F_GEN_CSUM)) {
5921 all &= ~NETIF_F_ALL_CSUM;
5922 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5923 }
e2a6b852 5924
b63365a2
HX
5925 /* If one device supports hw checksumming, set for all. */
5926 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5927 all &= ~NETIF_F_ALL_CSUM;
5928 all |= NETIF_F_HW_CSUM;
5929 }
5930 }
7f353bf2 5931
b63365a2 5932 one |= NETIF_F_ALL_CSUM;
7f353bf2 5933
b63365a2 5934 one |= all & NETIF_F_ONE_FOR_ALL;
d9f5950f 5935 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
b63365a2 5936 all |= one & mask & NETIF_F_ONE_FOR_ALL;
7f353bf2
HX
5937
5938 return all;
5939}
b63365a2 5940EXPORT_SYMBOL(netdev_increment_features);
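/*
 * Folding sketch (illustrative, not part of dev.c): a master device
 * recomputing its feature set from its slaves, in the style of
 * bonding.  The starting set and the mask are hypothetical; real
 * masters pick these to match what they can offload.
 */
static unsigned long example_master_features(struct net_device *slaves[],
					     int n, unsigned long mask)
{
	unsigned long all = mask;
	int i;

	for (i = 0; i < n; i++)
		all = netdev_increment_features(all, slaves[i]->features,
						mask);
	return all;
}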
7f353bf2 5941
30d97d35
PE
5942static struct hlist_head *netdev_create_hash(void)
5943{
5944 int i;
5945 struct hlist_head *hash;
5946
5947 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5948 if (hash != NULL)
5949 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5950 INIT_HLIST_HEAD(&hash[i]);
5951
5952 return hash;
5953}
5954
881d966b 5955/* Initialize per network namespace state */
4665079c 5956static int __net_init netdev_init(struct net *net)
881d966b 5957{
881d966b 5958 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 5959
30d97d35
PE
5960 net->dev_name_head = netdev_create_hash();
5961 if (net->dev_name_head == NULL)
5962 goto err_name;
881d966b 5963
30d97d35
PE
5964 net->dev_index_head = netdev_create_hash();
5965 if (net->dev_index_head == NULL)
5966 goto err_idx;
881d966b
EB
5967
5968 return 0;
30d97d35
PE
5969
5970err_idx:
5971 kfree(net->dev_name_head);
5972err_name:
5973 return -ENOMEM;
881d966b
EB
5974}
5975
f0db275a
SH
5976/**
5977 * netdev_drivername - network driver for the device
5978 * @dev: network device
5979 * @buffer: buffer for resulting name
5980 * @len: size of buffer
5981 *
5982 * Determine network driver for device.
5983 */
cf04a4c7 5984char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
6579e57b 5985{
cf04a4c7
SH
5986 const struct device_driver *driver;
5987 const struct device *parent;
6579e57b
AV
5988
5989 if (len <= 0 || !buffer)
5990 return buffer;
5991 buffer[0] = 0;
5992
5993 parent = dev->dev.parent;
5994
5995 if (!parent)
5996 return buffer;
5997
5998 driver = parent->driver;
5999 if (driver && driver->name)
6000 strlcpy(buffer, driver->name, len);
6001 return buffer;
6002}
6003
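/*
 * Usage sketch (illustrative, not part of dev.c): report which driver
 * is behind a device; an empty result means no parent or driver name
 * is known.
 */
static void example_report_driver(const struct net_device *dev)
{
	char drv[64];

	pr_info("%s: driver '%s'\n", dev->name,
		netdev_drivername(dev, drv, sizeof(drv)));
}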
256df2f3
JP
6004static int __netdev_printk(const char *level, const struct net_device *dev,
6005 struct va_format *vaf)
6006{
6007 int r;
6008
6009 if (dev && dev->dev.parent)
6010 r = dev_printk(level, dev->dev.parent, "%s: %pV",
6011 netdev_name(dev), vaf);
6012 else if (dev)
6013 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6014 else
6015 r = printk("%s(NULL net_device): %pV", level, vaf);
6016
6017 return r;
6018}
6019
6020int netdev_printk(const char *level, const struct net_device *dev,
6021 const char *format, ...)
6022{
6023 struct va_format vaf;
6024 va_list args;
6025 int r;
6026
6027 va_start(args, format);
6028
6029 vaf.fmt = format;
6030 vaf.va = &args;
6031
6032 r = __netdev_printk(level, dev, &vaf);
6033 va_end(args);
6034
6035 return r;
6036}
6037EXPORT_SYMBOL(netdev_printk);
6038
6039#define define_netdev_printk_level(func, level) \
6040int func(const struct net_device *dev, const char *fmt, ...) \
6041{ \
6042 int r; \
6043 struct va_format vaf; \
6044 va_list args; \
6045 \
6046 va_start(args, fmt); \
6047 \
6048 vaf.fmt = fmt; \
6049 vaf.va = &args; \
6050 \
6051 r = __netdev_printk(level, dev, &vaf); \
6052 va_end(args); \
6053 \
6054 return r; \
6055} \
6056EXPORT_SYMBOL(func);
6057
6058define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6059define_netdev_printk_level(netdev_alert, KERN_ALERT);
6060define_netdev_printk_level(netdev_crit, KERN_CRIT);
6061define_netdev_printk_level(netdev_err, KERN_ERR);
6062define_netdev_printk_level(netdev_warn, KERN_WARNING);
6063define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6064define_netdev_printk_level(netdev_info, KERN_INFO);
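/*
 * Usage sketch (illustrative, not part of dev.c): the generated
 * helpers prefix messages with the device name (and the parent bus
 * device, when present), so drivers need not format it themselves.
 */
static void example_log(struct net_device *dev)
{
	netdev_info(dev, "link up\n");
	netdev_warn(dev, "tx ring %d stalled\n", 0);
}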
6065
4665079c 6066static void __net_exit netdev_exit(struct net *net)
881d966b
EB
6067{
6068 kfree(net->dev_name_head);
6069 kfree(net->dev_index_head);
6070}
6071
022cbae6 6072static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
6073 .init = netdev_init,
6074 .exit = netdev_exit,
6075};
6076
4665079c 6077static void __net_exit default_device_exit(struct net *net)
ce286d32 6078{
e008b5fc 6079 struct net_device *dev, *aux;
ce286d32 6080 /*
e008b5fc 6081 * Push all migratable network devices back to the
ce286d32
EB
6082 * initial network namespace
6083 */
6084 rtnl_lock();
e008b5fc 6085 for_each_netdev_safe(net, dev, aux) {
ce286d32 6086 int err;
aca51397 6087 char fb_name[IFNAMSIZ];
ce286d32
EB
6088
 6089		/* Ignore unmovable devices (e.g. loopback) */
6090 if (dev->features & NETIF_F_NETNS_LOCAL)
6091 continue;
6092
e008b5fc
EB
6093 /* Leave virtual devices for the generic cleanup */
6094 if (dev->rtnl_link_ops)
6095 continue;
d0c082ce 6096
ce286d32 6097		/* Push remaining network devices to init_net */
aca51397
PE
6098 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6099 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 6100 if (err) {
aca51397 6101 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
ce286d32 6102 __func__, dev->name, err);
aca51397 6103 BUG();
ce286d32
EB
6104 }
6105 }
6106 rtnl_unlock();
6107}
6108
04dc7f6b
EB
6109static void __net_exit default_device_exit_batch(struct list_head *net_list)
6110{
 6111	/* At exit all network devices must be removed from a network
 6112	 * namespace. Do this in the reverse order of registration.
6113 * Do this across as many network namespaces as possible to
6114 * improve batching efficiency.
6115 */
6116 struct net_device *dev;
6117 struct net *net;
6118 LIST_HEAD(dev_kill_list);
6119
6120 rtnl_lock();
6121 list_for_each_entry(net, net_list, exit_list) {
6122 for_each_netdev_reverse(net, dev) {
6123 if (dev->rtnl_link_ops)
6124 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6125 else
6126 unregister_netdevice_queue(dev, &dev_kill_list);
6127 }
6128 }
6129 unregister_netdevice_many(&dev_kill_list);
6130 rtnl_unlock();
6131}
6132
022cbae6 6133static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 6134 .exit = default_device_exit,
04dc7f6b 6135 .exit_batch = default_device_exit_batch,
ce286d32
EB
6136};
6137
1da177e4
LT
6138/*
6139 * Initialize the DEV module. At boot time this walks the device list and
6140 * unhooks any devices that fail to initialise (normally hardware not
6141 * present) and leaves us with a valid list of present and active devices.
6142 *
6143 */
6144
6145/*
6146 * This is called single threaded during boot, so no need
6147 * to take the rtnl semaphore.
6148 */
6149static int __init net_dev_init(void)
6150{
6151 int i, rc = -ENOMEM;
6152
6153 BUG_ON(!dev_boot_phase);
6154
1da177e4
LT
6155 if (dev_proc_init())
6156 goto out;
6157
8b41d188 6158 if (netdev_kobject_init())
1da177e4
LT
6159 goto out;
6160
6161 INIT_LIST_HEAD(&ptype_all);
82d8a867 6162 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
6163 INIT_LIST_HEAD(&ptype_base[i]);
6164
881d966b
EB
6165 if (register_pernet_subsys(&netdev_net_ops))
6166 goto out;
1da177e4
LT
6167
6168 /*
6169 * Initialise the packet receive queues.
6170 */
6171
6f912042 6172 for_each_possible_cpu(i) {
e36fa2f7 6173 struct softnet_data *sd = &per_cpu(softnet_data, i);
1da177e4 6174
dee42870 6175 memset(sd, 0, sizeof(*sd));
e36fa2f7 6176 skb_queue_head_init(&sd->input_pkt_queue);
6e7676c1 6177 skb_queue_head_init(&sd->process_queue);
e36fa2f7
ED
6178 sd->completion_queue = NULL;
6179 INIT_LIST_HEAD(&sd->poll_list);
a9cbd588
CG
6180 sd->output_queue = NULL;
6181 sd->output_queue_tailp = &sd->output_queue;
df334545 6182#ifdef CONFIG_RPS
e36fa2f7
ED
6183 sd->csd.func = rps_trigger_softirq;
6184 sd->csd.info = sd;
6185 sd->csd.flags = 0;
6186 sd->cpu = i;
1e94d72f 6187#endif
0a9627f2 6188
e36fa2f7
ED
6189 sd->backlog.poll = process_backlog;
6190 sd->backlog.weight = weight_p;
6191 sd->backlog.gro_list = NULL;
6192 sd->backlog.gro_count = 0;
1da177e4
LT
6193 }
6194
1da177e4
LT
6195 dev_boot_phase = 0;
6196
505d4f73
EB
 6197	/* The loopback device is special: if any other network device
 6198	 * is present in a network namespace, the loopback device must
 6199	 * be present too.  Since we now dynamically allocate and free
 6200	 * the loopback device, ensure this invariant is maintained by
 6201	 * keeping the loopback device as the first device on the
 6202	 * list of network devices.  That way it is the first device
 6203	 * to appear in a namespace and the last network device
 6204	 * to disappear from it.
 6205	 */
6206 if (register_pernet_device(&loopback_net_ops))
6207 goto out;
6208
6209 if (register_pernet_device(&default_device_ops))
6210 goto out;
6211
962cf36c
CM
6212 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6213 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
6214
6215 hotcpu_notifier(dev_cpu_callback, 0);
6216 dst_init();
6217 dev_mcast_init();
6218 rc = 0;
6219out:
6220 return rc;
6221}
6222
6223subsys_initcall(net_dev_init);
6224
e88721f8
KK
6225static int __init initialize_hashrnd(void)
6226{
0a9627f2 6227 get_random_bytes(&hashrnd, sizeof(hashrnd));
e88721f8
KK
6228 return 0;
6229}
6230
6231late_initcall_sync(initialize_hashrnd);
6232