/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *	Authors:	Ross Biro
 *			Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *			Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

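/*
 * Illustrative sketch (not part of dev.c): the registration pattern the
 * helper above expects.  The handler, the ethertype and all names here
 * are assumptions for demonstration only.
 */
static int example_pt_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *orig_dev)
{
	/* The handler owns the skb it was given and must free it. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
	.type = cpu_to_be16(ETH_P_802_2),	/* hashed into ptype_base above */
	.func = example_pt_rcv,
};

/* dev_add_pack(&example_pt) starts delivery; dev_remove_pack(&example_pt)
 * stops it and sleeps for a grace period (see below).
 */
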
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings are found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

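/*
 * Illustrative example (an assumption, not taken from this file): given
 * the parsing above, booting with
 *
 *	netdev=5,0x240,0,0,eth0
 *
 * stores irq=5 and base_addr=0x240 for "eth0" via netdev_boot_setup_add(),
 * to be applied later by netdev_boot_setup_check() during probing.
 */
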
/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);

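/*
 * Illustrative sketch (not part of dev.c): the hold/put discipline that
 * dev_get_by_name() requires.  The interface name is an assumption.
 */
static void example_use_dev_get_by_name(struct net *net)
{
	struct net_device *dev = dev_get_by_name(net, "eth0");

	if (!dev)
		return;
	/* ... use dev; the held reference keeps it alive ... */
	dev_put(dev);		/* release the reference taken above */
}
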
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count
 *	increased and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);

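/*
 * For illustration (derived from the checks above): "eth0" and "wlan%d"
 * pass dev_valid_name(), while "", names of IFNAMSIZ or more characters,
 * ".", "..", "a/b" and "my eth" are all rejected.
 */
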
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

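/*
 * Illustrative sketch (not part of dev.c): naming a freshly allocated
 * device with dev_alloc_name() under RTNL, before registration.  The
 * "dummy%d" template is an assumption for demonstration only.
 */
static struct net_device *example_create_named(void)
{
	struct net_device *dev = alloc_etherdev(0);

	if (!dev)
		return NULL;
	rtnl_lock();
	if (dev_alloc_name(dev, "dummy%d") < 0) {	/* picks dummy0, dummy1, ... */
		rtnl_unlock();
		free_netdev(dev);
		return NULL;
	}
	rtnl_unlock();
	return dev;
}
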
static int dev_get_valid_name(struct net *net, const char *name, char *buf,
			      bool fmt)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return __dev_alloc_name(net, name, buf);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (buf != name)
		strlcpy(buf, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, newname, dev->name, 1);
	if (err < 0)
		return err;

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net_eq(net, &init_net)) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare to death, when device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 * Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);

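/*
 * Illustrative sketch (not part of dev.c): bouncing an interface with the
 * two calls above; both must run under RTNL.
 */
static int example_bounce(struct net_device *dev)
{
	int err;

	rtnl_lock();
	dev_close(dev);		/* nop if the device is already down */
	err = dev_open(dev);	/* nop if the device is already up */
	rtnl_unlock();
	return err;
}
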
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

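/*
 * Illustrative sketch (not part of dev.c): a minimal notifier block for
 * the chain above.  All names here are assumptions; note how existing
 * devices are replayed as NETDEV_REGISTER/NETDEV_UP on registration.
 */
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
		printk(KERN_DEBUG "example: %s event %lu\n", dev->name, event);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier); */
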
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

static inline void net_timestamp_check(struct sk_buff *skb)
{
	if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
		__net_timestamp(skb);
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);

	if (!(dev->flags & IFF_UP) ||
	    (skb->len > (dev->mtu + dev->hard_header_len))) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

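/*
 * Illustrative sketch (not part of dev.c): a veth-style start_xmit that
 * injects frames into a peer with dev_forward_skb(), as the comment above
 * describes.  The private structure and its peer field are assumptions.
 */
struct example_priv {
	struct net_device *peer;	/* hypothetical peer device */
};

static netdev_tx_t example_peer_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	if (priv->peer)
		dev_forward_skb(priv->peer, skb);	/* frees skb even on drop */
	else
		kfree_skb(skb);
	return NETDEV_TX_OK;
}
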
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp_set(skb);
#else
	net_timestamp_set(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

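/*
 * Illustrative sketch (not part of dev.c): the usual suspend/resume pairing
 * for the two helpers above.  The entry points are assumptions for
 * demonstration only.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);	/* stops all tx queues if running */
	/* ... power the hardware down ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... power the hardware up ... */
	netif_device_attach(dev);	/* restarts queues and the watchdog */
	return 0;
}
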
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

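/*
 * For illustration (derived from the checks above): a device advertising
 * only NETIF_F_IP_CSUM can checksum ETH_P_IP packets but not ETH_P_IPV6
 * ones, while NETIF_F_GEN_CSUM covers any protocol; for 802.1Q frames the
 * same test is repeated against dev->vlan_features with the encapsulated
 * protocol.
 */
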
/**
 * skb_dev_set -- assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

1731 /**
1732  * skb_gso_segment - Perform segmentation on skb.
1733  * @skb: buffer to segment
1734  * @features: features for the output path (see dev->features)
1735  *
1736  * This function segments the given skb and returns a list of segments.
1737  *
1738  * It may return NULL if the skb requires no segmentation. This is
1739  * only possible when GSO is used for verifying header integrity.
1740  */
1741 struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
1742 {
1743 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1744 struct packet_type *ptype;
1745 __be16 type = skb->protocol;
1746 int err;
1747
1748 skb_reset_mac_header(skb);
1749 skb->mac_len = skb->network_header - skb->mac_header;
1750 __skb_pull(skb, skb->mac_len);
1751
1752 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1753 struct net_device *dev = skb->dev;
1754 struct ethtool_drvinfo info = {};
1755
1756 if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
1757 dev->ethtool_ops->get_drvinfo(dev, &info);
1758
1759 WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
1760 "ip_summed=%d",
1761 info.driver, dev ? dev->features : 0L,
1762 skb->sk ? skb->sk->sk_route_caps : 0L,
1763 skb->len, skb->data_len, skb->ip_summed);
1764
1765 if (skb_header_cloned(skb) &&
1766 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1767 return ERR_PTR(err);
1768 }
1769
1770 rcu_read_lock();
1771 list_for_each_entry_rcu(ptype,
1772 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1773 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
1774 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1775 err = ptype->gso_send_check(skb);
1776 segs = ERR_PTR(err);
1777 if (err || skb_gso_ok(skb, features))
1778 break;
1779 __skb_push(skb, (skb->data -
1780 skb_network_header(skb)));
1781 }
1782 segs = ptype->gso_segment(skb, features);
1783 break;
1784 }
1785 }
1786 rcu_read_unlock();
1787
1788 __skb_push(skb, skb->data - skb_mac_header(skb));
1789
1790 return segs;
1791 }
1792 EXPORT_SYMBOL(skb_gso_segment);
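
The calling convention is worth spelling out: NULL means the skb passed header verification and needs no segmentation, an ERR_PTR() encodes failure, and any other return is a NULL-terminated list chained through ->next. A sketch of a consumer (xmit_one() is an assumed helper; dev_gso_segment() below is the real in-tree caller):

struct sk_buff *segs = skb_gso_segment(skb, dev->features);

if (IS_ERR(segs))
        return PTR_ERR(segs);           /* could not segment */
if (!segs)
        return xmit_one(skb);           /* header check only */
while (segs) {
        struct sk_buff *nskb = segs;

        segs = segs->next;
        nskb->next = NULL;
        xmit_one(nskb);                 /* transmit each segment */
}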
1793
1794 /* Take action when hardware reception checksum errors are detected. */
1795 #ifdef CONFIG_BUG
1796 void netdev_rx_csum_fault(struct net_device *dev)
1797 {
1798 if (net_ratelimit()) {
1799 printk(KERN_ERR "%s: hw csum failure.\n",
1800 dev ? dev->name : "<unknown>");
1801 dump_stack();
1802 }
1803 }
1804 EXPORT_SYMBOL(netdev_rx_csum_fault);
1805 #endif
1806
1807 /* Actually, we should eliminate this check as soon as we know that:
1808  * 1. An IOMMU is present and can map all of memory.
1809  * 2. No high memory really exists on this machine.
1810  */
1811
1812 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1813 {
1814 #ifdef CONFIG_HIGHMEM
1815 int i;
1816 if (!(dev->features & NETIF_F_HIGHDMA)) {
1817 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1818 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1819 return 1;
1820 }
1821
1822 if (PCI_DMA_BUS_IS_PHYS) {
1823 struct device *pdev = dev->dev.parent;
1824
1825 if (!pdev)
1826 return 0;
1827 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1828 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1829 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1830 return 1;
1831 }
1832 }
1833 #endif
1834 return 0;
1835 }
1da177e4 1836
1837struct dev_gso_cb {
1838 void (*destructor)(struct sk_buff *skb);
1839};
1840
1841#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1842
1843static void dev_gso_skb_destructor(struct sk_buff *skb)
1844{
1845 struct dev_gso_cb *cb;
1846
1847 do {
1848 struct sk_buff *nskb = skb->next;
1849
1850 skb->next = nskb->next;
1851 nskb->next = NULL;
1852 kfree_skb(nskb);
1853 } while (skb->next);
1854
1855 cb = DEV_GSO_CB(skb);
1856 if (cb->destructor)
1857 cb->destructor(skb);
1858}
1859
1860/**
1861 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1862 * @skb: buffer to segment
1863 *
1864 * This function segments the given skb and stores the list of segments
1865 * in skb->next.
1866 */
1867static int dev_gso_segment(struct sk_buff *skb)
1868{
1869 struct net_device *dev = skb->dev;
1870 struct sk_buff *segs;
1871 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1872 NETIF_F_SG : 0);
1873
1874 segs = skb_gso_segment(skb, features);
1875
1876 /* Verifying header integrity only. */
1877 if (!segs)
1878 return 0;
f6a78bfc 1879
801678c5 1880 if (IS_ERR(segs))
1881 return PTR_ERR(segs);
1882
1883 skb->next = segs;
1884 DEV_GSO_CB(skb)->destructor = skb->destructor;
1885 skb->destructor = dev_gso_skb_destructor;
1886
1887 return 0;
1888}
1889
1890/*
1891 * Try to orphan skb early, right before transmission by the device.
1892 * We cannot orphan skb if tx timestamp is requested, since
1893 * drivers need to call skb_tstamp_tx() to send the timestamp.
1894 */
1895static inline void skb_orphan_try(struct sk_buff *skb)
1896{
1897 if (!skb_tx(skb)->flags)
1898 skb_orphan(skb);
1899}
1900
1901 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1902 struct netdev_queue *txq)
1903 {
1904 const struct net_device_ops *ops = dev->netdev_ops;
1905 int rc = NETDEV_TX_OK;
1906
1907 if (likely(!skb->next)) {
1908 if (!list_empty(&ptype_all))
1909 dev_queue_xmit_nit(skb, dev);
1910
1911 /*
1912 * If the device doesn't need skb->dst, release it right now
1913 * while it's hot in this cpu's cache.
1914 */
1915 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1916 skb_dst_drop(skb);
1917
1918 skb_orphan_try(skb);
1919
1920 if (netif_needs_gso(dev, skb)) {
1921 if (unlikely(dev_gso_segment(skb)))
1922 goto out_kfree_skb;
1923 if (skb->next)
1924 goto gso;
1925 }
1926
1927 rc = ops->ndo_start_xmit(skb, dev);
1928 if (rc == NETDEV_TX_OK)
1929 txq_trans_update(txq);
1930 return rc;
1931 }
1932
1933 gso:
1934 do {
1935 struct sk_buff *nskb = skb->next;
1936
1937 skb->next = nskb->next;
1938 nskb->next = NULL;
1939
1940 /*
1941 * If the device doesn't need nskb->dst, release it right now
1942 * while it's hot in this cpu's cache.
1943 */
1944 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1945 skb_dst_drop(nskb);
1946
1947 rc = ops->ndo_start_xmit(nskb, dev);
1948 if (unlikely(rc != NETDEV_TX_OK)) {
1949 if (rc & ~NETDEV_TX_MASK)
1950 goto out_kfree_gso_skb;
1951 nskb->next = skb->next;
1952 skb->next = nskb;
1953 return rc;
1954 }
1955 txq_trans_update(txq);
1956 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
1957 return NETDEV_TX_BUSY;
1958 } while (skb->next);
1959
1960 out_kfree_gso_skb:
1961 if (likely(skb->next == NULL))
1962 skb->destructor = DEV_GSO_CB(skb)->destructor;
1963 out_kfree_skb:
1964 kfree_skb(skb);
1965 return rc;
1966 }
1967
0a9627f2 1968static u32 hashrnd __read_mostly;
b6b2fed1 1969
1970 u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1971 {
1972 u32 hash;
1973
1974 if (skb_rx_queue_recorded(skb)) {
1975 hash = skb_get_rx_queue(skb);
1976 while (unlikely(hash >= dev->real_num_tx_queues))
1977 hash -= dev->real_num_tx_queues;
1978 return hash;
1979 }
1980
1981 if (skb->sk && skb->sk->sk_hash)
1982 hash = skb->sk->sk_hash;
1983 else
1984 hash = (__force u16) skb->protocol;
1985
1986 hash = jhash_1word(hash, hashrnd);
1987
1988 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
1989 }
1990 EXPORT_SYMBOL(skb_tx_hash);
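
Note the final line: rather than the more obvious hash % real_num_tx_queues, the queue is picked by fixed-point scaling, which avoids an integer division. Because hash is uniform over the full 32-bit range, ((u64)hash * n) >> 32 is uniform over 0..n-1. A self-contained illustration (userspace C; the values are arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t hash = 0x9e3779b9;     /* any 32-bit hash value */
        uint32_t nqueues = 8;
        uint16_t queue = (uint16_t)(((uint64_t)hash * nqueues) >> 32);

        printf("queue %u of %u\n", queue, nqueues);     /* prints 4 */
        return 0;
}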
8f0f2223 1991
1992 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1993 {
1994 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1995 if (net_ratelimit()) {
1996 pr_warning("%s selects TX queue %d, but "
1997 "real number of TX queues is %d\n",
1998 dev->name, queue_index, dev->real_num_tx_queues);
1999 }
2000 return 0;
2001 }
2002 return queue_index;
2003 }
2004
2005 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2006 struct sk_buff *skb)
2007 {
2008 u16 queue_index;
2009 struct sock *sk = skb->sk;
2010
2011 if (sk_tx_queue_recorded(sk)) {
2012 queue_index = sk_tx_queue_get(sk);
2013 } else {
2014 const struct net_device_ops *ops = dev->netdev_ops;
2015
2016 if (ops->ndo_select_queue) {
2017 queue_index = ops->ndo_select_queue(dev, skb);
2018 queue_index = dev_cap_txqueue(dev, queue_index);
2019 } else {
2020 queue_index = 0;
2021 if (dev->real_num_tx_queues > 1)
2022 queue_index = skb_tx_hash(dev, skb);
2023
2024 if (sk) {
2025 struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
2026
2027 if (dst && skb_dst(skb) == dst)
2028 sk_tx_queue_set(sk, queue_index);
2029 }
2030 }
2031 }
2032
2033 skb_set_queue_mapping(skb, queue_index);
2034 return netdev_get_tx_queue(dev, queue_index);
2035 }
2036
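When a driver supplies ndo_select_queue it takes precedence over the hash, and the result is still clamped by dev_cap_txqueue() above. A hypothetical hook that reserves the last queue for control traffic (sketch only, not from a real driver):

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
{
        if (skb->priority == TC_PRIO_CONTROL)
                return dev->real_num_tx_queues - 1;     /* reserved queue */
        return skb_tx_hash(dev, skb);
}
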
2037static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2038 struct net_device *dev,
2039 struct netdev_queue *txq)
2040{
2041 spinlock_t *root_lock = qdisc_lock(q);
2042 int rc;
2043
2044 spin_lock(root_lock);
2045 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2046 kfree_skb(skb);
2047 rc = NET_XMIT_DROP;
2048 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2049 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
2050 /*
2051 * This is a work-conserving queue; there are no old skbs
2052 * waiting to be sent out; and the qdisc is not running -
2053 * xmit the skb directly.
2054 */
2055 __qdisc_update_bstats(q, skb->len);
2056 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
2057 __qdisc_run(q);
2058 else
2059 clear_bit(__QDISC_STATE_RUNNING, &q->state);
2060
2061 rc = NET_XMIT_SUCCESS;
2062 } else {
2063 rc = qdisc_enqueue_root(skb, q);
2064 qdisc_run(q);
2065 }
2066 spin_unlock(root_lock);
2067
2068 return rc;
2069}
2070
2071 /*
2072 * Returns true if either:
2073 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2074 * 2. skb is fragmented and the device does not support SG, or
2075 * at least one of the fragments is in highmem and the device
2076 * does not support DMA from it.
2077 */
2078 static inline int skb_needs_linearize(struct sk_buff *skb,
2079 struct net_device *dev)
2080 {
2081 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2082 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2083 illegal_highdma(dev, skb)));
2084 }
2085
2086 /**
2087 * dev_queue_xmit - transmit a buffer
2088 * @skb: buffer to transmit
2089 *
2090 * Queue a buffer for transmission to a network device. The caller must
2091 * have set the device and priority and built the buffer before calling
2092 * this function. The function can be called from an interrupt.
2093 *
2094 * A negative errno code is returned on a failure. A success does not
2095 * guarantee the frame will be transmitted as it may be dropped due
2096 * to congestion or traffic shaping.
2097 *
2098 * -----------------------------------------------------------------------------------
2099 * I notice this method can also return errors from the queue disciplines,
2100 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2101 * be positive.
2102 *
2103 * Regardless of the return value, the skb is consumed, so it is currently
2104 * difficult to retry a send to this method. (You can bump the ref count
2105 * before sending to hold a reference for retry if you are careful.)
2106 *
2107 * When calling this method, interrupts MUST be enabled. This is because
2108 * the BH enable code must have IRQs enabled so that it will not deadlock.
2109 * --BLG
2110 */
2111 int dev_queue_xmit(struct sk_buff *skb)
2112 {
2113 struct net_device *dev = skb->dev;
2114 struct netdev_queue *txq;
2115 struct Qdisc *q;
2116 int rc = -ENOMEM;
2117
2118 /* GSO will handle the following emulations directly. */
2119 if (netif_needs_gso(dev, skb))
2120 goto gso;
2121
2122 /* Convert a paged skb to linear, if required */
2123 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
2124 goto out_kfree_skb;
2125
2126 /* If packet is not checksummed and device does not support
2127 * checksumming for this protocol, complete checksumming here.
2128 */
2129 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2130 skb_set_transport_header(skb, skb->csum_start -
2131 skb_headroom(skb));
2132 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2133 goto out_kfree_skb;
2134 }
2135
2136 gso:
2137 /* Disable soft irqs for various locks below. Also
2138 * stops preemption for RCU.
2139 */
2140 rcu_read_lock_bh();
2141
2142 txq = dev_pick_tx(dev, skb);
2143 q = rcu_dereference_bh(txq->qdisc);
2144
2145 #ifdef CONFIG_NET_CLS_ACT
2146 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2147 #endif
2148 if (q->enqueue) {
2149 rc = __dev_xmit_skb(skb, q, dev, txq);
2150 goto out;
2151 }
2152
2153 /* The device has no queue. Common case for software devices:
2154 loopback, all sorts of tunnels...
2155
2156 Really, it is unlikely that netif_tx_lock protection is necessary
2157 here. (f.e. loopback and IP tunnels are clean, ignoring statistics
2158 counters.)
2159 However, it is possible that they rely on the protection
2160 made by us here.
2161
2162 Check this and shoot the lock. It is not prone to deadlocks.
2163 Either shoot the noqueue qdisc, it is even simpler 8)
2164 */
2165 if (dev->flags & IFF_UP) {
2166 int cpu = smp_processor_id(); /* ok because BHs are off */
2167
2168 if (txq->xmit_lock_owner != cpu) {
2169
2170 HARD_TX_LOCK(dev, txq, cpu);
2171
2172 if (!netif_tx_queue_stopped(txq)) {
2173 rc = dev_hard_start_xmit(skb, dev, txq);
2174 if (dev_xmit_complete(rc)) {
2175 HARD_TX_UNLOCK(dev, txq);
2176 goto out;
2177 }
2178 }
2179 HARD_TX_UNLOCK(dev, txq);
2180 if (net_ratelimit())
2181 printk(KERN_CRIT "Virtual device %s asks to "
2182 "queue packet!\n", dev->name);
2183 } else {
2184 /* Recursion is detected! It is possible,
2185 * unfortunately */
2186 if (net_ratelimit())
2187 printk(KERN_CRIT "Dead loop on virtual device "
2188 "%s, fix it urgently!\n", dev->name);
2189 }
2190 }
2191
2192 rc = -ENETDOWN;
2193 rcu_read_unlock_bh();
2194
2195 out_kfree_skb:
2196 kfree_skb(skb);
2197 return rc;
2198 out:
2199 rcu_read_unlock_bh();
2200 return rc;
2201 }
2202 EXPORT_SYMBOL(dev_queue_xmit);
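
For reference, a minimal in-kernel caller looks like the sketch below. The frame must be fully built, link-layer header included, and the skb is consumed whatever the return value, so there is no kfree_skb() in the error path (all names other than the core APIs are assumptions):

static void foo_send_frame(struct net_device *dev, const void *frame,
                           unsigned int len)
{
        struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + len,
                                        GFP_ATOMIC);

        if (!skb)
                return;
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        memcpy(skb_put(skb, len), frame, len);  /* L2 header included */
        skb->dev = dev;
        dev_queue_xmit(skb);    /* consumed on success and on drop alike */
}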
2203
2204
2205/*=======================================================================
2206 Receiver routines
2207 =======================================================================*/
2208
6b2bedc3 2209int netdev_max_backlog __read_mostly = 1000;
3b098e2d 2210int netdev_tstamp_prequeue __read_mostly = 1;
2211int netdev_budget __read_mostly = 300;
2212int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4 2213
2214/* Called with irq disabled */
2215static inline void ____napi_schedule(struct softnet_data *sd,
2216 struct napi_struct *napi)
2217{
2218 list_add_tail(&napi->poll_list, &sd->poll_list);
2219 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2220}
2221
df334545 2222#ifdef CONFIG_RPS
2223
2224/* One global table that all flow-based protocols share. */
8770acf0 2225struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2226EXPORT_SYMBOL(rps_sock_flow_table);
2227
2228/*
2229 * get_rps_cpu is called from netif_receive_skb and returns the target
2230 * CPU from the RPS map of the receiving queue for a given skb.
b0e28f1e 2231 * rcu_read_lock must be held on entry.
0a9627f2 2232 */
2233static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2234 struct rps_dev_flow **rflowp)
2235{
2236 struct ipv6hdr *ip6;
2237 struct iphdr *ip;
2238 struct netdev_rx_queue *rxqueue;
2239 struct rps_map *map;
2240 struct rps_dev_flow_table *flow_table;
2241 struct rps_sock_flow_table *sock_flow_table;
2242 int cpu = -1;
2243 u8 ip_proto;
fec5e652 2244 u16 tcpu;
2245 u32 addr1, addr2, ihl;
2246 union {
2247 u32 v32;
2248 u16 v16[2];
2249 } ports;
0a9627f2 2250
2251 if (skb_rx_queue_recorded(skb)) {
2252 u16 index = skb_get_rx_queue(skb);
2253 if (unlikely(index >= dev->num_rx_queues)) {
2254 if (net_ratelimit()) {
2255 pr_warning("%s received packet on queue "
2256 "%u, but number of RX queues is %u\n",
2257 dev->name, index, dev->num_rx_queues);
2258 }
2259 goto done;
2260 }
2261 rxqueue = dev->_rx + index;
2262 } else
2263 rxqueue = dev->_rx;
2264
fec5e652 2265 if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
2266 goto done;
2267
2268 if (skb->rxhash)
2269 goto got_hash; /* Skip hash computation on packet header */
2270
2271 switch (skb->protocol) {
2272 case __constant_htons(ETH_P_IP):
2273 if (!pskb_may_pull(skb, sizeof(*ip)))
2274 goto done;
2275
2276 ip = (struct iphdr *) skb->data;
2277 ip_proto = ip->protocol;
2278 addr1 = (__force u32) ip->saddr;
2279 addr2 = (__force u32) ip->daddr;
2280 ihl = ip->ihl;
2281 break;
2282 case __constant_htons(ETH_P_IPV6):
2283 if (!pskb_may_pull(skb, sizeof(*ip6)))
2284 goto done;
2285
2286 ip6 = (struct ipv6hdr *) skb->data;
2287 ip_proto = ip6->nexthdr;
2288 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2289 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2290 ihl = (40 >> 2);
2291 break;
2292 default:
2293 goto done;
2294 }
2295 switch (ip_proto) {
2296 case IPPROTO_TCP:
2297 case IPPROTO_UDP:
2298 case IPPROTO_DCCP:
2299 case IPPROTO_ESP:
2300 case IPPROTO_AH:
2301 case IPPROTO_SCTP:
2302 case IPPROTO_UDPLITE:
b249dcb8 2303 if (pskb_may_pull(skb, (ihl * 4) + 4)) {
2304 ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
2305 if (ports.v16[1] < ports.v16[0])
2306 swap(ports.v16[0], ports.v16[1]);
2307 break;
b249dcb8 2308 }
0a9627f2 2309 default:
8c52d509 2310 ports.v32 = 0;
2311 break;
2312 }
2313
2314 /* get a consistent hash (same value on both flow directions) */
2315 if (addr2 < addr1)
2316 swap(addr1, addr2);
8c52d509 2317 skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
2318 if (!skb->rxhash)
2319 skb->rxhash = 1;
2320
2321got_hash:
2322 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2323 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2324 if (flow_table && sock_flow_table) {
2325 u16 next_cpu;
2326 struct rps_dev_flow *rflow;
2327
2328 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2329 tcpu = rflow->cpu;
2330
2331 next_cpu = sock_flow_table->ents[skb->rxhash &
2332 sock_flow_table->mask];
2333
2334 /*
2335 * If the desired CPU (where last recvmsg was done) is
2336 * different from current CPU (one in the rx-queue flow
2337 * table entry), switch if one of the following holds:
2338 * - Current CPU is unset (equal to RPS_NO_CPU).
2339 * - Current CPU is offline.
2340 * - The current CPU's queue tail has advanced beyond the
2341 * last packet that was enqueued using this table entry.
2342 * This guarantees that all previous packets for the flow
2343 * have been dequeued, thus preserving in-order delivery.
2344 */
2345 if (unlikely(tcpu != next_cpu) &&
2346 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2347 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2348 rflow->last_qtail)) >= 0)) {
2349 tcpu = rflow->cpu = next_cpu;
2350 if (tcpu != RPS_NO_CPU)
2351 rflow->last_qtail = per_cpu(softnet_data,
2352 tcpu).input_queue_head;
2353 }
2354 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2355 *rflowp = rflow;
2356 cpu = tcpu;
2357 goto done;
2358 }
2359 }
2360
2361 map = rcu_dereference(rxqueue->rps_map);
2362 if (map) {
fec5e652 2363 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2364
2365 if (cpu_online(tcpu)) {
2366 cpu = tcpu;
2367 goto done;
2368 }
2369 }
2370
2371done:
2372 return cpu;
2373}
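
The hash computed above is deliberately symmetric: the address pair and the port pair are each sorted before hashing, so both directions of one connection produce the same rxhash. Condensed into a standalone helper (a sketch; jhash_3words() and hashrnd are as used above, and the final ?: 1 mirrors the rule that a valid rxhash must be non-zero):

static u32 foo_flow_hash(u32 saddr, u32 daddr, u16 sport, u16 dport)
{
        union { u32 v32; u16 v16[2]; } ports = { .v16 = { sport, dport } };

        if (ports.v16[1] < ports.v16[0])
                swap(ports.v16[0], ports.v16[1]);
        if (daddr < saddr)
                swap(saddr, daddr);
        return jhash_3words(saddr, daddr, ports.v32, hashrnd) ?: 1;
}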
2374
0a9627f2 2375/* Called from hardirq (IPI) context */
e36fa2f7 2376static void rps_trigger_softirq(void *data)
0a9627f2 2377{
2378 struct softnet_data *sd = data;
2379
eecfd7c4 2380 ____napi_schedule(sd, &sd->backlog);
dee42870 2381 sd->received_rps++;
0a9627f2 2382}
e36fa2f7 2383
fec5e652 2384#endif /* CONFIG_RPS */
0a9627f2 2385
2386/*
2387 * Check if this softnet_data structure is another cpu one
2388 * If yes, queue it to our IPI list and return 1
2389 * If no, return 0
2390 */
2391static int rps_ipi_queued(struct softnet_data *sd)
2392{
2393#ifdef CONFIG_RPS
2394 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2395
2396 if (sd != mysd) {
2397 sd->rps_ipi_next = mysd->rps_ipi_list;
2398 mysd->rps_ipi_list = sd;
2399
2400 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2401 return 1;
2402 }
2403#endif /* CONFIG_RPS */
2404 return 0;
2405}
2406
2407/*
2408 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2409 * queue (may be a remote CPU queue).
2410 */
2411static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2412 unsigned int *qtail)
0a9627f2 2413{
e36fa2f7 2414 struct softnet_data *sd;
2415 unsigned long flags;
2416
e36fa2f7 2417 sd = &per_cpu(softnet_data, cpu);
2418
2419 local_irq_save(flags);
0a9627f2 2420
e36fa2f7 2421 rps_lock(sd);
2422 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2423 if (skb_queue_len(&sd->input_pkt_queue)) {
0a9627f2 2424enqueue:
e36fa2f7 2425 __skb_queue_tail(&sd->input_pkt_queue, skb);
fec5e652 2426#ifdef CONFIG_RPS
2427 *qtail = sd->input_queue_head +
2428 skb_queue_len(&sd->input_pkt_queue);
fec5e652 2429#endif
e36fa2f7 2430 rps_unlock(sd);
152102c7 2431 local_irq_restore(flags);
2432 return NET_RX_SUCCESS;
2433 }
2434
2435 /* Schedule NAPI for backlog device */
2436 if (napi_schedule_prep(&sd->backlog)) {
2437 if (!rps_ipi_queued(sd))
eecfd7c4 2438 ____napi_schedule(sd, &sd->backlog);
2439 }
2440 goto enqueue;
2441 }
2442
dee42870 2443 sd->dropped++;
e36fa2f7 2444 rps_unlock(sd);
0a9627f2 2445
2446 local_irq_restore(flags);
2447
2448 kfree_skb(skb);
2449 return NET_RX_DROP;
2450}
1da177e4 2451
2452/**
2453 * netif_rx - post buffer to the network code
2454 * @skb: buffer to post
2455 *
2456 * This function receives a packet from a device driver and queues it for
2457 * the upper (protocol) levels to process. It always succeeds. The buffer
2458 * may be dropped during processing for congestion control or by the
2459 * protocol layers.
2460 *
2461 * return values:
2462 * NET_RX_SUCCESS (no congestion)
2463 * NET_RX_DROP (packet was dropped)
2464 *
2465 */
2466
2467int netif_rx(struct sk_buff *skb)
2468{
b0e28f1e 2469 int ret;
2470
2471 /* if netpoll wants it, pretend we never saw it */
2472 if (netpoll_rx(skb))
2473 return NET_RX_DROP;
2474
2475 if (netdev_tstamp_prequeue)
2476 net_timestamp_check(skb);
1da177e4 2477
df334545 2478#ifdef CONFIG_RPS
b0e28f1e 2479 {
fec5e652 2480 struct rps_dev_flow voidflow, *rflow = &voidflow;
2481 int cpu;
2482
2483 rcu_read_lock();
2484
2485 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2486 if (cpu < 0)
2487 cpu = smp_processor_id();
2488
2489 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2490
2491 rcu_read_unlock();
2492 }
1e94d72f 2493#else
2494 {
2495 unsigned int qtail;
2496 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2497 put_cpu();
2498 }
1e94d72f 2499#endif
b0e28f1e 2500 return ret;
1da177e4 2501}
d1b19dff 2502EXPORT_SYMBOL(netif_rx);
2503
2504int netif_rx_ni(struct sk_buff *skb)
2505{
2506 int err;
2507
2508 preempt_disable();
2509 err = netif_rx(skb);
2510 if (local_softirq_pending())
2511 do_softirq();
2512 preempt_enable();
2513
2514 return err;
2515}
2516EXPORT_SYMBOL(netif_rx_ni);
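
netif_rx() is the entry point for non-NAPI drivers; a typical interrupt-time receive path looks roughly like this (hypothetical driver, all foo_* names are assumptions):

static void foo_rx_irq(struct net_device *dev, const void *buf,
                       unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }
        memcpy(skb_put(skb, len), buf, len);
        skb->protocol = eth_type_trans(skb, dev);       /* sets skb->dev */
        netif_rx(skb);          /* enqueue to the per-cpu backlog */
}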
2517
2518static void net_tx_action(struct softirq_action *h)
2519{
2520 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2521
2522 if (sd->completion_queue) {
2523 struct sk_buff *clist;
2524
2525 local_irq_disable();
2526 clist = sd->completion_queue;
2527 sd->completion_queue = NULL;
2528 local_irq_enable();
2529
2530 while (clist) {
2531 struct sk_buff *skb = clist;
2532 clist = clist->next;
2533
547b792c 2534 WARN_ON(atomic_read(&skb->users));
2535 __kfree_skb(skb);
2536 }
2537 }
2538
2539 if (sd->output_queue) {
37437bb2 2540 struct Qdisc *head;
2541
2542 local_irq_disable();
2543 head = sd->output_queue;
2544 sd->output_queue = NULL;
a9cbd588 2545 sd->output_queue_tailp = &sd->output_queue;
2546 local_irq_enable();
2547
2548 while (head) {
2549 struct Qdisc *q = head;
2550 spinlock_t *root_lock;
2551
2552 head = head->next_sched;
2553
5fb66229 2554 root_lock = qdisc_lock(q);
37437bb2 2555 if (spin_trylock(root_lock)) {
2556 smp_mb__before_clear_bit();
2557 clear_bit(__QDISC_STATE_SCHED,
2558 &q->state);
2559 qdisc_run(q);
2560 spin_unlock(root_lock);
1da177e4 2561 } else {
195648bb 2562 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 2563 &q->state)) {
195648bb 2564 __netif_reschedule(q);
2565 } else {
2566 smp_mb__before_clear_bit();
2567 clear_bit(__QDISC_STATE_SCHED,
2568 &q->state);
2569 }
2570 }
2571 }
2572 }
2573}
2574
2575static inline int deliver_skb(struct sk_buff *skb,
2576 struct packet_type *pt_prev,
2577 struct net_device *orig_dev)
2578{
2579 atomic_inc(&skb->users);
f2ccd8fa 2580 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2581}
2582
2583#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
2584
2585#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2586/* This hook is defined here for ATM LANE */
2587int (*br_fdb_test_addr_hook)(struct net_device *dev,
2588 unsigned char *addr) __read_mostly;
4fb019a0 2589EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 2590#endif
1da177e4 2591
2592/*
2593 * If bridge module is loaded call bridging hook.
2594 * returns NULL if packet was consumed.
2595 */
2596struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2597 struct sk_buff *skb) __read_mostly;
4fb019a0 2598EXPORT_SYMBOL_GPL(br_handle_frame_hook);
da678292 2599
2600static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2601 struct packet_type **pt_prev, int *ret,
2602 struct net_device *orig_dev)
2603{
2604 struct net_bridge_port *port;
2605
6229e362
SH
2606 if (skb->pkt_type == PACKET_LOOPBACK ||
2607 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2608 return skb;
2609
2610 if (*pt_prev) {
6229e362 2611 *ret = deliver_skb(skb, *pt_prev, orig_dev);
1da177e4 2612 *pt_prev = NULL;
2613 }
2614
6229e362 2615 return br_handle_frame_hook(port, skb);
2616}
2617#else
6229e362 2618#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2619#endif
2620
b863ceb7 2621#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2622struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
2623 struct sk_buff *skb) __read_mostly;
2624EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2625
2626static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2627 struct packet_type **pt_prev,
2628 int *ret,
2629 struct net_device *orig_dev)
2630{
2631 struct macvlan_port *port;
2632
2633 port = rcu_dereference(skb->dev->macvlan_port);
2634 if (!port)
2635 return skb;
2636
2637 if (*pt_prev) {
2638 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2639 *pt_prev = NULL;
2640 }
a14462f1 2641 return macvlan_handle_frame_hook(port, skb);
2642}
2643#else
2644#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2645#endif
2646
2647#ifdef CONFIG_NET_CLS_ACT
2648 /* TODO: Maybe we should just force sch_ingress to be compiled in
2649 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2650 * instructions (a compare and two extra stores) when it is off
2651 * but CONFIG_NET_CLS_ACT is on.
2652 * NOTE: This doesn't stop any functionality; if you don't have
2653 * the ingress scheduler, you just can't add policies on ingress.
2654 *
2655 */
4ec93edb 2656static int ing_filter(struct sk_buff *skb)
1da177e4 2657{
1da177e4 2658 struct net_device *dev = skb->dev;
f697c3e8 2659 u32 ttl = G_TC_RTTL(skb->tc_verd);
2660 struct netdev_queue *rxq;
2661 int result = TC_ACT_OK;
2662 struct Qdisc *q;
4ec93edb 2663
2664 if (MAX_RED_LOOP < ttl++) {
2665 printk(KERN_WARNING
2666 "Redir loop detected Dropping packet (%d->%d)\n",
8964be4a 2667 skb->skb_iif, dev->ifindex);
2668 return TC_ACT_SHOT;
2669 }
1da177e4 2670
2671 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2672 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
1da177e4 2673
2674 rxq = &dev->rx_queue;
2675
83874000 2676 q = rxq->qdisc;
8d50b53d 2677 if (q != &noop_qdisc) {
83874000 2678 spin_lock(qdisc_lock(q));
2679 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2680 result = qdisc_enqueue_root(skb, q);
2681 spin_unlock(qdisc_lock(q));
2682 }
2683
2684 return result;
2685}
86e65da9 2686
2687static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2688 struct packet_type **pt_prev,
2689 int *ret, struct net_device *orig_dev)
2690{
8d50b53d 2691 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
f697c3e8 2692 goto out;
1da177e4 2693
2694 if (*pt_prev) {
2695 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2696 *pt_prev = NULL;
2697 } else {
2698 /* Huh? Why does turning on AF_PACKET affect this? */
2699 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2700 }
2701
2702 switch (ing_filter(skb)) {
2703 case TC_ACT_SHOT:
2704 case TC_ACT_STOLEN:
2705 kfree_skb(skb);
2706 return NULL;
2707 }
2708
2709out:
2710 skb->tc_verd = 0;
2711 return skb;
2712}
2713#endif
2714
2715/*
2716 * netif_nit_deliver - deliver received packets to network taps
2717 * @skb: buffer
2718 *
2719 * This function is used to deliver incoming packets to network
2720 * taps. It should be used when the normal netif_receive_skb path
2721 * is bypassed, for example because of VLAN acceleration.
2722 */
2723void netif_nit_deliver(struct sk_buff *skb)
2724{
2725 struct packet_type *ptype;
2726
2727 if (list_empty(&ptype_all))
2728 return;
2729
2730 skb_reset_network_header(skb);
2731 skb_reset_transport_header(skb);
2732 skb->mac_len = skb->network_header - skb->mac_header;
2733
2734 rcu_read_lock();
2735 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2736 if (!ptype->dev || ptype->dev == skb->dev)
2737 deliver_skb(skb, ptype, skb->dev);
2738 }
2739 rcu_read_unlock();
2740}
2741
2742static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2743 struct net_device *master)
2744{
2745 if (skb->pkt_type == PACKET_HOST) {
2746 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2747
2748 memcpy(dest, master->dev_addr, ETH_ALEN);
2749 }
2750}
2751
2752/* On bonding slaves other than the currently active slave, suppress
2753 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2754 * ARP on active-backup slaves with arp_validate enabled.
2755 */
2756int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2757{
2758 struct net_device *dev = skb->dev;
2759
2760 if (master->priv_flags & IFF_MASTER_ARPMON)
2761 dev->last_rx = jiffies;
2762
2763 if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
2764 /* Do address unmangle. The local destination address
2765 * will be always the one master has. Provides the right
2766 * functionality in a bridge.
2767 */
2768 skb_bond_set_mac_by_master(skb, master);
2769 }
2770
2771 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2772 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2773 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2774 return 0;
2775
2776 if (master->priv_flags & IFF_MASTER_ALB) {
2777 if (skb->pkt_type != PACKET_BROADCAST &&
2778 skb->pkt_type != PACKET_MULTICAST)
2779 return 0;
2780 }
2781 if (master->priv_flags & IFF_MASTER_8023AD &&
2782 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2783 return 0;
2784
2785 return 1;
2786 }
2787 return 0;
2788}
2789EXPORT_SYMBOL(__skb_bond_should_drop);
2790
10f744d2 2791static int __netif_receive_skb(struct sk_buff *skb)
2792{
2793 struct packet_type *ptype, *pt_prev;
f2ccd8fa 2794 struct net_device *orig_dev;
0641e4fb 2795 struct net_device *master;
0d7a3681 2796 struct net_device *null_or_orig;
ca8d9ea3 2797 struct net_device *null_or_bond;
1da177e4 2798 int ret = NET_RX_DROP;
252e3346 2799 __be16 type;
1da177e4 2800
2801 if (!netdev_tstamp_prequeue)
2802 net_timestamp_check(skb);
81bbb3d4 2803
05423b24 2804 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
2805 return NET_RX_SUCCESS;
2806
1da177e4 2807 /* if we've gotten here through NAPI, check netpoll */
bea3348e 2808 if (netpoll_receive_skb(skb))
2809 return NET_RX_DROP;
2810
2811 if (!skb->skb_iif)
2812 skb->skb_iif = skb->dev->ifindex;
86e65da9 2813
0d7a3681 2814 null_or_orig = NULL;
cc9bd5ce 2815 orig_dev = skb->dev;
2816 master = ACCESS_ONCE(orig_dev->master);
2817 if (master) {
2818 if (skb_bond_should_drop(skb, master))
2819 null_or_orig = orig_dev; /* deliver only exact match */
2820 else
0641e4fb 2821 skb->dev = master;
cc9bd5ce 2822 }
8f903c70 2823
dee42870 2824 __get_cpu_var(softnet_data).processed++;
1da177e4 2825
c1d2bbe1 2826 skb_reset_network_header(skb);
badff6d0 2827 skb_reset_transport_header(skb);
b0e380b1 2828 skb->mac_len = skb->network_header - skb->mac_header;
2829
2830 pt_prev = NULL;
2831
2832 rcu_read_lock();
2833
2834#ifdef CONFIG_NET_CLS_ACT
2835 if (skb->tc_verd & TC_NCLS) {
2836 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2837 goto ncls;
2838 }
2839#endif
2840
2841 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2842 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2843 ptype->dev == orig_dev) {
4ec93edb 2844 if (pt_prev)
f2ccd8fa 2845 ret = deliver_skb(skb, pt_prev, orig_dev);
2846 pt_prev = ptype;
2847 }
2848 }
2849
2850#ifdef CONFIG_NET_CLS_ACT
2851 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2852 if (!skb)
1da177e4 2853 goto out;
2854ncls:
2855#endif
2856
6229e362 2857 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2858 if (!skb)
2859 goto out;
2860 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
6229e362 2861 if (!skb)
2862 goto out;
2863
2864 /*
2865 * Make sure frames received on VLAN interfaces stacked on
2866 * bonding interfaces still make their way to any base bonding
2867 * device that may have registered for a specific ptype. The
2868 * handler may have to adjust skb->dev and orig_dev.
1f3c8804 2869 */
ca8d9ea3 2870 null_or_bond = NULL;
2871 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2872 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
ca8d9ea3 2873 null_or_bond = vlan_dev_real_dev(skb->dev);
2874 }
2875
1da177e4 2876 type = skb->protocol;
2877 list_for_each_entry_rcu(ptype,
2878 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1f3c8804 2879 if (ptype->type == type && (ptype->dev == null_or_orig ||
2880 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2881 ptype->dev == null_or_bond)) {
4ec93edb 2882 if (pt_prev)
f2ccd8fa 2883 ret = deliver_skb(skb, pt_prev, orig_dev);
2884 pt_prev = ptype;
2885 }
2886 }
2887
2888 if (pt_prev) {
f2ccd8fa 2889 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2890 } else {
2891 kfree_skb(skb);
2892 /* Jamal, now you will not be able to escape explaining
2893 * to me how you were going to use this. :-)
2894 */
2895 ret = NET_RX_DROP;
2896 }
2897
2898out:
2899 rcu_read_unlock();
2900 return ret;
2901}
2902
2903/**
2904 * netif_receive_skb - process receive buffer from network
2905 * @skb: buffer to process
2906 *
2907 * netif_receive_skb() is the main receive data processing function.
2908 * It always succeeds. The buffer may be dropped during processing
2909 * for congestion control or by the protocol layers.
2910 *
2911 * This function may only be called from softirq context and interrupts
2912 * should be enabled.
2913 *
2914 * Return values (usually ignored):
2915 * NET_RX_SUCCESS: no congestion
2916 * NET_RX_DROP: packet was dropped
2917 */
2918int netif_receive_skb(struct sk_buff *skb)
2919{
2920 if (netdev_tstamp_prequeue)
2921 net_timestamp_check(skb);
2922
df334545 2923#ifdef CONFIG_RPS
2924 {
2925 struct rps_dev_flow voidflow, *rflow = &voidflow;
2926 int cpu, ret;
fec5e652 2927
2928 rcu_read_lock();
2929
2930 cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 2931
2932 if (cpu >= 0) {
2933 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2934 rcu_read_unlock();
2935 } else {
2936 rcu_read_unlock();
2937 ret = __netif_receive_skb(skb);
2938 }
0a9627f2 2939
3b098e2d 2940 return ret;
fec5e652 2941 }
2942#else
2943 return __netif_receive_skb(skb);
2944#endif
0a9627f2 2945}
d1b19dff 2946EXPORT_SYMBOL(netif_receive_skb);
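
netif_receive_skb() is normally reached from a driver's NAPI poll callback. A representative poll loop (hypothetical driver; foo_rx_one() dequeues a completed skb from the ring or returns NULL, and foo_enable_rx_irq() is an assumed helper):

static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
        struct sk_buff *skb;
        int work = 0;

        while (work < budget && (skb = foo_rx_one(priv)) != NULL) {
                skb->protocol = eth_type_trans(skb, priv->dev);
                netif_receive_skb(skb);
                work++;
        }
        if (work < budget) {
                napi_complete(napi);
                foo_enable_rx_irq(priv);        /* re-arm the interrupt */
        }
        return work;
}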
1da177e4 2947
2948/* Network device is going away, flush any packets still pending
2949 * Called with irqs disabled.
2950 */
152102c7 2951static void flush_backlog(void *arg)
6e583ce5 2952{
152102c7 2953 struct net_device *dev = arg;
e36fa2f7 2954 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2955 struct sk_buff *skb, *tmp;
2956
e36fa2f7 2957 rps_lock(sd);
6e7676c1 2958 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
6e583ce5 2959 if (skb->dev == dev) {
e36fa2f7 2960 __skb_unlink(skb, &sd->input_pkt_queue);
6e583ce5 2961 kfree_skb(skb);
6e7676c1 2962 input_queue_head_add(sd, 1);
6e583ce5 2963 }
6e7676c1 2964 }
e36fa2f7 2965 rps_unlock(sd);
2966
2967 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
2968 if (skb->dev == dev) {
2969 __skb_unlink(skb, &sd->process_queue);
2970 kfree_skb(skb);
2971 }
2972 }
2973}
2974
2975static int napi_gro_complete(struct sk_buff *skb)
2976{
2977 struct packet_type *ptype;
2978 __be16 type = skb->protocol;
2979 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2980 int err = -ENOENT;
2981
2982 if (NAPI_GRO_CB(skb)->count == 1) {
2983 skb_shinfo(skb)->gso_size = 0;
d565b0a1 2984 goto out;
fc59f9a3 2985 }
2986
2987 rcu_read_lock();
2988 list_for_each_entry_rcu(ptype, head, list) {
2989 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2990 continue;
2991
2992 err = ptype->gro_complete(skb);
2993 break;
2994 }
2995 rcu_read_unlock();
2996
2997 if (err) {
2998 WARN_ON(&ptype->list == head);
2999 kfree_skb(skb);
3000 return NET_RX_SUCCESS;
3001 }
3002
3003out:
3004 return netif_receive_skb(skb);
3005}
3006
11380a4b 3007static void napi_gro_flush(struct napi_struct *napi)
3008{
3009 struct sk_buff *skb, *next;
3010
3011 for (skb = napi->gro_list; skb; skb = next) {
3012 next = skb->next;
3013 skb->next = NULL;
3014 napi_gro_complete(skb);
3015 }
3016
4ae5544f 3017 napi->gro_count = 0;
3018 napi->gro_list = NULL;
3019}
d565b0a1 3020
5b252f0c 3021enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3022{
3023 struct sk_buff **pp = NULL;
3024 struct packet_type *ptype;
3025 __be16 type = skb->protocol;
3026 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
0da2afd5 3027 int same_flow;
d565b0a1 3028 int mac_len;
5b252f0c 3029 enum gro_result ret;
3030
3031 if (!(skb->dev->features & NETIF_F_GRO))
3032 goto normal;
3033
4cf704fb 3034 if (skb_is_gso(skb) || skb_has_frags(skb))
3035 goto normal;
3036
3037 rcu_read_lock();
3038 list_for_each_entry_rcu(ptype, head, list) {
3039 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3040 continue;
3041
86911732 3042 skb_set_network_header(skb, skb_gro_offset(skb));
3043 mac_len = skb->network_header - skb->mac_header;
3044 skb->mac_len = mac_len;
3045 NAPI_GRO_CB(skb)->same_flow = 0;
3046 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 3047 NAPI_GRO_CB(skb)->free = 0;
d565b0a1 3048
3049 pp = ptype->gro_receive(&napi->gro_list, skb);
3050 break;
3051 }
3052 rcu_read_unlock();
3053
3054 if (&ptype->list == head)
3055 goto normal;
3056
0da2afd5 3057 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 3058 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 3059
3060 if (pp) {
3061 struct sk_buff *nskb = *pp;
3062
3063 *pp = nskb->next;
3064 nskb->next = NULL;
3065 napi_gro_complete(nskb);
4ae5544f 3066 napi->gro_count--;
3067 }
3068
0da2afd5 3069 if (same_flow)
3070 goto ok;
3071
4ae5544f 3072 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
d565b0a1 3073 goto normal;
d565b0a1 3074
4ae5544f 3075 napi->gro_count++;
d565b0a1 3076 NAPI_GRO_CB(skb)->count = 1;
86911732 3077 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3078 skb->next = napi->gro_list;
3079 napi->gro_list = skb;
5d0d9be8 3080 ret = GRO_HELD;
d565b0a1 3081
ad0f9904 3082pull:
3083 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3084 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3085
3086 BUG_ON(skb->end - skb->tail < grow);
3087
3088 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3089
3090 skb->tail += grow;
3091 skb->data_len -= grow;
3092
3093 skb_shinfo(skb)->frags[0].page_offset += grow;
3094 skb_shinfo(skb)->frags[0].size -= grow;
3095
3096 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3097 put_page(skb_shinfo(skb)->frags[0].page);
3098 memmove(skb_shinfo(skb)->frags,
3099 skb_shinfo(skb)->frags + 1,
3100 --skb_shinfo(skb)->nr_frags);
3101 }
3102 }
3103
d565b0a1 3104ok:
5d0d9be8 3105 return ret;
3106
3107normal:
3108 ret = GRO_NORMAL;
3109 goto pull;
5d38a079 3110}
3111EXPORT_SYMBOL(dev_gro_receive);
3112
3113static gro_result_t
3114__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3115{
3116 struct sk_buff *p;
3117
3118 if (netpoll_rx_on(skb))
3119 return GRO_NORMAL;
3120
96e93eab 3121 for (p = napi->gro_list; p; p = p->next) {
3122 NAPI_GRO_CB(p)->same_flow =
3123 (p->dev == skb->dev) &&
3124 !compare_ether_header(skb_mac_header(p),
3125 skb_gro_mac_header(skb));
3126 NAPI_GRO_CB(p)->flush = 0;
3127 }
3128
3129 return dev_gro_receive(napi, skb);
3130}
5d38a079 3131
c7c4b3b6 3132gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 3133{
3134 switch (ret) {
3135 case GRO_NORMAL:
3136 if (netif_receive_skb(skb))
3137 ret = GRO_DROP;
3138 break;
5d38a079 3139
5d0d9be8 3140 case GRO_DROP:
5d0d9be8 3141 case GRO_MERGED_FREE:
3142 kfree_skb(skb);
3143 break;
3144
3145 case GRO_HELD:
3146 case GRO_MERGED:
3147 break;
3148 }
3149
c7c4b3b6 3150 return ret;
3151}
3152EXPORT_SYMBOL(napi_skb_finish);
3153
3154void skb_gro_reset_offset(struct sk_buff *skb)
3155{
3156 NAPI_GRO_CB(skb)->data_offset = 0;
3157 NAPI_GRO_CB(skb)->frag0 = NULL;
7489594c 3158 NAPI_GRO_CB(skb)->frag0_len = 0;
78a478d0 3159
78d3fd0b 3160 if (skb->mac_header == skb->tail &&
7489594c 3161 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
3162 NAPI_GRO_CB(skb)->frag0 =
3163 page_address(skb_shinfo(skb)->frags[0].page) +
3164 skb_shinfo(skb)->frags[0].page_offset;
3165 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3166 }
3167}
3168EXPORT_SYMBOL(skb_gro_reset_offset);
3169
c7c4b3b6 3170gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 3171{
3172 skb_gro_reset_offset(skb);
3173
5d0d9be8 3174 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3175}
3176EXPORT_SYMBOL(napi_gro_receive);
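
A GRO-capable driver substitutes this call for netif_receive_skb() in its poll loop and changes nothing else (sketch, reusing the hypothetical loop shown after netif_receive_skb above):

skb->protocol = eth_type_trans(skb, priv->dev);
napi_gro_receive(napi, skb);    /* may merge skb into napi->gro_list */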
3177
3178void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3179{
3180 __skb_pull(skb, skb_headlen(skb));
3181 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3182
3183 napi->skb = skb;
3184}
3185EXPORT_SYMBOL(napi_reuse_skb);
3186
76620aaf 3187struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 3188{
5d38a079 3189 struct sk_buff *skb = napi->skb;
3190
3191 if (!skb) {
3192 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3193 if (skb)
3194 napi->skb = skb;
80595d59 3195 }
3196 return skb;
3197}
76620aaf 3198EXPORT_SYMBOL(napi_get_frags);
96e93eab 3199
3200gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3201 gro_result_t ret)
96e93eab 3202{
3203 switch (ret) {
3204 case GRO_NORMAL:
86911732 3205 case GRO_HELD:
e76b69cc 3206 skb->protocol = eth_type_trans(skb, skb->dev);
86911732 3207
3208 if (ret == GRO_HELD)
3209 skb_gro_pull(skb, -ETH_HLEN);
3210 else if (netif_receive_skb(skb))
3211 ret = GRO_DROP;
86911732 3212 break;
5d38a079 3213
5d0d9be8 3214 case GRO_DROP:
3215 case GRO_MERGED_FREE:
3216 napi_reuse_skb(napi, skb);
3217 break;
3218
3219 case GRO_MERGED:
3220 break;
5d0d9be8 3221 }
5d38a079 3222
c7c4b3b6 3223 return ret;
5d38a079 3224}
3225EXPORT_SYMBOL(napi_frags_finish);
3226
3227struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3228{
3229 struct sk_buff *skb = napi->skb;
3230 struct ethhdr *eth;
3231 unsigned int hlen;
3232 unsigned int off;
3233
3234 napi->skb = NULL;
3235
3236 skb_reset_mac_header(skb);
3237 skb_gro_reset_offset(skb);
3238
3239 off = skb_gro_offset(skb);
3240 hlen = off + sizeof(*eth);
3241 eth = skb_gro_header_fast(skb, off);
3242 if (skb_gro_header_hard(skb, hlen)) {
3243 eth = skb_gro_header_slow(skb, hlen, off);
3244 if (unlikely(!eth)) {
3245 napi_reuse_skb(napi, skb);
3246 skb = NULL;
3247 goto out;
3248 }
3249 }
3250
3251 skb_gro_pull(skb, sizeof(*eth));
3252
3253 /*
3254 * This works because the only protocols we care about don't require
3255 * special handling. We'll fix it up properly at the end.
3256 */
3257 skb->protocol = eth->h_proto;
3258
3259out:
3260 return skb;
3261}
3262EXPORT_SYMBOL(napi_frags_skb);
3263
c7c4b3b6 3264gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 3265{
76620aaf 3266 struct sk_buff *skb = napi_frags_skb(napi);
3267
3268 if (!skb)
c7c4b3b6 3269 return GRO_DROP;
3270
3271 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3272}
3273EXPORT_SYMBOL(napi_gro_frags);
3274
3275/*
3276 * net_rps_action sends any pending IPI's for rps.
3277 * Note: called with local irq disabled, but exits with local irq enabled.
3278 */
3279static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3280{
3281#ifdef CONFIG_RPS
3282 struct softnet_data *remsd = sd->rps_ipi_list;
3283
3284 if (remsd) {
3285 sd->rps_ipi_list = NULL;
3286
3287 local_irq_enable();
3288
3289 /* Send pending IPI's to kick RPS processing on remote cpus. */
3290 while (remsd) {
3291 struct softnet_data *next = remsd->rps_ipi_next;
3292
3293 if (cpu_online(remsd->cpu))
3294 __smp_call_function_single(remsd->cpu,
3295 &remsd->csd, 0);
3296 remsd = next;
3297 }
3298 } else
3299#endif
3300 local_irq_enable();
3301}
3302
bea3348e 3303static int process_backlog(struct napi_struct *napi, int quota)
3304{
3305 int work = 0;
eecfd7c4 3306 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
1da177e4 3307
3308#ifdef CONFIG_RPS
3309 /* Check if we have pending ipi, its better to send them now,
3310 * not waiting net_rx_action() end.
3311 */
3312 if (sd->rps_ipi_list) {
3313 local_irq_disable();
3314 net_rps_action_and_irq_enable(sd);
3315 }
3316#endif
bea3348e 3317 napi->weight = weight_p;
3318 local_irq_disable();
3319 while (work < quota) {
1da177e4 3320 struct sk_buff *skb;
3321 unsigned int qlen;
3322
3323 while ((skb = __skb_dequeue(&sd->process_queue))) {
3324 local_irq_enable();
3325 __netif_receive_skb(skb);
3326 if (++work >= quota)
3327 return work;
3328 local_irq_disable();
3329 }
1da177e4 3330
e36fa2f7 3331 rps_lock(sd);
3332 qlen = skb_queue_len(&sd->input_pkt_queue);
3333 if (qlen) {
3334 input_queue_head_add(sd, qlen);
3335 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3336 &sd->process_queue);
3337 }
3338 if (qlen < quota - work) {
3339 /*
3340 * Inline a custom version of __napi_complete().
3341 * Only the current cpu owns and manipulates this napi,
3342 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
3343 * so we can use a plain write instead of clear_bit(),
3344 * and we don't need an smp_mb() memory barrier.
3345 */
3346 list_del(&napi->poll_list);
3347 napi->state = 0;
3348
6e7676c1 3349 quota = work + qlen;
bea3348e 3350 }
e36fa2f7 3351 rps_unlock(sd);
3352 }
3353 local_irq_enable();
1da177e4 3354
3355 return work;
3356}
1da177e4 3357
3358/**
3359 * __napi_schedule - schedule for receive
c4ea43c5 3360 * @n: entry to schedule
bea3348e
SH
3361 *
3362 * The entry's receive function will be scheduled to run
3363 */
b5606c2d 3364void __napi_schedule(struct napi_struct *n)
bea3348e
SH
3365{
3366 unsigned long flags;
1da177e4 3367
bea3348e 3368 local_irq_save(flags);
eecfd7c4 3369 ____napi_schedule(&__get_cpu_var(softnet_data), n);
bea3348e 3370 local_irq_restore(flags);
1da177e4 3371}
3372EXPORT_SYMBOL(__napi_schedule);
3373
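Drivers usually reach this through napi_schedule(); a typical interrupt handler masks its rx interrupt and schedules the poll (hypothetical driver, assumed helpers):

static irqreturn_t foo_isr(int irq, void *data)
{
        struct foo_priv *priv = data;

        if (napi_schedule_prep(&priv->napi)) {
                foo_disable_rx_irq(priv);       /* assumed helper */
                __napi_schedule(&priv->napi);
        }
        return IRQ_HANDLED;
}
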
3374void __napi_complete(struct napi_struct *n)
3375{
3376 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3377 BUG_ON(n->gro_list);
3378
3379 list_del(&n->poll_list);
3380 smp_mb__before_clear_bit();
3381 clear_bit(NAPI_STATE_SCHED, &n->state);
3382}
3383EXPORT_SYMBOL(__napi_complete);
3384
3385void napi_complete(struct napi_struct *n)
3386{
3387 unsigned long flags;
3388
3389 /*
3390 * don't let napi dequeue from the cpu poll list
3391 * just in case it's running on a different cpu
3392 */
3393 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3394 return;
3395
3396 napi_gro_flush(n);
3397 local_irq_save(flags);
3398 __napi_complete(n);
3399 local_irq_restore(flags);
3400}
3401EXPORT_SYMBOL(napi_complete);
3402
3403void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3404 int (*poll)(struct napi_struct *, int), int weight)
3405{
3406 INIT_LIST_HEAD(&napi->poll_list);
4ae5544f 3407 napi->gro_count = 0;
d565b0a1 3408 napi->gro_list = NULL;
5d38a079 3409 napi->skb = NULL;
3410 napi->poll = poll;
3411 napi->weight = weight;
3412 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 3413 napi->dev = dev;
5d38a079 3414#ifdef CONFIG_NETPOLL
3415 spin_lock_init(&napi->poll_lock);
3416 napi->poll_owner = -1;
3417#endif
3418 set_bit(NAPI_STATE_SCHED, &napi->state);
3419}
3420EXPORT_SYMBOL(netif_napi_add);
3421
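Registration happens once at probe time; the conventional weight is 64, matching weight_p above (sketch with the same assumed names as the earlier examples):

netif_napi_add(dev, &priv->napi, foo_poll, 64);
/* ... and later, typically from ndo_open: */
napi_enable(&priv->napi);
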
3422void netif_napi_del(struct napi_struct *napi)
3423{
3424 struct sk_buff *skb, *next;
3425
d7b06636 3426 list_del_init(&napi->dev_list);
76620aaf 3427 napi_free_frags(napi);
3428
3429 for (skb = napi->gro_list; skb; skb = next) {
3430 next = skb->next;
3431 skb->next = NULL;
3432 kfree_skb(skb);
3433 }
3434
3435 napi->gro_list = NULL;
4ae5544f 3436 napi->gro_count = 0;
3437}
3438EXPORT_SYMBOL(netif_napi_del);
3439
3440static void net_rx_action(struct softirq_action *h)
3441{
e326bed2 3442 struct softnet_data *sd = &__get_cpu_var(softnet_data);
24f8b238 3443 unsigned long time_limit = jiffies + 2;
51b0bded 3444 int budget = netdev_budget;
3445 void *have;
3446
3447 local_irq_disable();
3448
e326bed2 3449 while (!list_empty(&sd->poll_list)) {
3450 struct napi_struct *n;
3451 int work, weight;
1da177e4 3452
3453 /* If the softirq window is exhausted then punt.
3454 * Allow this to run for 2 jiffies, which allows
3455 * an average latency of 1.5/HZ.
3456 */
24f8b238 3457 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3458 goto softnet_break;
3459
3460 local_irq_enable();
3461
3462 /* Even though interrupts have been re-enabled, this
3463 * access is safe because interrupts can only add new
3464 * entries to the tail of this list, and only ->poll()
3465 * calls can remove this head entry from the list.
3466 */
e326bed2 3467 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
1da177e4 3468
3469 have = netpoll_poll_lock(n);
3470
3471 weight = n->weight;
3472
3473 /* This NAPI_STATE_SCHED test is for avoiding a race
3474 * with netpoll's poll_napi(). Only the entity which
3475 * obtains the lock and sees NAPI_STATE_SCHED set will
3476 * actually make the ->poll() call. Therefore we avoid
3477 * accidentally calling ->poll() when NAPI is not scheduled.
3478 */
3479 work = 0;
4ea7e386 3480 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
0a7606c1 3481 work = n->poll(n, weight);
3482 trace_napi_poll(n);
3483 }
3484
3485 WARN_ON_ONCE(work > weight);
3486
3487 budget -= work;
3488
3489 local_irq_disable();
3490
3491 /* Drivers must not modify the NAPI state if they
3492 * consume the entire weight. In such cases this code
3493 * still "owns" the NAPI instance and therefore can
3494 * move the instance around on the list at-will.
3495 */
fed17f30 3496 if (unlikely(work == weight)) {
3497 if (unlikely(napi_disable_pending(n))) {
3498 local_irq_enable();
3499 napi_complete(n);
3500 local_irq_disable();
3501 } else
e326bed2 3502 list_move_tail(&n->poll_list, &sd->poll_list);
fed17f30 3503 }
3504
3505 netpoll_poll_unlock(have);
3506 }
3507out:
e326bed2 3508 net_rps_action_and_irq_enable(sd);
0a9627f2 3509
3510#ifdef CONFIG_NET_DMA
3511 /*
3512 * There may not be any more sk_buffs coming right now, so push
3513 * any pending DMA copies to hardware
3514 */
2ba05622 3515 dma_issue_pending_all();
db217334 3516#endif
bea3348e 3517
3518 return;
3519
3520softnet_break:
dee42870 3521 sd->time_squeeze++;
3522 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3523 goto out;
3524}
3525
d1b19dff 3526static gifconf_func_t *gifconf_list[NPROTO];
3527
3528/**
3529 * register_gifconf - register a SIOCGIF handler
3530 * @family: Address family
3531 * @gifconf: Function handler
3532 *
3533 * Register protocol dependent address dumping routines. The handler
3534 * that is passed must not be freed or reused until it has been replaced
3535 * by another handler.
3536 */
d1b19dff 3537int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3538{
3539 if (family >= NPROTO)
3540 return -EINVAL;
3541 gifconf_list[family] = gifconf;
3542 return 0;
3543}
d1b19dff 3544EXPORT_SYMBOL(register_gifconf);
3545
3546
3547/*
3548 * Map an interface index to its name (SIOCGIFNAME)
3549 */
3550
3551/*
3552 * We need this ioctl for efficient implementation of the
3553 * if_indextoname() function required by the IPv6 API. Without
3554 * it, we would have to search all the interfaces to find a
3555 * match. --pb
3556 */
3557
881d966b 3558static int dev_ifname(struct net *net, struct ifreq __user *arg)
3559{
3560 struct net_device *dev;
3561 struct ifreq ifr;
3562
3563 /*
3564 * Fetch the caller's info block.
3565 */
3566
3567 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3568 return -EFAULT;
3569
3570 rcu_read_lock();
3571 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
1da177e4 3572 if (!dev) {
fb699dfd 3573 rcu_read_unlock();
3574 return -ENODEV;
3575 }
3576
3577 strcpy(ifr.ifr_name, dev->name);
fb699dfd 3578 rcu_read_unlock();
3579
3580 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3581 return -EFAULT;
3582 return 0;
3583}
3584
3585/*
3586 * Perform a SIOCGIFCONF call. This structure will change
3587 * size eventually, and there is nothing I can do about it.
3588 * Thus we will need a 'compatibility mode'.
3589 */
3590
881d966b 3591static int dev_ifconf(struct net *net, char __user *arg)
3592{
3593 struct ifconf ifc;
3594 struct net_device *dev;
3595 char __user *pos;
3596 int len;
3597 int total;
3598 int i;
3599
3600 /*
3601 * Fetch the caller's info block.
3602 */
3603
3604 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3605 return -EFAULT;
3606
3607 pos = ifc.ifc_buf;
3608 len = ifc.ifc_len;
3609
3610 /*
3611 * Loop over the interfaces, and write an info block for each.
3612 */
3613
3614 total = 0;
881d966b 3615 for_each_netdev(net, dev) {
1da177e4
LT
3616 for (i = 0; i < NPROTO; i++) {
3617 if (gifconf_list[i]) {
3618 int done;
3619 if (!pos)
3620 done = gifconf_list[i](dev, NULL, 0);
3621 else
3622 done = gifconf_list[i](dev, pos + total,
3623 len - total);
3624 if (done < 0)
3625 return -EFAULT;
3626 total += done;
3627 }
3628 }
4ec93edb 3629 }
1da177e4
LT
3630
3631 /*
3632 * All done. Write the updated control block back to the caller.
3633 */
3634 ifc.ifc_len = total;
3635
3636 /*
3637 * Both BSD and Solaris return 0 here, so we do too.
3638 */
3639 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3640}

#ifdef CONFIG_PROC_FS
/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);
	loff_t off;
	struct net_device *dev;

	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	off = 1;
	for_each_netdev_rcu(net, dev)
		if (off++ == *pos)
			return dev;

	return NULL;
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev = (v == SEQ_START_TOKEN) ?
				  first_net_device(seq_file_net(seq)) :
				  next_net_device((struct net_device *)v);

	++*pos;
	return rcu_dereference(dev);
}

void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	const struct net_device_stats *stats = dev_get_stats(dev);

	seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
		   "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}

static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   sd->cpu_collision, sd->received_rps);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static const struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %pF\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = ptype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};


static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */
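
/*
 * Illustrative sketch (not part of this file): the same pernet_operations
 * pattern used by dev_proc_ops above, as another subsystem might use it;
 * the "example_" names are hypothetical.  .init runs for every existing
 * and future network namespace, .exit on teardown.  Kept under #if 0.
 */
#if 0
static int __net_init example_net_init(struct net *net)
{
	/* Allocate/publish this namespace's private state here. */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* Tear down whatever example_net_init() set up. */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}
#endif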

/**
 *	netdev_set_master	-	set up master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
 *	function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	if (old) {
		synchronize_net();
		dev_put(old);
	}
	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
EXPORT_SYMBOL(netdev_set_master);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	uid_t uid;
	gid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			printk(KERN_WARNING "%s: promiscuity touches roof, "
				"set promiscuity failed, promiscuity feature "
				"of device might be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		printk(KERN_INFO "device %s %s promiscuous mode\n",
		       dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
							       "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				audit_get_loginuid(current),
				uid, gid,
				audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
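
/*
 * Illustrative sketch (not part of this file): a component that taps a
 * port (a bridge, for instance) pairs the +1/-1 calls so the device's
 * promiscuity count stays balanced.  Caller must hold RTNL.  "example_"
 * names are hypothetical; kept under #if 0.
 */
#if 0
static int example_attach_port(struct net_device *dev)
{
	int err;

	ASSERT_RTNL();
	err = dev_set_promiscuity(dev, 1);	/* count: +1 */
	if (err)
		return err;
	/* ... start receiving all traffic on dev ... */
	return 0;
}

static void example_detach_port(struct net_device *dev)
{
	ASSERT_RTNL();
	dev_set_promiscuity(dev, -1);	/* count: -1; may leave promisc mode */
}
#endif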

/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface keeps listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned short old_flags = dev->flags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			printk(KERN_WARNING "%s: allmulti touches roof, "
				"set allmulti failed, allmulti feature of "
				"device might be broken.\n", dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);

/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
	else {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = 1;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = 0;
		}

		if (ops->ndo_set_multicast_list)
			ops->ndo_set_multicast_list(dev);
	}
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}
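
/*
 * Illustrative sketch (not part of this file): a driver-side
 * ndo_set_rx_mode() as __dev_set_rx_mode() expects it -- invoked with
 * dev->addr_list_lock held (taken by dev_set_rx_mode() above), it
 * reprograms the hardware filter from dev->flags and the current
 * multicast list.  The "example_hw_" accessors are hypothetical; kept
 * under #if 0.
 */
#if 0
static void example_ndo_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		example_hw_accept_all(dev);
		return;
	}
	example_hw_clear_mc_filter(dev);
	if (dev->flags & IFF_ALLMULTI)
		example_hw_accept_all_multicast(dev);
	else
		netdev_for_each_mc_addr(ha, dev)
			example_hw_add_mc_filter(dev, ha->addr);
}
#endif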

/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
unsigned dev_get_flags(const struct net_device *dev)
{
	unsigned flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 *	Have we downed the interface?  We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different  ? */
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

		if (!ret)
			dev_set_rx_mode(dev);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;

		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC when
	   IFF_ALLMULTI is requested, without asking us and without reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
}

/**
 *	dev_change_flags - change device settings
 *	@dev: device
 *	@flags: device state flags
 *
 *	Change settings on a device based on the state flags. The flags are
 *	in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned flags)
{
	int ret, changes;
	int old_flags = dev->flags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = old_flags ^ dev->flags;
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);

	__dev_notify_flags(dev, old_flags);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
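
/*
 * Illustrative sketch (not part of this file): bringing an interface up
 * from inside the kernel goes through dev_change_flags() under RTNL,
 * exactly as the SIOCSIFFLAGS ioctl path further below does.  Kept
 * under #if 0.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif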

/**
 *	dev_set_mtu - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *
 *	Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/*	MTU must be positive.	 */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;

	if (!err && dev->flags & IFF_UP)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);

/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *
 *	Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
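
/*
 * Illustrative sketch (not part of this file): building the sockaddr
 * that dev_set_mac_address() expects -- sa_family must match dev->type
 * (ARPHRD_ETHER here) or the call returns -EINVAL.  Kept under #if 0.
 */
#if 0
static int example_set_mac(struct net_device *dev, const u8 *mac /* ETH_ALEN */)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;		/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, mac, ETH_ALEN);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}
#endif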

/*
 *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = (short) dev_get_flags(dev);
		return 0;

	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;

	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;

	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;

	case SIOCGIFSLAVE:
		err = -EINVAL;
		break;

	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;

	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;

	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;

	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
		err = -EINVAL;
		break;

	}
	return err;
}

/*
 *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	const struct net_device_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = dev->netdev_ops;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);

	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;

	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);

	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;

	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCADDMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCDELMULTI:
		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);

	/*
	 *	Unknown or private ioctl
	 */
	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		    cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;

	}
	return err;
}

/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@net: the applicable net namespace
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
	/*
	 *	These ioctl calls:
	 *	- can be done by all.
	 *	- atomic and do not require locking.
	 *	- return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		rcu_read_lock();
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		rcu_read_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFMAP:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSIFTXQLEN:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
		return -EINVAL;

	/*
	 *	Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
		return -EINVAL;
	}
}
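
/*
 * Illustrative sketch (not part of this file): the classic userspace
 * read-modify-write of interface flags that lands in the SIOCGIFFLAGS
 * and SIOCSIFFLAGS arms above ("ifconfig eth0 up", in effect).  Kept
 * under #if 0 since it is userspace code.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <unistd.h>

static int example_ifup(const char *name)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0)
		goto fail;
	ifr.ifr_flags |= IFF_UP;
	if (ioctl(fd, SIOCSIFFLAGS, &ifr) < 0)
		goto fail;
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}
#endif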

/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number.  The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	static int ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never "
				 "was registered\n", dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}

		BUG_ON(dev->reg_state != NETREG_REGISTERED);

		/* If device is running, close it first. */
		dev_close(dev);

		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		/* Shutdown queueing discipline. */
		dev_shutdown(dev);


		/* Notify protocols that we are about to destroy
		   this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		/* Notifier chain MUST detach us from master device. */
		WARN_ON(dev->master);

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
	}

	/* Process any work delayed until the end of the batch */
	dev = list_first_entry(head, struct net_device, unreg_list);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
}

static void __netdev_init_queue_locks_one(struct net_device *dev,
					  struct netdev_queue *dev_queue,
					  void *_unused)
{
	spin_lock_init(&dev_queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
	dev_queue->xmit_lock_owner = -1;
}

static void netdev_init_queue_locks(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
	__netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
}

unsigned long netdev_fix_features(unsigned long features, const char *name)
{
	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
			       "checksum feature.\n", name);
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
		if (name)
			printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
			       "SG feature.\n", name);
		features &= ~NETIF_F_TSO;
	}

	if (features & NETIF_F_UFO) {
		if (!(features & NETIF_F_GEN_CSUM)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no NETIF_F_HW_CSUM feature.\n",
				       name);
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			if (name)
				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
				       "since no NETIF_F_SG feature.\n", name);
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}
EXPORT_SYMBOL(netdev_fix_features);

/**
 *	netif_stacked_transfer_operstate -	transfer operstate
 *	@rootdev: the root or lower level device to transfer state from
 *	@dev: the device to transfer operstate to
 *
 *	Transfer operational state from root to device. This is normally
 *	called when a stacking relationship exists between the root
 *	device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);
	netdev_init_queue_locks(dev);

	dev->iflink = -1;

#ifdef CONFIG_RPS
	if (!dev->num_rx_queues) {
		/*
		 * Allocate a single RX queue if driver never called
		 * alloc_netdev_mq
		 */

		dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
		if (!dev->_rx) {
			ret = -ENOMEM;
			goto out;
		}

		dev->_rx->first = dev->_rx;
		atomic_set(&dev->_rx->count, 1);
		dev->num_rx_queues = 1;
	}
#endif
	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	ret = dev_get_valid_name(net, dev->name, dev->name, 0);
	if (ret)
		goto err_uninit;

	dev->ifindex = dev_new_index(net);
	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Fix illegal checksum combinations */
	if ((dev->features & NETIF_F_HW_CSUM) &&
	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	if ((dev->features & NETIF_F_NO_CSUM) &&
	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
		       dev->name);
		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
	}

	dev->features = netdev_fix_features(dev->features, dev->name);

	/* Enable software GSO if SG is supported. */
	if (dev->features & NETIF_F_SG)
		dev->features |= NETIF_F_GSO;

	netdev_initialize_kobject(dev);

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* initialize the ref count */
	atomic_set(&dev->refcnt, 1);

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
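
/*
 * Illustrative sketch (not part of this file): the intended use of
 * init_dummy_netdev() -- one dummy netdev backing a NAPI context for
 * hardware that is not itself a network interface.  example_poll is a
 * hypothetical poll routine (see the poll sketch earlier in this file);
 * kept under #if 0.
 */
#if 0
static struct net_device example_dummy_dev;
static struct napi_struct example_napi;

static void example_napi_setup(void)
{
	init_dummy_netdev(&example_dummy_dev);
	netif_napi_add(&example_dummy_dev, &example_napi,
		       example_poll, 64);
	napi_enable(&example_napi);
}
#endif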

/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();

	/*
	 * If the name is a format string the caller wants us to do a
	 * name allocation.
	 */
	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto out;
	}

	err = register_netdevice(dev);
out:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
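
/*
 * Illustrative sketch (not part of this file): the usual driver probe
 * sequence ending in register_netdev(); the "eth%d" format string is
 * expanded by the name-allocation step above.  struct example_priv and
 * example_netdev_ops are hypothetical; kept under #if 0.
 */
#if 0
static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct example_priv), "eth%d",
			   ether_setup);
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &example_netdev_ops;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}
#endif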

/*
 * netdev_wait_allrefs - wait until all references are gone.
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	while (atomic_read(&dev->refcnt) != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
			/* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
			 * should have already handled it the first time */

			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			printk(KERN_EMERG "unregister_netdevice: "
			       "waiting for %s to become free. Usage "
			       "count = %d\n",
			       dev->name, atomic_read(&dev->refcnt));
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			printk(KERN_ERR "network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(atomic_read(&dev->refcnt));
		WARN_ON(dev->ip_ptr);
		WARN_ON(dev->ip6_ptr);
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/**
 *	dev_txq_stats_fold - fold tx_queues stats
 *	@dev: device to get statistics from
 *	@stats: struct net_device_stats to hold results
 */
void dev_txq_stats_fold(const struct net_device *dev,
			struct net_device_stats *stats)
{
	unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
	unsigned int i;
	struct netdev_queue *txq;

	for (i = 0; i < dev->num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		tx_bytes   += txq->tx_bytes;
		tx_packets += txq->tx_packets;
		tx_dropped += txq->tx_dropped;
	}
	if (tx_bytes || tx_packets || tx_dropped) {
		stats->tx_bytes   = tx_bytes;
		stats->tx_packets = tx_packets;
		stats->tx_dropped = tx_dropped;
	}
}
EXPORT_SYMBOL(dev_txq_stats_fold);

/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *
 *	Get network statistics from device. The device driver may provide
 *	its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
 *	the internal statistics structure is used.
 */
const struct net_device_stats *dev_get_stats(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats)
		return ops->ndo_get_stats(dev);

	dev_txq_stats_fold(dev, &dev->stats);
	return &dev->stats;
}
EXPORT_SYMBOL(dev_get_stats);
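
/*
 * Illustrative sketch (not part of this file): a driver supplying its
 * own ndo_get_stats() as described above, folding a hardware drop
 * counter into the software structure before returning it.
 * example_hw_rx_dropped() is hypothetical; kept under #if 0.
 */
#if 0
static struct net_device_stats *example_ndo_get_stats(struct net_device *dev)
{
	dev->stats.rx_dropped += example_hw_rx_dropped(dev);
	return &dev->stats;
}
#endif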

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue,
				  void *_unused)
{
	queue->dev = dev;
}

static void netdev_init_queues(struct net_device *dev)
{
	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);
}

/**
 *	alloc_netdev_mq - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@queue_count:	the number of subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization.  Also allocates subqueue structs
 *	for each queue on the device at the end of the netdevice.
 */
struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *), unsigned int queue_count)
{
	struct netdev_queue *tx;
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;
#ifdef CONFIG_RPS
	struct netdev_rx_queue *rx;
	int i;
#endif

	BUG_ON(strlen(name) >= sizeof(dev->name));

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
		return NULL;
	}

	tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "tx qdiscs.\n");
		goto free_p;
	}

#ifdef CONFIG_RPS
	rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		printk(KERN_ERR "alloc_netdev: Unable to allocate "
		       "rx queues.\n");
		goto free_tx;
	}

	atomic_set(&rx->count, queue_count);

	/*
	 * Set a pointer to first element in the array which holds the
	 * reference count.
	 */
	for (i = 0; i < queue_count; i++)
		rx[i].first = rx;
#endif

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	if (dev_addr_init(dev))
		goto free_rx;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->_tx = tx;
	dev->num_tx_queues = queue_count;
	dev->real_num_tx_queues = queue_count;

#ifdef CONFIG_RPS
	dev->_rx = rx;
	dev->num_rx_queues = queue_count;
#endif

	dev->gso_max_size = GSO_MAX_SIZE;

	netdev_init_queues(dev);

	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
	dev->ethtool_ntuple_list.count = 0;
	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);
	strcpy(dev->name, name);
	return dev;

free_rx:
#ifdef CONFIG_RPS
	kfree(rx);
free_tx:
#endif
	kfree(tx);
free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mq);

/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);

	/* Flush device addresses */
	dev_addr_flush(dev);

	/* Clear ethtool n-tuple list */
	ethtool_ntuple_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	/*  Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head is not NULL, the device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
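
/*
 * Example (illustrative sketch): queueing several devices on one list so
 * rollback_registered_many() can amortize the expensive RCU grace periods
 * ("my_devs" and "n" are made-up names).
 *
 *	LIST_HEAD(kill_list);
 *	int i;
 *
 *	rtnl_lock();
 *	for (i = 0; i < n; i++)
 *		unregister_netdevice_queue(my_devs[i], &kill_list);
 *	unregister_netdevice_many(&kill_list);
 *	rtnl_unlock();
 */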

/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore.  In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
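
/*
 * Example (illustrative sketch): the usual teardown pair in a driver's
 * module exit ("mydrv_dev" is a made-up module-global pointer).
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		unregister_netdev(mydrv_dev);	// takes rtnl itself
 *		free_netdev(mydrv_dev);
 *	}
 */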

/**
 *	dev_change_net_namespace - move device to different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

#ifdef CONFIG_SYSFS
	/* Don't allow real devices to be moved when sysfs
	 * is enabled.
	 */
	err = -EINVAL;
	if (dev->dev.parent)
		goto out;
#endif

	/* Ensure the device has been registered */
	err = -EINVAL;
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, pat, dev->name, 1))
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice and
	 * unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	 * this device. They should clean all the things.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	netdev_unregister_kobject(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = netdev_register_kobject(dev);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
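
/*
 * Example (illustrative sketch): moving a device into another namespace
 * under rtnl, retrying with a "dev%d" pattern if its name is already
 * taken there ("dev" and "peer_net" are made-up names; peer_net must be
 * a live struct net reference obtained elsewhere).
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, peer_net, "dev%d");
 *	rtnl_unlock();
 */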

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_add(oldsd, 1);
	}
	while ((skb = __skb_dequeue(&oldsd->process_queue)))
		netif_rx(skb);

	return NOTIFY_OK;
}

/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
					unsigned long mask)
{
	/* If device needs checksumming, downgrade to it. */
	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
	else if (mask & NETIF_F_ALL_CSUM) {
		/* If one device supports v4/v6 checksumming, set for all. */
		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
		    !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		}

		/* If one device supports hw checksumming, set for all. */
		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
			all &= ~NETIF_F_ALL_CSUM;
			all |= NETIF_F_HW_CSUM;
		}
	}

	one |= NETIF_F_ALL_CSUM;

	one |= all & NETIF_F_ONE_FOR_ALL;
	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
	all |= one & mask & NETIF_F_ONE_FOR_ALL;

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
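
/*
 * Example (illustrative sketch): how a master driver such as bonding
 * might fold each slave's feature set into its own, starting from the
 * one-for-all seed ("bond", "slave" and "mask" are made-up names, and
 * the mask shown here is only one plausible choice).
 *
 *	unsigned long features = NETIF_F_ONE_FOR_ALL;
 *
 *	list_for_each_entry(slave, &bond->slave_list, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 *	bond->dev->features = features;
 */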

static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *	@buffer: buffer for resulting name
 *	@len: size of buffer
 *
 *	Determine network driver for device.
 */
char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
	const struct device_driver *driver;
	const struct device *parent;

	if (len <= 0 || !buffer)
		return buffer;
	buffer[0] = 0;

	parent = dev->dev.parent;

	if (!parent)
		return buffer;

	driver = parent->driver;
	if (driver && driver->name)
		strlcpy(buffer, driver->name, len);
	return buffer;
}
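
/*
 * Example (illustrative sketch): callers pass a stack buffer and print
 * the result, roughly as the watchdog timeout path does.
 *
 *	char drivername[64];
 *
 *	printk(KERN_WARNING "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
 *	       dev->name, netdev_drivername(dev, drivername, 64));
 */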

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
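
/*
 * Example (illustrative sketch): other subsystems hook per-namespace
 * setup/teardown the same way ("my_net_ops", my_net_init() and
 * my_net_exit() are made-up names), typically from an initcall:
 *
 *	static struct pernet_operations my_net_ops = {
 *		.init = my_net_init,
 *		.exit = my_net_exit,
 *	};
 *
 *	err = register_pernet_subsys(&my_net_ops);
 */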

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
			       __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single-threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	dev_mcast_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);