/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *queue)
{
#ifdef CONFIG_RPS
	spin_lock(&queue->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *queue)
{
#ifdef CONFIG_RPS
	spin_unlock(&queue->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
	 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
	 ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
	 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
	 "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
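
/*
 * Illustrative sketch (not part of the original file): a minimal tap that
 * registers for every protocol via dev_add_pack().  The handler and struct
 * names are hypothetical; the four-argument handler signature matches how
 * this file invokes ptype->func() in dev_queue_xmit_nit() below.  A real
 * user would pair this with dev_remove_pack() on teardown.
 */
#if 0
static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	/* Each tap receives its own clone; just consume it. */
	kfree_skb(skb);
	return 0;
}

static struct packet_type example_tap __read_mostly = {
	.type = __constant_htons(ETH_P_ALL),	/* goes on the ptype_all chain */
	.func = example_tap_rcv,
};

/* dev_add_pack(&example_tap); ... dev_remove_pack(&example_tap); */
#endif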

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
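
/*
 * Illustrative sketch (hypothetical caller): when tearing down several
 * handlers at once, the __dev_remove_pack() variant lets the caller batch
 * the unlinks and pay for a single RCU grace period, instead of one
 * synchronize_net() per handler as dev_remove_pack() would cost.
 */
#if 0
static void example_remove_taps(struct packet_type *pts, int n)
{
	int i;

	for (i = 0; i < n; i++)
		__dev_remove_pack(&pts[i]);
	synchronize_net();	/* one grace period covers all removals */
}
#endif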

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
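
/*
 * Illustrative sketch of the three lookup flavours above (hypothetical
 * caller, "eth0" is just an example name): __dev_get_by_name() under RTNL
 * or dev_base_lock, dev_get_by_name_rcu() under rcu_read_lock(), and
 * dev_get_by_name() when the reference must outlive the lookup context.
 */
#if 0
static void example_name_lookups(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");
	/* dev, if non-NULL, is only valid inside this RCU section */
	rcu_read_unlock();

	dev = dev_get_by_name(net, "eth0");	/* takes a reference */
	if (dev) {
		/* ... use dev from any context ... */
		dev_put(dev);
	}
}
#endif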

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
				    unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	rcu_read_lock();
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}
EXPORT_SYMBOL(dev_valid_name);
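
/*
 * Illustrative examples for the checks above (not in the original file):
 * "eth0" and "br-lan" pass; "" (empty), ".", "..", "a/b" (slash) and
 * "my dev" (whitespace) are rejected, as is any name of IFNAMSIZ
 * characters or more.
 */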

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
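
/*
 * Worked example (hypothetical, not in the original file): if "eth0" and
 * "eth2" already exist in the namespace, dev_alloc_name(dev, "eth%d")
 * marks bits 0 and 2 in the in-use bitmap, find_first_zero_bit() picks
 * unit 1, "eth1" is written into dev->name, and 1 is returned.
 */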

static int dev_get_valid_name(struct net *net, const char *name, char *buf,
			      bool fmt)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (fmt && strchr(name, '%'))
		return __dev_alloc_name(net, name, buf);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (buf != name)
		strlcpy(buf, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device; a format string such as "eth%d" can be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, newname, dev->name, 1);
	if (err < 0)
		return err;

rollback:
	/* For now only devices in the initial network namespace
	 * are in sysfs.
	 */
	if (net_eq(net, &init_net)) {
		ret = device_rename(&dev->dev, dev->name);
		if (ret) {
			memcpy(dev->name, oldname, IFNAMSIZ);
			return ret;
		}
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);

int netdev_bonding_change(struct net_device *dev, unsigned long event)
{
	return call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Enable NET_DMA
		 */
		net_dmaengine_get();

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	/*
	 *	Is it already up?
	 */
	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Open device
	 */
	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	/*
	 *	... and announce new interface.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	ASSERT_RTNL();
	might_sleep();

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (ops->ndo_stop)
		ops->ndo_stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Shutdown NET_DMA
	 */
	net_dmaengine_put();

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	__dev_close(dev);

	/*
	 *	Tell people we are down
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}
EXPORT_SYMBOL(dev_close);
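
/*
 * Illustrative sketch (hypothetical caller): dev_open() and dev_close()
 * assume the RTNL lock is held (note the ASSERT_RTNL() in the helpers
 * above), e.g. when cycling an interface from inside the kernel:
 */
#if 0
static int example_bounce_interface(struct net *net, const char *name)
{
	struct net_device *dev;
	int err = 0;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);	/* valid under RTNL */
	if (!dev)
		err = -ENODEV;
	else {
		dev_close(dev);
		err = dev_open(dev);
	}
	rtnl_unlock();
	return err;
}
#endif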


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
			nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
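
/*
 * Illustrative sketch (hypothetical subscriber, not in the original file):
 * a notifier block whose callback also sees NETDEV_REGISTER/NETDEV_UP
 * replayed for already-existing devices at registration time, as the
 * kernel-doc above describes.  In this kernel the void pointer is the
 * struct net_device itself.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_UP:
		printk(KERN_DEBUG "%s is up\n", dev->name);
		break;
	case NETDEV_GOING_DOWN:
		printk(KERN_DEBUG "%s is going down\n", dev->name);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb); */
#endif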

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	skb_orphan(skb);

	if (!(dev->flags & IFF_UP))
		return NET_RX_DROP;

	if (skb->len > (dev->mtu + dev->hard_header_len))
		return NET_RX_DROP;

	skb_set_dev(skb, dev);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
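
/*
 * Illustrative sketch (hypothetical driver, not in the original file):
 * how a veth-style pair device might use dev_forward_skb() from its
 * transmit routine to inject frames into its peer's receive path, as the
 * kernel-doc above describes.  example_get_peer() is a made-up helper.
 */
#if 0
static netdev_tx_t example_pair_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);	/* hypothetical */

	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	else
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
#endif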

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

#ifdef CONFIG_NET_CLS_ACT
	if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
		net_timestamp(skb);
#else
	net_timestamp(skb);
#endif

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
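
/*
 * Illustrative note (hypothetical driver code): dev_kfree_skb_irq() defers
 * the actual free to the NET_TX softirq via the per-cpu completion queue,
 * so a TX-completion interrupt handler can release skbs without calling
 * kfree_skb() in hard-IRQ context; dev_kfree_skb_any() picks the right
 * variant automatically.  example_pop_completed() is a made-up helper.
 */
#if 0
static irqreturn_t example_tx_done_irq(int irq, void *data)
{
	struct sk_buff *skb = example_pop_completed(data);	/* hypothetical */

	if (skb)
		dev_kfree_skb_any(skb);	/* safe in IRQ context */
	return IRQ_HANDLED;
}
#endif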


/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

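/*
 * Illustrative sketch (hypothetical caller, not in the original file):
 * how a transmit path might combine the two helpers above with
 * skb_checksum_help() (defined later in this file) — if the device cannot
 * checksum this protocol in hardware, fall back to software.
 */
#if 0
static int example_tx_checksum(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && !dev_can_checksum(dev, skb))
		return skb_checksum_help(skb);	/* software fallback */
	return 0;	/* hardware will fill in the checksum */
}
#endif
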
/**
 * skb_dev_set -- assign a new device to a buffer
 * @skb: buffer for the new device
 * @dev: network device
 *
 * If an skb is owned by a device already, we have to reset
 * all data private to the namespace a device belongs to
 * before assigning it a new device.
 */
#ifdef CONFIG_NET_NS
void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb_dst_drop(skb);
	if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
		secpath_reset(skb);
		nf_reset(skb);
		skb_init_secmark(skb);
		skb->mark = 0;
		skb->priority = 0;
		skb->nf_trace = 0;
		skb->ipvs_property = 0;
#ifdef CONFIG_NET_SCHED
		skb->tc_index = 0;
#endif
	}
	skb->dev = dev;
}
EXPORT_SYMBOL(skb_set_dev);
#endif /* CONFIG_NET_NS */

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		struct net_device *dev = skb->dev;
		struct ethtool_drvinfo info = {};

		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
			dev->ethtool_ops->get_drvinfo(dev, &info);

		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
			"ip_summed=%d",
		     info.driver, dev ? dev->features : 0L,
		     skb->sk ? skb->sk->sk_route_caps : 0L,
		     skb->len, skb->data_len, skb->ip_summed);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
1786
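/*
 * A minimal sketch (not part of this file) of how a caller walks the
 * list returned by skb_gso_segment(): segments are chained through
 * skb->next.  dev_gso_segment()/dev_hard_start_xmit() below do the
 * real version of this bookkeeping; xmit_one() here is a hypothetical
 * stand-in for an actual transmit routine.
 */
static int example_xmit_gso(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, dev->features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)				/* header verification only */
		return xmit_one(skb, dev);

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		xmit_one(nskb, dev);		/* per-segment errors ignored */
	} while (segs);

	kfree_skb(skb);		/* segments hold their own data references */
	return 0;
}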
fb286bb2
HX
1787/* Take action when hardware reception checksum errors are detected. */
1788#ifdef CONFIG_BUG
1789void netdev_rx_csum_fault(struct net_device *dev)
1790{
1791 if (net_ratelimit()) {
4ec93edb 1792 printk(KERN_ERR "%s: hw csum failure.\n",
246a4212 1793 dev ? dev->name : "<unknown>");
fb286bb2
HX
1794 dump_stack();
1795 }
1796}
1797EXPORT_SYMBOL(netdev_rx_csum_fault);
1798#endif
1799
1da177e4
LT
1800/* Actually, we should eliminate this check as soon as we know that:
1801 * 1. An IOMMU is present and can map all of the memory.
1802 * 2. No high memory really exists on this machine.
1803 */
1804
9092c658 1805static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 1806{
3d3a8533 1807#ifdef CONFIG_HIGHMEM
1da177e4 1808 int i;
5acbbd42
FT
1809 if (!(dev->features & NETIF_F_HIGHDMA)) {
1810 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1811 if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1812 return 1;
1813 }
1da177e4 1814
5acbbd42
FT
1815 if (PCI_DMA_BUS_IS_PHYS) {
1816 struct device *pdev = dev->dev.parent;
1da177e4 1817
9092c658
ED
1818 if (!pdev)
1819 return 0;
5acbbd42
FT
1820 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1821 dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1822 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1823 return 1;
1824 }
1825 }
3d3a8533 1826#endif
1da177e4
LT
1827 return 0;
1828}
1da177e4 1829
f6a78bfc
HX
1830struct dev_gso_cb {
1831 void (*destructor)(struct sk_buff *skb);
1832};
1833
1834#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
1835
1836static void dev_gso_skb_destructor(struct sk_buff *skb)
1837{
1838 struct dev_gso_cb *cb;
1839
1840 do {
1841 struct sk_buff *nskb = skb->next;
1842
1843 skb->next = nskb->next;
1844 nskb->next = NULL;
1845 kfree_skb(nskb);
1846 } while (skb->next);
1847
1848 cb = DEV_GSO_CB(skb);
1849 if (cb->destructor)
1850 cb->destructor(skb);
1851}
1852
1853/**
1854 * dev_gso_segment - Perform emulated hardware segmentation on skb.
1855 * @skb: buffer to segment
1856 *
1857 * This function segments the given skb and stores the list of segments
1858 * in skb->next.
1859 */
1860static int dev_gso_segment(struct sk_buff *skb)
1861{
1862 struct net_device *dev = skb->dev;
1863 struct sk_buff *segs;
576a30eb
HX
1864 int features = dev->features & ~(illegal_highdma(dev, skb) ?
1865 NETIF_F_SG : 0);
1866
1867 segs = skb_gso_segment(skb, features);
1868
1869 /* Verifying header integrity only. */
1870 if (!segs)
1871 return 0;
f6a78bfc 1872
801678c5 1873 if (IS_ERR(segs))
f6a78bfc
HX
1874 return PTR_ERR(segs);
1875
1876 skb->next = segs;
1877 DEV_GSO_CB(skb)->destructor = skb->destructor;
1878 skb->destructor = dev_gso_skb_destructor;
1879
1880 return 0;
1881}
1882
fd2ea0a7
DM
1883int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1884 struct netdev_queue *txq)
f6a78bfc 1885{
00829823 1886 const struct net_device_ops *ops = dev->netdev_ops;
572a9d7b 1887 int rc = NETDEV_TX_OK;
00829823 1888
f6a78bfc 1889 if (likely(!skb->next)) {
9be9a6b9 1890 if (!list_empty(&ptype_all))
f6a78bfc
HX
1891 dev_queue_xmit_nit(skb, dev);
1892
576a30eb
HX
1893 if (netif_needs_gso(dev, skb)) {
1894 if (unlikely(dev_gso_segment(skb)))
1895 goto out_kfree_skb;
1896 if (skb->next)
1897 goto gso;
1898 }
f6a78bfc 1899
93f154b5
ED
1900 /*
1901 * If the device doesn't need skb->dst, release it right now while
1902 * it's hot in this CPU's cache.
1903 */
adf30907
ED
1904 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1905 skb_dst_drop(skb);
1906
ac45f602 1907 rc = ops->ndo_start_xmit(skb, dev);
ec634fe3 1908 if (rc == NETDEV_TX_OK)
08baf561 1909 txq_trans_update(txq);
ac45f602
PO
1910 /*
1911 * TODO: if skb_orphan() was called by
1912 * dev->hard_start_xmit() (for example, the unmodified
1913 * igb driver does that; bnx2 doesn't), then
1914 * skb_tx_software_timestamp() will be unable to send
1915 * back the time stamp.
1916 *
1917 * How can this be prevented? Always create another
1918 * reference to the socket before calling
1919 * dev->hard_start_xmit()? Prevent skb_orphan() from
1920 * doing anything in dev->hard_start_xmit() by clearing
1921 * the skb destructor before the call and restoring it
1922 * afterwards, then doing the skb_orphan() ourselves?
1923 */
ac45f602 1924 return rc;
f6a78bfc
HX
1925 }
1926
576a30eb 1927gso:
f6a78bfc
HX
1928 do {
1929 struct sk_buff *nskb = skb->next;
f6a78bfc
HX
1930
1931 skb->next = nskb->next;
1932 nskb->next = NULL;
068a2de5
KK
1933
1934 /*
1935 * If the device doesn't need nskb->dst, release it right now while
1936 * it's hot in this CPU's cache.
1937 */
1938 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1939 skb_dst_drop(nskb);
1940
00829823 1941 rc = ops->ndo_start_xmit(nskb, dev);
ec634fe3 1942 if (unlikely(rc != NETDEV_TX_OK)) {
572a9d7b
PM
1943 if (rc & ~NETDEV_TX_MASK)
1944 goto out_kfree_gso_skb;
f54d9e8d 1945 nskb->next = skb->next;
f6a78bfc
HX
1946 skb->next = nskb;
1947 return rc;
1948 }
08baf561 1949 txq_trans_update(txq);
fd2ea0a7 1950 if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
f54d9e8d 1951 return NETDEV_TX_BUSY;
f6a78bfc 1952 } while (skb->next);
4ec93edb 1953
572a9d7b
PM
1954out_kfree_gso_skb:
1955 if (likely(skb->next == NULL))
1956 skb->destructor = DEV_GSO_CB(skb)->destructor;
f6a78bfc
HX
1957out_kfree_skb:
1958 kfree_skb(skb);
572a9d7b 1959 return rc;
f6a78bfc
HX
1960}
1961
0a9627f2 1962static u32 hashrnd __read_mostly;
b6b2fed1 1963
9247744e 1964u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
8f0f2223 1965{
7019298a 1966 u32 hash;
b6b2fed1 1967
513de11b
DM
1968 if (skb_rx_queue_recorded(skb)) {
1969 hash = skb_get_rx_queue(skb);
d1b19dff 1970 while (unlikely(hash >= dev->real_num_tx_queues))
513de11b
DM
1971 hash -= dev->real_num_tx_queues;
1972 return hash;
1973 }
ec581f6a
ED
1974
1975 if (skb->sk && skb->sk->sk_hash)
7019298a 1976 hash = skb->sk->sk_hash;
ec581f6a 1977 else
7019298a 1978 hash = skb->protocol;
d5a9e24a 1979
0a9627f2 1980 hash = jhash_1word(hash, hashrnd);
b6b2fed1
DM
1981
1982 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
8f0f2223 1983}
9247744e 1984EXPORT_SYMBOL(skb_tx_hash);
8f0f2223 1985
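/*
 * The last line of skb_tx_hash() maps the 32-bit hash onto
 * [0, real_num_tx_queues) without a modulo: treating hash/2^32 as a
 * fraction, (hash * n) >> 32 scales it into [0, n) with an even
 * distribution.  A standalone illustration (hypothetical helper, not
 * part of this file):
 */
static inline u16 example_scale_hash(u32 hash, u16 nqueues)
{
	/* e.g. hash = 0x80000000, nqueues = 8: (2^31 * 8) >> 32 = 4 */
	return (u16) (((u64) hash * nqueues) >> 32);
}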
ed04642f
ED
1986static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1987{
1988 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1989 if (net_ratelimit()) {
7a161ea9
ED
1990 pr_warning("%s selects TX queue %d, but "
1991 "real number of TX queues is %d\n",
1992 dev->name, queue_index, dev->real_num_tx_queues);
ed04642f
ED
1993 }
1994 return 0;
1995 }
1996 return queue_index;
1997}
1998
e8a0464c
DM
1999static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2000 struct sk_buff *skb)
2001{
a4ee3ce3
KK
2002 u16 queue_index;
2003 struct sock *sk = skb->sk;
2004
2005 if (sk_tx_queue_recorded(sk)) {
2006 queue_index = sk_tx_queue_get(sk);
2007 } else {
2008 const struct net_device_ops *ops = dev->netdev_ops;
2009
2010 if (ops->ndo_select_queue) {
2011 queue_index = ops->ndo_select_queue(dev, skb);
ed04642f 2012 queue_index = dev_cap_txqueue(dev, queue_index);
a4ee3ce3
KK
2013 } else {
2014 queue_index = 0;
2015 if (dev->real_num_tx_queues > 1)
2016 queue_index = skb_tx_hash(dev, skb);
fd2ea0a7 2017
b6c6712a 2018 if (sk && rcu_dereference_check(sk->sk_dst_cache, 1))
a4ee3ce3
KK
2019 sk_tx_queue_set(sk, queue_index);
2020 }
2021 }
eae792b7 2022
fd2ea0a7
DM
2023 skb_set_queue_mapping(skb, queue_index);
2024 return netdev_get_tx_queue(dev, queue_index);
e8a0464c
DM
2025}
2026
bbd8a0d3
KK
2027static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2028 struct net_device *dev,
2029 struct netdev_queue *txq)
2030{
2031 spinlock_t *root_lock = qdisc_lock(q);
2032 int rc;
2033
2034 spin_lock(root_lock);
2035 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2036 kfree_skb(skb);
2037 rc = NET_XMIT_DROP;
2038 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2039 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
2040 /*
2041 * This is a work-conserving queue; there are no old skbs
2042 * waiting to be sent out; and the qdisc is not running -
2043 * xmit the skb directly.
2044 */
2045 __qdisc_update_bstats(q, skb->len);
2046 if (sch_direct_xmit(skb, q, dev, txq, root_lock))
2047 __qdisc_run(q);
2048 else
2049 clear_bit(__QDISC_STATE_RUNNING, &q->state);
2050
2051 rc = NET_XMIT_SUCCESS;
2052 } else {
2053 rc = qdisc_enqueue_root(skb, q);
2054 qdisc_run(q);
2055 }
2056 spin_unlock(root_lock);
2057
2058 return rc;
2059}
2060
4b258461
KK
2061/*
2062 * Returns true if either:
2063 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2064 * 2. skb is fragmented and the device does not support SG, or if
2065 * at least one of fragments is in highmem and device does not
2066 * support DMA from it.
2067 */
2068static inline int skb_needs_linearize(struct sk_buff *skb,
2069 struct net_device *dev)
2070{
2071 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2072 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2073 illegal_highdma(dev, skb)));
2074}
2075
d29f749e
DJ
2076/**
2077 * dev_queue_xmit - transmit a buffer
2078 * @skb: buffer to transmit
2079 *
2080 * Queue a buffer for transmission to a network device. The caller must
2081 * have set the device and priority and built the buffer before calling
2082 * this function. The function can be called from an interrupt.
2083 *
2084 * A negative errno code is returned on a failure. A success does not
2085 * guarantee the frame will be transmitted as it may be dropped due
2086 * to congestion or traffic shaping.
2087 *
2088 * -----------------------------------------------------------------------------------
2089 * I notice this method can also return errors from the queue disciplines,
2090 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2091 * be positive.
2092 *
2093 * Regardless of the return value, the skb is consumed, so retrying
2094 * a send through this method is currently difficult. (You can bump the
2095 * ref count before sending to hold a reference for a retry if you are careful.)
2096 *
2097 * When calling this method, interrupts MUST be enabled. This is because
2098 * the BH enable code must have IRQs enabled so that it will not deadlock.
2099 * --BLG
2100 */
1da177e4
LT
2101int dev_queue_xmit(struct sk_buff *skb)
2102{
2103 struct net_device *dev = skb->dev;
dc2b4847 2104 struct netdev_queue *txq;
1da177e4
LT
2105 struct Qdisc *q;
2106 int rc = -ENOMEM;
2107
f6a78bfc
HX
2108 /* GSO will handle the following emulations directly. */
2109 if (netif_needs_gso(dev, skb))
2110 goto gso;
2111
4b258461
KK
2112 /* Convert a paged skb to linear, if required */
2113 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
1da177e4
LT
2114 goto out_kfree_skb;
2115
2116 /* If packet is not checksummed and device does not support
2117 * checksumming for this protocol, complete checksumming here.
2118 */
663ead3b
HX
2119 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2120 skb_set_transport_header(skb, skb->csum_start -
2121 skb_headroom(skb));
6de329e2
BH
2122 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2123 goto out_kfree_skb;
663ead3b 2124 }
1da177e4 2125
f6a78bfc 2126gso:
4ec93edb
YH
2127 /* Disable soft irqs for various locks below. Also
2128 * stops preemption for RCU.
1da177e4 2129 */
4ec93edb 2130 rcu_read_lock_bh();
1da177e4 2131
eae792b7 2132 txq = dev_pick_tx(dev, skb);
a898def2 2133 q = rcu_dereference_bh(txq->qdisc);
37437bb2 2134
1da177e4 2135#ifdef CONFIG_NET_CLS_ACT
d1b19dff 2136 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
1da177e4
LT
2137#endif
2138 if (q->enqueue) {
bbd8a0d3 2139 rc = __dev_xmit_skb(skb, q, dev, txq);
37437bb2 2140 goto out;
1da177e4
LT
2141 }
2142
2143 /* The device has no queue. Common case for software devices:
2144 loopback, all the sorts of tunnels...
2145
932ff279
HX
2146 Really, it is unlikely that netif_tx_lock protection is necessary
2147 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
1da177e4
LT
2148 counters.)
2149 However, it is possible that they rely on the protection
2150 we provide here.
2151
2152 Check this and take the lock. It is not prone to deadlocks.
2153 Or take the shortcut for the noqueue qdisc, it is even simpler 8)
2154 */
2155 if (dev->flags & IFF_UP) {
2156 int cpu = smp_processor_id(); /* ok because BHs are off */
2157
c773e847 2158 if (txq->xmit_lock_owner != cpu) {
1da177e4 2159
c773e847 2160 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 2161
fd2ea0a7 2162 if (!netif_tx_queue_stopped(txq)) {
572a9d7b
PM
2163 rc = dev_hard_start_xmit(skb, dev, txq);
2164 if (dev_xmit_complete(rc)) {
c773e847 2165 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
2166 goto out;
2167 }
2168 }
c773e847 2169 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
2170 if (net_ratelimit())
2171 printk(KERN_CRIT "Virtual device %s asks to "
2172 "queue packet!\n", dev->name);
2173 } else {
2174 /* Recursion is detected! It is possible,
2175 * unfortunately */
2176 if (net_ratelimit())
2177 printk(KERN_CRIT "Dead loop on virtual device "
2178 "%s, fix it urgently!\n", dev->name);
2179 }
2180 }
2181
2182 rc = -ENETDOWN;
d4828d85 2183 rcu_read_unlock_bh();
1da177e4
LT
2184
2185out_kfree_skb:
2186 kfree_skb(skb);
2187 return rc;
2188out:
d4828d85 2189 rcu_read_unlock_bh();
1da177e4
LT
2190 return rc;
2191}
d1b19dff 2192EXPORT_SYMBOL(dev_queue_xmit);
1da177e4
LT
2193
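/*
 * A hedged sketch of a dev_queue_xmit() caller honouring the contract
 * above: skb->dev must be set, interrupts must be enabled, and the skb
 * is consumed whatever the outcome.  Negative returns are errnos;
 * positive ones are NET_XMIT_* codes from the qdisc.  example_send()
 * is hypothetical.
 */
static int example_send(struct sk_buff *skb, struct net_device *dev)
{
	int rc;

	skb->dev = dev;
	rc = dev_queue_xmit(skb);	/* consumes skb, even on failure */
	if (rc == NET_XMIT_SUCCESS)
		return 0;
	/* skb is already freed; map qdisc codes to an errno */
	return rc < 0 ? rc : -ENOBUFS;
}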
2194
2195/*=======================================================================
2196 Receiver routines
2197 =======================================================================*/
2198
6b2bedc3
SH
2199int netdev_max_backlog __read_mostly = 1000;
2200int netdev_budget __read_mostly = 300;
2201int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4
LT
2202
2203DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2204
df334545 2205#ifdef CONFIG_RPS
fec5e652
TH
2206
2207/* One global table that all flow-based protocols share. */
2208struct rps_sock_flow_table *rps_sock_flow_table;
2209EXPORT_SYMBOL(rps_sock_flow_table);
2210
0a9627f2
TH
2211/*
2212 * get_rps_cpu is called from netif_rx and netif_receive_skb and returns
2213 * the target CPU from the RPS map of the receiving queue for a given skb.
b0e28f1e 2214 * rcu_read_lock must be held on entry.
0a9627f2 2215 */
fec5e652
TH
2216static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2217 struct rps_dev_flow **rflowp)
0a9627f2
TH
2218{
2219 struct ipv6hdr *ip6;
2220 struct iphdr *ip;
2221 struct netdev_rx_queue *rxqueue;
2222 struct rps_map *map;
fec5e652
TH
2223 struct rps_dev_flow_table *flow_table;
2224 struct rps_sock_flow_table *sock_flow_table;
0a9627f2
TH
2225 int cpu = -1;
2226 u8 ip_proto;
fec5e652 2227 u16 tcpu;
0a9627f2
TH
2228 u32 addr1, addr2, ports, ihl;
2229
0a9627f2
TH
2230 if (skb_rx_queue_recorded(skb)) {
2231 u16 index = skb_get_rx_queue(skb);
2232 if (unlikely(index >= dev->num_rx_queues)) {
2233 if (net_ratelimit()) {
7a161ea9
ED
2234 pr_warning("%s received packet on queue "
2235 "%u, but number of RX queues is %u\n",
2236 dev->name, index, dev->num_rx_queues);
0a9627f2
TH
2237 }
2238 goto done;
2239 }
2240 rxqueue = dev->_rx + index;
2241 } else
2242 rxqueue = dev->_rx;
2243
fec5e652 2244 if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
0a9627f2
TH
2245 goto done;
2246
2247 if (skb->rxhash)
2248 goto got_hash; /* Skip hash computation on packet header */
2249
2250 switch (skb->protocol) {
2251 case __constant_htons(ETH_P_IP):
2252 if (!pskb_may_pull(skb, sizeof(*ip)))
2253 goto done;
2254
2255 ip = (struct iphdr *) skb->data;
2256 ip_proto = ip->protocol;
2257 addr1 = ip->saddr;
2258 addr2 = ip->daddr;
2259 ihl = ip->ihl;
2260 break;
2261 case __constant_htons(ETH_P_IPV6):
2262 if (!pskb_may_pull(skb, sizeof(*ip6)))
2263 goto done;
2264
2265 ip6 = (struct ipv6hdr *) skb->data;
2266 ip_proto = ip6->nexthdr;
2267 addr1 = ip6->saddr.s6_addr32[3];
2268 addr2 = ip6->daddr.s6_addr32[3];
2269 ihl = (40 >> 2);
2270 break;
2271 default:
2272 goto done;
2273 }
2274 ports = 0;
2275 switch (ip_proto) {
2276 case IPPROTO_TCP:
2277 case IPPROTO_UDP:
2278 case IPPROTO_DCCP:
2279 case IPPROTO_ESP:
2280 case IPPROTO_AH:
2281 case IPPROTO_SCTP:
2282 case IPPROTO_UDPLITE:
2283 if (pskb_may_pull(skb, (ihl * 4) + 4))
2284 ports = *((u32 *) (skb->data + (ihl * 4)));
2285 break;
2286
2287 default:
2288 break;
2289 }
2290
2291 skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
2292 if (!skb->rxhash)
2293 skb->rxhash = 1;
2294
2295got_hash:
fec5e652
TH
2296 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2297 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2298 if (flow_table && sock_flow_table) {
2299 u16 next_cpu;
2300 struct rps_dev_flow *rflow;
2301
2302 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2303 tcpu = rflow->cpu;
2304
2305 next_cpu = sock_flow_table->ents[skb->rxhash &
2306 sock_flow_table->mask];
2307
2308 /*
2309 * If the desired CPU (where last recvmsg was done) is
2310 * different from current CPU (one in the rx-queue flow
2311 * table entry), switch if one of the following holds:
2312 * - Current CPU is unset (equal to RPS_NO_CPU).
2313 * - Current CPU is offline.
2314 * - The current CPU's queue tail has advanced beyond the
2315 * last packet that was enqueued using this table entry.
2316 * This guarantees that all previous packets for the flow
2317 * have been dequeued, thus preserving in order delivery.
2318 */
2319 if (unlikely(tcpu != next_cpu) &&
2320 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2321 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2322 rflow->last_qtail)) >= 0)) {
2323 tcpu = rflow->cpu = next_cpu;
2324 if (tcpu != RPS_NO_CPU)
2325 rflow->last_qtail = per_cpu(softnet_data,
2326 tcpu).input_queue_head;
2327 }
2328 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2329 *rflowp = rflow;
2330 cpu = tcpu;
2331 goto done;
2332 }
2333 }
2334
0a9627f2
TH
2335 map = rcu_dereference(rxqueue->rps_map);
2336 if (map) {
fec5e652 2337 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
0a9627f2
TH
2338
2339 if (cpu_online(tcpu)) {
2340 cpu = tcpu;
2341 goto done;
2342 }
2343 }
2344
2345done:
0a9627f2
TH
2346 return cpu;
2347}
2348
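/*
 * A standalone illustration of the flow hash computed above for IPv4:
 * jhash_3words() folds (saddr, daddr, ports) into skb->rxhash, where
 * 'ports' is the first 32 bits of the transport header (source and
 * destination ports back to back), exactly as get_rps_cpu() loads
 * them.  Hypothetical helper, not part of this file.
 */
static inline u32 example_ipv4_rxhash(const struct iphdr *ip, __be32 ports)
{
	return jhash_3words((__force u32) ip->saddr,
			    (__force u32) ip->daddr,
			    (__force u32) ports, hashrnd);
}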
2349/*
2350 * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled
2351 * to be sent to kick remote softirq processing. There are two masks since
2352 * the sending of IPIs must be done with interrupts enabled. The select field
2353 * indicates the current mask that enqueue_backlog uses to schedule IPIs.
2354 * select is flipped before net_rps_action is called while still under lock,
2355 * net_rps_action then uses the non-selected mask to send the IPIs and clears
2356 * it without conflicting with enqueue_backlog operation.
2357 */
2358struct rps_remote_softirq_cpus {
2359 cpumask_t mask[2];
2360 int select;
2361};
2362static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus);
2363
2364/* Called from hardirq (IPI) context */
2365static void trigger_softirq(void *data)
2366{
2367 struct softnet_data *queue = data;
2368 __napi_schedule(&queue->backlog);
2369 __get_cpu_var(netdev_rx_stat).received_rps++;
2370}
fec5e652 2371#endif /* CONFIG_RPS */
0a9627f2
TH
2372
2373/*
2374 * enqueue_to_backlog is called to queue an skb on a per-CPU backlog
2375 * queue (which may belong to a remote CPU).
2376 */
fec5e652
TH
2377static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2378 unsigned int *qtail)
0a9627f2
TH
2379{
2380 struct softnet_data *queue;
2381 unsigned long flags;
2382
2383 queue = &per_cpu(softnet_data, cpu);
2384
2385 local_irq_save(flags);
2386 __get_cpu_var(netdev_rx_stat).total++;
2387
152102c7 2388 rps_lock(queue);
0a9627f2
TH
2389 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2390 if (queue->input_pkt_queue.qlen) {
2391enqueue:
2392 __skb_queue_tail(&queue->input_pkt_queue, skb);
fec5e652
TH
2393#ifdef CONFIG_RPS
2394 *qtail = queue->input_queue_head +
2395 queue->input_pkt_queue.qlen;
2396#endif
152102c7
CG
2397 rps_unlock(queue);
2398 local_irq_restore(flags);
0a9627f2
TH
2399 return NET_RX_SUCCESS;
2400 }
2401
2402 /* Schedule NAPI for backlog device */
2403 if (napi_schedule_prep(&queue->backlog)) {
df334545 2404#ifdef CONFIG_RPS
0a9627f2
TH
2405 if (cpu != smp_processor_id()) {
2406 struct rps_remote_softirq_cpus *rcpus =
2407 &__get_cpu_var(rps_remote_softirq_cpus);
2408
2409 cpu_set(cpu, rcpus->mask[rcpus->select]);
2410 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
fec5e652
TH
2411 goto enqueue;
2412 }
1e94d72f 2413#endif
fec5e652 2414 __napi_schedule(&queue->backlog);
0a9627f2
TH
2415 }
2416 goto enqueue;
2417 }
2418
152102c7 2419 rps_unlock(queue);
0a9627f2
TH
2420
2421 __get_cpu_var(netdev_rx_stat).dropped++;
2422 local_irq_restore(flags);
2423
2424 kfree_skb(skb);
2425 return NET_RX_DROP;
2426}
1da177e4 2427
1da177e4
LT
2428/**
2429 * netif_rx - post buffer to the network code
2430 * @skb: buffer to post
2431 *
2432 * This function receives a packet from a device driver and queues it for
2433 * the upper (protocol) levels to process. It always succeeds. The buffer
2434 * may be dropped during processing for congestion control or by the
2435 * protocol layers.
2436 *
2437 * return values:
2438 * NET_RX_SUCCESS (no congestion)
1da177e4
LT
2439 * NET_RX_DROP (packet was dropped)
2440 *
2441 */
2442
2443int netif_rx(struct sk_buff *skb)
2444{
b0e28f1e 2445 int ret;
1da177e4
LT
2446
2447 /* if netpoll wants it, pretend we never saw it */
2448 if (netpoll_rx(skb))
2449 return NET_RX_DROP;
2450
b7aa0bf7 2451 if (!skb->tstamp.tv64)
a61bbcf2 2452 net_timestamp(skb);
1da177e4 2453
df334545 2454#ifdef CONFIG_RPS
b0e28f1e 2455 {
fec5e652 2456 struct rps_dev_flow voidflow, *rflow = &voidflow;
b0e28f1e
ED
2457 int cpu;
2458
2459 rcu_read_lock();
fec5e652
TH
2460
2461 cpu = get_rps_cpu(skb->dev, skb, &rflow);
b0e28f1e
ED
2462 if (cpu < 0)
2463 cpu = smp_processor_id();
fec5e652
TH
2464
2465 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2466
b0e28f1e
ED
2467 rcu_read_unlock();
2468 }
1e94d72f 2469#else
fec5e652
TH
2470 {
2471 unsigned int qtail;
2472 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2473 put_cpu();
2474 }
1e94d72f 2475#endif
b0e28f1e 2476 return ret;
1da177e4 2477}
d1b19dff 2478EXPORT_SYMBOL(netif_rx);
1da177e4
LT
2479
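/*
 * A minimal sketch of the classic (non-NAPI) receive path feeding
 * netif_rx(), per the contract above: callable from hard interrupt
 * context, and the skb is always consumed.  example_rx_isr() and the
 * rx_buf/rx_len arguments are hypothetical driver state.
 */
static void example_rx_isr(struct net_device *dev, const u8 *rx_buf,
			   unsigned int rx_len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, rx_len);

	if (!skb)
		return;					/* drop on OOM */

	memcpy(skb_put(skb, rx_len), rx_buf, rx_len);
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	netif_rx(skb);			/* queued for softirq processing */
}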
2480int netif_rx_ni(struct sk_buff *skb)
2481{
2482 int err;
2483
2484 preempt_disable();
2485 err = netif_rx(skb);
2486 if (local_softirq_pending())
2487 do_softirq();
2488 preempt_enable();
2489
2490 return err;
2491}
1da177e4
LT
2492EXPORT_SYMBOL(netif_rx_ni);
2493
1da177e4
LT
2494static void net_tx_action(struct softirq_action *h)
2495{
2496 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2497
2498 if (sd->completion_queue) {
2499 struct sk_buff *clist;
2500
2501 local_irq_disable();
2502 clist = sd->completion_queue;
2503 sd->completion_queue = NULL;
2504 local_irq_enable();
2505
2506 while (clist) {
2507 struct sk_buff *skb = clist;
2508 clist = clist->next;
2509
547b792c 2510 WARN_ON(atomic_read(&skb->users));
1da177e4
LT
2511 __kfree_skb(skb);
2512 }
2513 }
2514
2515 if (sd->output_queue) {
37437bb2 2516 struct Qdisc *head;
1da177e4
LT
2517
2518 local_irq_disable();
2519 head = sd->output_queue;
2520 sd->output_queue = NULL;
2521 local_irq_enable();
2522
2523 while (head) {
37437bb2
DM
2524 struct Qdisc *q = head;
2525 spinlock_t *root_lock;
2526
1da177e4
LT
2527 head = head->next_sched;
2528
5fb66229 2529 root_lock = qdisc_lock(q);
37437bb2 2530 if (spin_trylock(root_lock)) {
def82a1d
JP
2531 smp_mb__before_clear_bit();
2532 clear_bit(__QDISC_STATE_SCHED,
2533 &q->state);
37437bb2
DM
2534 qdisc_run(q);
2535 spin_unlock(root_lock);
1da177e4 2536 } else {
195648bb 2537 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 2538 &q->state)) {
195648bb 2539 __netif_reschedule(q);
e8a83e10
JP
2540 } else {
2541 smp_mb__before_clear_bit();
2542 clear_bit(__QDISC_STATE_SCHED,
2543 &q->state);
2544 }
1da177e4
LT
2545 }
2546 }
2547 }
2548}
2549
6f05f629
SH
2550static inline int deliver_skb(struct sk_buff *skb,
2551 struct packet_type *pt_prev,
2552 struct net_device *orig_dev)
1da177e4
LT
2553{
2554 atomic_inc(&skb->users);
f2ccd8fa 2555 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4
LT
2556}
2557
2558#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
da678292
MM
2559
2560#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2561/* This hook is defined here for ATM LANE */
2562int (*br_fdb_test_addr_hook)(struct net_device *dev,
2563 unsigned char *addr) __read_mostly;
4fb019a0 2564EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 2565#endif
1da177e4 2566
6229e362
SH
2567/*
2568 * If bridge module is loaded call bridging hook.
2569 * returns NULL if packet was consumed.
2570 */
2571struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2572 struct sk_buff *skb) __read_mostly;
4fb019a0 2573EXPORT_SYMBOL_GPL(br_handle_frame_hook);
da678292 2574
6229e362
SH
2575static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2576 struct packet_type **pt_prev, int *ret,
2577 struct net_device *orig_dev)
1da177e4
LT
2578{
2579 struct net_bridge_port *port;
2580
6229e362
SH
2581 if (skb->pkt_type == PACKET_LOOPBACK ||
2582 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2583 return skb;
1da177e4
LT
2584
2585 if (*pt_prev) {
6229e362 2586 *ret = deliver_skb(skb, *pt_prev, orig_dev);
1da177e4 2587 *pt_prev = NULL;
4ec93edb
YH
2588 }
2589
6229e362 2590 return br_handle_frame_hook(port, skb);
1da177e4
LT
2591}
2592#else
6229e362 2593#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
1da177e4
LT
2594#endif
2595
b863ceb7
PM
2596#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2597struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2598EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2599
2600static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2601 struct packet_type **pt_prev,
2602 int *ret,
2603 struct net_device *orig_dev)
2604{
2605 if (skb->dev->macvlan_port == NULL)
2606 return skb;
2607
2608 if (*pt_prev) {
2609 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2610 *pt_prev = NULL;
2611 }
2612 return macvlan_handle_frame_hook(skb);
2613}
2614#else
2615#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2616#endif
2617
1da177e4
LT
2618#ifdef CONFIG_NET_CLS_ACT
2619/* TODO: Maybe we should just force sch_ingress to be compiled in
2620 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
2621 * instructions (a compare and 2 extra stores) when we don't have it
2622 * on but do have CONFIG_NET_CLS_ACT.
4ec93edb 2623 * NOTE: This doesn't stop any functionality; if you don't have
1da177e4
LT
2624 * the ingress scheduler, you just can't add policies on ingress.
2625 *
2626 */
4ec93edb 2627static int ing_filter(struct sk_buff *skb)
1da177e4 2628{
1da177e4 2629 struct net_device *dev = skb->dev;
f697c3e8 2630 u32 ttl = G_TC_RTTL(skb->tc_verd);
555353cf
DM
2631 struct netdev_queue *rxq;
2632 int result = TC_ACT_OK;
2633 struct Qdisc *q;
4ec93edb 2634
f697c3e8
HX
2635 if (MAX_RED_LOOP < ttl++) {
2636 printk(KERN_WARNING
2637 "Redir loop detected Dropping packet (%d->%d)\n",
8964be4a 2638 skb->skb_iif, dev->ifindex);
f697c3e8
HX
2639 return TC_ACT_SHOT;
2640 }
1da177e4 2641
f697c3e8
HX
2642 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2643 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
1da177e4 2644
555353cf
DM
2645 rxq = &dev->rx_queue;
2646
83874000 2647 q = rxq->qdisc;
8d50b53d 2648 if (q != &noop_qdisc) {
83874000 2649 spin_lock(qdisc_lock(q));
a9312ae8
DM
2650 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2651 result = qdisc_enqueue_root(skb, q);
83874000
DM
2652 spin_unlock(qdisc_lock(q));
2653 }
f697c3e8
HX
2654
2655 return result;
2656}
86e65da9 2657
f697c3e8
HX
2658static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2659 struct packet_type **pt_prev,
2660 int *ret, struct net_device *orig_dev)
2661{
8d50b53d 2662 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
f697c3e8 2663 goto out;
1da177e4 2664
f697c3e8
HX
2665 if (*pt_prev) {
2666 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2667 *pt_prev = NULL;
2668 } else {
2669 /* Huh? Why does turning on AF_PACKET affect this? */
2670 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
1da177e4
LT
2671 }
2672
f697c3e8
HX
2673 switch (ing_filter(skb)) {
2674 case TC_ACT_SHOT:
2675 case TC_ACT_STOLEN:
2676 kfree_skb(skb);
2677 return NULL;
2678 }
2679
2680out:
2681 skb->tc_verd = 0;
2682 return skb;
1da177e4
LT
2683}
2684#endif
2685
bc1d0411
PM
2686/*
2687 * netif_nit_deliver - deliver received packets to network taps
2688 * @skb: buffer
2689 *
2690 * This function is used to deliver incoming packets to network
2691 * taps. It should be used when the normal netif_receive_skb path
2692 * is bypassed, for example because of VLAN acceleration.
2693 */
2694void netif_nit_deliver(struct sk_buff *skb)
2695{
2696 struct packet_type *ptype;
2697
2698 if (list_empty(&ptype_all))
2699 return;
2700
2701 skb_reset_network_header(skb);
2702 skb_reset_transport_header(skb);
2703 skb->mac_len = skb->network_header - skb->mac_header;
2704
2705 rcu_read_lock();
2706 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2707 if (!ptype->dev || ptype->dev == skb->dev)
2708 deliver_skb(skb, ptype, skb->dev);
2709 }
2710 rcu_read_unlock();
2711}
2712
acbbc071
ED
2713static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2714 struct net_device *master)
2715{
2716 if (skb->pkt_type == PACKET_HOST) {
2717 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2718
2719 memcpy(dest, master->dev_addr, ETH_ALEN);
2720 }
2721}
2722
2723/* On bonding slaves other than the currently active slave, suppress
2724 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2725 * ARP on active-backup slaves with arp_validate enabled.
2726 */
2727int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2728{
2729 struct net_device *dev = skb->dev;
2730
2731 if (master->priv_flags & IFF_MASTER_ARPMON)
2732 dev->last_rx = jiffies;
2733
2734 if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
2735 /* Do address unmangle. The local destination address
2736 * will be always the one master has. Provides the right
2737 * functionality in a bridge.
2738 */
2739 skb_bond_set_mac_by_master(skb, master);
2740 }
2741
2742 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2743 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2744 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2745 return 0;
2746
2747 if (master->priv_flags & IFF_MASTER_ALB) {
2748 if (skb->pkt_type != PACKET_BROADCAST &&
2749 skb->pkt_type != PACKET_MULTICAST)
2750 return 0;
2751 }
2752 if (master->priv_flags & IFF_MASTER_8023AD &&
2753 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2754 return 0;
2755
2756 return 1;
2757 }
2758 return 0;
2759}
2760EXPORT_SYMBOL(__skb_bond_should_drop);
2761
10f744d2 2762static int __netif_receive_skb(struct sk_buff *skb)
1da177e4
LT
2763{
2764 struct packet_type *ptype, *pt_prev;
f2ccd8fa 2765 struct net_device *orig_dev;
0641e4fb 2766 struct net_device *master;
0d7a3681 2767 struct net_device *null_or_orig;
ca8d9ea3 2768 struct net_device *null_or_bond;
1da177e4 2769 int ret = NET_RX_DROP;
252e3346 2770 __be16 type;
1da177e4 2771
81bbb3d4
ED
2772 if (!skb->tstamp.tv64)
2773 net_timestamp(skb);
2774
05423b24 2775 if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
9b22ea56
PM
2776 return NET_RX_SUCCESS;
2777
1da177e4 2778 /* if we've gotten here through NAPI, check netpoll */
bea3348e 2779 if (netpoll_receive_skb(skb))
1da177e4
LT
2780 return NET_RX_DROP;
2781
8964be4a
ED
2782 if (!skb->skb_iif)
2783 skb->skb_iif = skb->dev->ifindex;
86e65da9 2784
0d7a3681 2785 null_or_orig = NULL;
cc9bd5ce 2786 orig_dev = skb->dev;
0641e4fb
ED
2787 master = ACCESS_ONCE(orig_dev->master);
2788 if (master) {
2789 if (skb_bond_should_drop(skb, master))
0d7a3681
JE
2790 null_or_orig = orig_dev; /* deliver only exact match */
2791 else
0641e4fb 2792 skb->dev = master;
cc9bd5ce 2793 }
8f903c70 2794
1da177e4
LT
2795 __get_cpu_var(netdev_rx_stat).total++;
2796
c1d2bbe1 2797 skb_reset_network_header(skb);
badff6d0 2798 skb_reset_transport_header(skb);
b0e380b1 2799 skb->mac_len = skb->network_header - skb->mac_header;
1da177e4
LT
2800
2801 pt_prev = NULL;
2802
2803 rcu_read_lock();
2804
2805#ifdef CONFIG_NET_CLS_ACT
2806 if (skb->tc_verd & TC_NCLS) {
2807 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2808 goto ncls;
2809 }
2810#endif
2811
2812 list_for_each_entry_rcu(ptype, &ptype_all, list) {
f982307f
JE
2813 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2814 ptype->dev == orig_dev) {
4ec93edb 2815 if (pt_prev)
f2ccd8fa 2816 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
2817 pt_prev = ptype;
2818 }
2819 }
2820
2821#ifdef CONFIG_NET_CLS_ACT
f697c3e8
HX
2822 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2823 if (!skb)
1da177e4 2824 goto out;
1da177e4
LT
2825ncls:
2826#endif
2827
6229e362 2828 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
b863ceb7
PM
2829 if (!skb)
2830 goto out;
2831 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
6229e362 2832 if (!skb)
1da177e4
LT
2833 goto out;
2834
1f3c8804
AG
2835 /*
2836 * Make sure frames received on VLAN interfaces stacked on
2837 * bonding interfaces still make their way to any base bonding
2838 * device that may have registered for a specific ptype. The
2839 * handler may have to adjust skb->dev and orig_dev.
1f3c8804 2840 */
ca8d9ea3 2841 null_or_bond = NULL;
1f3c8804
AG
2842 if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
2843 (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
ca8d9ea3 2844 null_or_bond = vlan_dev_real_dev(skb->dev);
1f3c8804
AG
2845 }
2846
1da177e4 2847 type = skb->protocol;
82d8a867
PE
2848 list_for_each_entry_rcu(ptype,
2849 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1f3c8804 2850 if (ptype->type == type && (ptype->dev == null_or_orig ||
ca8d9ea3
AG
2851 ptype->dev == skb->dev || ptype->dev == orig_dev ||
2852 ptype->dev == null_or_bond)) {
4ec93edb 2853 if (pt_prev)
f2ccd8fa 2854 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
2855 pt_prev = ptype;
2856 }
2857 }
2858
2859 if (pt_prev) {
f2ccd8fa 2860 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4
LT
2861 } else {
2862 kfree_skb(skb);
2863 /* Jamal, now you will not be able to escape explaining
2864 * to me how you were going to use this. :-)
2865 */
2866 ret = NET_RX_DROP;
2867 }
2868
2869out:
2870 rcu_read_unlock();
2871 return ret;
2872}
0a9627f2
TH
2873
2874/**
2875 * netif_receive_skb - process receive buffer from network
2876 * @skb: buffer to process
2877 *
2878 * netif_receive_skb() is the main receive data processing function.
2879 * It always succeeds. The buffer may be dropped during processing
2880 * for congestion control or by the protocol layers.
2881 *
2882 * This function may only be called from softirq context and interrupts
2883 * should be enabled.
2884 *
2885 * Return values (usually ignored):
2886 * NET_RX_SUCCESS: no congestion
2887 * NET_RX_DROP: packet was dropped
2888 */
2889int netif_receive_skb(struct sk_buff *skb)
2890{
df334545 2891#ifdef CONFIG_RPS
fec5e652
TH
2892 struct rps_dev_flow voidflow, *rflow = &voidflow;
2893 int cpu, ret;
2894
2895 rcu_read_lock();
0a9627f2 2896
fec5e652 2897 cpu = get_rps_cpu(skb->dev, skb, &rflow);
0a9627f2 2898
fec5e652
TH
2899 if (cpu >= 0) {
2900 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2901 rcu_read_unlock();
2902 } else {
2903 rcu_read_unlock();
2904 ret = __netif_receive_skb(skb);
2905 }
2906
2907 return ret;
1e94d72f
TH
2908#else
2909 return __netif_receive_skb(skb);
2910#endif
0a9627f2 2911}
d1b19dff 2912EXPORT_SYMBOL(netif_receive_skb);
1da177e4 2913
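/*
 * Unlike netif_rx(), netif_receive_skb() processes the packet
 * synchronously (with RPS it may still be bounced to a remote CPU's
 * backlog, as above) and must be called from softirq context --
 * typically a NAPI poll handler.  A hedged sketch with a hypothetical
 * helper:
 */
static void example_napi_deliver(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);		/* consumed; result usually ignored */
}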
6e583ce5 2914/* Network device is going away, flush any packets still pending */
152102c7 2915static void flush_backlog(void *arg)
6e583ce5 2916{
152102c7
CG
2917 struct net_device *dev = arg;
2918 struct softnet_data *queue = &__get_cpu_var(softnet_data);
6e583ce5
SH
2919 struct sk_buff *skb, *tmp;
2920
152102c7 2921 rps_lock(queue);
6e583ce5
SH
2922 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2923 if (skb->dev == dev) {
2924 __skb_unlink(skb, &queue->input_pkt_queue);
2925 kfree_skb(skb);
fec5e652 2926 incr_input_queue_head(queue);
6e583ce5 2927 }
152102c7 2928 rps_unlock(queue);
6e583ce5
SH
2929}
2930
d565b0a1
HX
2931static int napi_gro_complete(struct sk_buff *skb)
2932{
2933 struct packet_type *ptype;
2934 __be16 type = skb->protocol;
2935 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
2936 int err = -ENOENT;
2937
fc59f9a3
HX
2938 if (NAPI_GRO_CB(skb)->count == 1) {
2939 skb_shinfo(skb)->gso_size = 0;
d565b0a1 2940 goto out;
fc59f9a3 2941 }
d565b0a1
HX
2942
2943 rcu_read_lock();
2944 list_for_each_entry_rcu(ptype, head, list) {
2945 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
2946 continue;
2947
2948 err = ptype->gro_complete(skb);
2949 break;
2950 }
2951 rcu_read_unlock();
2952
2953 if (err) {
2954 WARN_ON(&ptype->list == head);
2955 kfree_skb(skb);
2956 return NET_RX_SUCCESS;
2957 }
2958
2959out:
d565b0a1
HX
2960 return netif_receive_skb(skb);
2961}
2962
11380a4b 2963static void napi_gro_flush(struct napi_struct *napi)
d565b0a1
HX
2964{
2965 struct sk_buff *skb, *next;
2966
2967 for (skb = napi->gro_list; skb; skb = next) {
2968 next = skb->next;
2969 skb->next = NULL;
2970 napi_gro_complete(skb);
2971 }
2972
4ae5544f 2973 napi->gro_count = 0;
d565b0a1
HX
2974 napi->gro_list = NULL;
2975}
d565b0a1 2976
5b252f0c 2977enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
d565b0a1
HX
2978{
2979 struct sk_buff **pp = NULL;
2980 struct packet_type *ptype;
2981 __be16 type = skb->protocol;
2982 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
0da2afd5 2983 int same_flow;
d565b0a1 2984 int mac_len;
5b252f0c 2985 enum gro_result ret;
d565b0a1
HX
2986
2987 if (!(skb->dev->features & NETIF_F_GRO))
2988 goto normal;
2989
4cf704fb 2990 if (skb_is_gso(skb) || skb_has_frags(skb))
f17f5c91
HX
2991 goto normal;
2992
d565b0a1
HX
2993 rcu_read_lock();
2994 list_for_each_entry_rcu(ptype, head, list) {
d565b0a1
HX
2995 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
2996 continue;
2997
86911732 2998 skb_set_network_header(skb, skb_gro_offset(skb));
d565b0a1
HX
2999 mac_len = skb->network_header - skb->mac_header;
3000 skb->mac_len = mac_len;
3001 NAPI_GRO_CB(skb)->same_flow = 0;
3002 NAPI_GRO_CB(skb)->flush = 0;
5d38a079 3003 NAPI_GRO_CB(skb)->free = 0;
d565b0a1 3004
d565b0a1
HX
3005 pp = ptype->gro_receive(&napi->gro_list, skb);
3006 break;
3007 }
3008 rcu_read_unlock();
3009
3010 if (&ptype->list == head)
3011 goto normal;
3012
0da2afd5 3013 same_flow = NAPI_GRO_CB(skb)->same_flow;
5d0d9be8 3014 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
0da2afd5 3015
d565b0a1
HX
3016 if (pp) {
3017 struct sk_buff *nskb = *pp;
3018
3019 *pp = nskb->next;
3020 nskb->next = NULL;
3021 napi_gro_complete(nskb);
4ae5544f 3022 napi->gro_count--;
d565b0a1
HX
3023 }
3024
0da2afd5 3025 if (same_flow)
d565b0a1
HX
3026 goto ok;
3027
4ae5544f 3028 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
d565b0a1 3029 goto normal;
d565b0a1 3030
4ae5544f 3031 napi->gro_count++;
d565b0a1 3032 NAPI_GRO_CB(skb)->count = 1;
86911732 3033 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
d565b0a1
HX
3034 skb->next = napi->gro_list;
3035 napi->gro_list = skb;
5d0d9be8 3036 ret = GRO_HELD;
d565b0a1 3037
ad0f9904 3038pull:
cb18978c
HX
3039 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3040 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3041
3042 BUG_ON(skb->end - skb->tail < grow);
3043
3044 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3045
3046 skb->tail += grow;
3047 skb->data_len -= grow;
3048
3049 skb_shinfo(skb)->frags[0].page_offset += grow;
3050 skb_shinfo(skb)->frags[0].size -= grow;
3051
3052 if (unlikely(!skb_shinfo(skb)->frags[0].size)) {
3053 put_page(skb_shinfo(skb)->frags[0].page);
3054 memmove(skb_shinfo(skb)->frags,
3055 skb_shinfo(skb)->frags + 1,
3056 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3057 }
ad0f9904
HX
3058 }
3059
d565b0a1 3060ok:
5d0d9be8 3061 return ret;
d565b0a1
HX
3062
3063normal:
ad0f9904
HX
3064 ret = GRO_NORMAL;
3065 goto pull;
5d38a079 3066}
96e93eab
HX
3067EXPORT_SYMBOL(dev_gro_receive);
3068
5b252f0c
BH
3069static gro_result_t
3070__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
96e93eab
HX
3071{
3072 struct sk_buff *p;
3073
d1c76af9
HX
3074 if (netpoll_rx_on(skb))
3075 return GRO_NORMAL;
3076
96e93eab 3077 for (p = napi->gro_list; p; p = p->next) {
f64f9e71
JP
3078 NAPI_GRO_CB(p)->same_flow =
3079 (p->dev == skb->dev) &&
3080 !compare_ether_header(skb_mac_header(p),
3081 skb_gro_mac_header(skb));
96e93eab
HX
3082 NAPI_GRO_CB(p)->flush = 0;
3083 }
3084
3085 return dev_gro_receive(napi, skb);
3086}
5d38a079 3087
c7c4b3b6 3088gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
5d38a079 3089{
5d0d9be8
HX
3090 switch (ret) {
3091 case GRO_NORMAL:
c7c4b3b6
BH
3092 if (netif_receive_skb(skb))
3093 ret = GRO_DROP;
3094 break;
5d38a079 3095
5d0d9be8 3096 case GRO_DROP:
5d0d9be8 3097 case GRO_MERGED_FREE:
5d38a079
HX
3098 kfree_skb(skb);
3099 break;
5b252f0c
BH
3100
3101 case GRO_HELD:
3102 case GRO_MERGED:
3103 break;
5d38a079
HX
3104 }
3105
c7c4b3b6 3106 return ret;
5d0d9be8
HX
3107}
3108EXPORT_SYMBOL(napi_skb_finish);
3109
78a478d0
HX
3110void skb_gro_reset_offset(struct sk_buff *skb)
3111{
3112 NAPI_GRO_CB(skb)->data_offset = 0;
3113 NAPI_GRO_CB(skb)->frag0 = NULL;
7489594c 3114 NAPI_GRO_CB(skb)->frag0_len = 0;
78a478d0 3115
78d3fd0b 3116 if (skb->mac_header == skb->tail &&
7489594c 3117 !PageHighMem(skb_shinfo(skb)->frags[0].page)) {
78a478d0
HX
3118 NAPI_GRO_CB(skb)->frag0 =
3119 page_address(skb_shinfo(skb)->frags[0].page) +
3120 skb_shinfo(skb)->frags[0].page_offset;
7489594c
HX
3121 NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size;
3122 }
78a478d0
HX
3123}
3124EXPORT_SYMBOL(skb_gro_reset_offset);
3125
c7c4b3b6 3126gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
5d0d9be8 3127{
86911732
HX
3128 skb_gro_reset_offset(skb);
3129
5d0d9be8 3130 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
d565b0a1
HX
3131}
3132EXPORT_SYMBOL(napi_gro_receive);
3133
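/*
 * For a NETIF_F_GRO device, napi_gro_receive() is the drop-in
 * replacement for netif_receive_skb() in the poll handler; GRO then
 * decides whether to merge the skb into a held flow, hold it, or pass
 * it up immediately.  Sketch (hypothetical helper):
 */
static void example_gro_deliver(struct napi_struct *napi,
				struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(napi, skb);	/* consumes skb in all cases */
}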
96e93eab
HX
3134void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3135{
96e93eab
HX
3136 __skb_pull(skb, skb_headlen(skb));
3137 skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
3138
3139 napi->skb = skb;
3140}
3141EXPORT_SYMBOL(napi_reuse_skb);
3142
76620aaf 3143struct sk_buff *napi_get_frags(struct napi_struct *napi)
5d38a079 3144{
5d38a079 3145 struct sk_buff *skb = napi->skb;
5d38a079
HX
3146
3147 if (!skb) {
89d71a66
ED
3148 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3149 if (skb)
3150 napi->skb = skb;
80595d59 3151 }
96e93eab
HX
3152 return skb;
3153}
76620aaf 3154EXPORT_SYMBOL(napi_get_frags);
96e93eab 3155
c7c4b3b6
BH
3156gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3157 gro_result_t ret)
96e93eab 3158{
5d0d9be8
HX
3159 switch (ret) {
3160 case GRO_NORMAL:
86911732 3161 case GRO_HELD:
e76b69cc 3162 skb->protocol = eth_type_trans(skb, skb->dev);
86911732 3163
c7c4b3b6
BH
3164 if (ret == GRO_HELD)
3165 skb_gro_pull(skb, -ETH_HLEN);
3166 else if (netif_receive_skb(skb))
3167 ret = GRO_DROP;
86911732 3168 break;
5d38a079 3169
5d0d9be8 3170 case GRO_DROP:
5d0d9be8
HX
3171 case GRO_MERGED_FREE:
3172 napi_reuse_skb(napi, skb);
3173 break;
5b252f0c
BH
3174
3175 case GRO_MERGED:
3176 break;
5d0d9be8 3177 }
5d38a079 3178
c7c4b3b6 3179 return ret;
5d38a079 3180}
5d0d9be8
HX
3181EXPORT_SYMBOL(napi_frags_finish);
3182
76620aaf
HX
3183struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3184{
3185 struct sk_buff *skb = napi->skb;
3186 struct ethhdr *eth;
a5b1cf28
HX
3187 unsigned int hlen;
3188 unsigned int off;
76620aaf
HX
3189
3190 napi->skb = NULL;
3191
3192 skb_reset_mac_header(skb);
3193 skb_gro_reset_offset(skb);
3194
a5b1cf28
HX
3195 off = skb_gro_offset(skb);
3196 hlen = off + sizeof(*eth);
3197 eth = skb_gro_header_fast(skb, off);
3198 if (skb_gro_header_hard(skb, hlen)) {
3199 eth = skb_gro_header_slow(skb, hlen, off);
3200 if (unlikely(!eth)) {
3201 napi_reuse_skb(napi, skb);
3202 skb = NULL;
3203 goto out;
3204 }
76620aaf
HX
3205 }
3206
3207 skb_gro_pull(skb, sizeof(*eth));
3208
3209 /*
3210 * This works because the only protocols we care about don't require
3211 * special handling. We'll fix it up properly at the end.
3212 */
3213 skb->protocol = eth->h_proto;
3214
3215out:
3216 return skb;
3217}
3218EXPORT_SYMBOL(napi_frags_skb);
3219
c7c4b3b6 3220gro_result_t napi_gro_frags(struct napi_struct *napi)
5d0d9be8 3221{
76620aaf 3222 struct sk_buff *skb = napi_frags_skb(napi);
5d0d9be8
HX
3223
3224 if (!skb)
c7c4b3b6 3225 return GRO_DROP;
5d0d9be8
HX
3226
3227 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3228}
5d38a079
HX
3229EXPORT_SYMBOL(napi_gro_frags);
3230
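/*
 * A hedged sketch of the page-based GRO path built from
 * napi_get_frags()/napi_gro_frags(): the driver attaches its receive
 * page as a fragment of the napi-owned skb and hands it back.  The
 * page/offset/len arguments are hypothetical driver state.
 */
static void example_rx_page(struct napi_struct *napi, struct page *page,
			    int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);			/* drop on OOM */
		return;
	}

	skb_fill_page_desc(skb, 0, page, offset, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;

	napi_gro_frags(napi);	/* consumes or recycles napi->skb */
}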
bea3348e 3231static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
3232{
3233 int work = 0;
1da177e4
LT
3234 struct softnet_data *queue = &__get_cpu_var(softnet_data);
3235 unsigned long start_time = jiffies;
3236
bea3348e
SH
3237 napi->weight = weight_p;
3238 do {
1da177e4 3239 struct sk_buff *skb;
1da177e4 3240
152102c7
CG
3241 local_irq_disable();
3242 rps_lock(queue);
1da177e4 3243 skb = __skb_dequeue(&queue->input_pkt_queue);
bea3348e 3244 if (!skb) {
8f1ead2d 3245 __napi_complete(napi);
5a6d234e 3246 rps_unlock(queue);
e4008276 3247 local_irq_enable();
8f1ead2d 3248 break;
bea3348e 3249 }
fec5e652 3250 incr_input_queue_head(queue);
152102c7
CG
3251 rps_unlock(queue);
3252 local_irq_enable();
1da177e4 3253
0a9627f2 3254 __netif_receive_skb(skb);
bea3348e 3255 } while (++work < quota && jiffies == start_time);
1da177e4 3256
bea3348e
SH
3257 return work;
3258}
1da177e4 3259
bea3348e
SH
3260/**
3261 * __napi_schedule - schedule for receive
c4ea43c5 3262 * @n: entry to schedule
bea3348e
SH
3263 *
3264 * The entry's receive function will be scheduled to run
3265 */
b5606c2d 3266void __napi_schedule(struct napi_struct *n)
bea3348e
SH
3267{
3268 unsigned long flags;
1da177e4 3269
bea3348e
SH
3270 local_irq_save(flags);
3271 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
3272 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3273 local_irq_restore(flags);
1da177e4 3274}
bea3348e
SH
3275EXPORT_SYMBOL(__napi_schedule);
3276
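/*
 * The canonical caller of napi_schedule_prep()/__napi_schedule() is a
 * device interrupt handler: mask further RX interrupts, then defer the
 * work to the poll handler.  struct example_priv and
 * example_mask_irqs() are hypothetical driver pieces.
 */
static irqreturn_t example_isr(int irq, void *data)
{
	struct example_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		example_mask_irqs(priv);	/* stop RX interrupts */
		__napi_schedule(&priv->napi);	/* poll runs in NET_RX_SOFTIRQ */
	}
	return IRQ_HANDLED;
}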
d565b0a1
HX
3277void __napi_complete(struct napi_struct *n)
3278{
3279 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3280 BUG_ON(n->gro_list);
3281
3282 list_del(&n->poll_list);
3283 smp_mb__before_clear_bit();
3284 clear_bit(NAPI_STATE_SCHED, &n->state);
3285}
3286EXPORT_SYMBOL(__napi_complete);
3287
3288void napi_complete(struct napi_struct *n)
3289{
3290 unsigned long flags;
3291
3292 /*
3293 * don't let napi dequeue from the cpu poll list
3294 * just in case its running on a different cpu
3295 */
3296 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3297 return;
3298
3299 napi_gro_flush(n);
3300 local_irq_save(flags);
3301 __napi_complete(n);
3302 local_irq_restore(flags);
3303}
3304EXPORT_SYMBOL(napi_complete);
3305
3306void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3307 int (*poll)(struct napi_struct *, int), int weight)
3308{
3309 INIT_LIST_HEAD(&napi->poll_list);
4ae5544f 3310 napi->gro_count = 0;
d565b0a1 3311 napi->gro_list = NULL;
5d38a079 3312 napi->skb = NULL;
d565b0a1
HX
3313 napi->poll = poll;
3314 napi->weight = weight;
3315 list_add(&napi->dev_list, &dev->napi_list);
d565b0a1 3316 napi->dev = dev;
5d38a079 3317#ifdef CONFIG_NETPOLL
d565b0a1
HX
3318 spin_lock_init(&napi->poll_lock);
3319 napi->poll_owner = -1;
3320#endif
3321 set_bit(NAPI_STATE_SCHED, &napi->state);
3322}
3323EXPORT_SYMBOL(netif_napi_add);
3324
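/*
 * The matching poll handler pattern for netif_napi_add() and
 * napi_complete(): process at most 'budget' packets, and only if the
 * ring is drained call napi_complete() and re-enable interrupts.
 * example_clean_ring()/example_unmask_irqs() are hypothetical.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_priv *priv =
		container_of(napi, struct example_priv, napi);
	int work = example_clean_ring(priv, budget);	/* returns <= budget */

	if (work < budget) {
		napi_complete(napi);
		example_unmask_irqs(priv);	/* ISR may re-schedule us */
	}
	return work;
}

/* registered once at probe time, e.g.:
 *	netif_napi_add(dev, &priv->napi, example_poll, 64);
 */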
3325void netif_napi_del(struct napi_struct *napi)
3326{
3327 struct sk_buff *skb, *next;
3328
d7b06636 3329 list_del_init(&napi->dev_list);
76620aaf 3330 napi_free_frags(napi);
d565b0a1
HX
3331
3332 for (skb = napi->gro_list; skb; skb = next) {
3333 next = skb->next;
3334 skb->next = NULL;
3335 kfree_skb(skb);
3336 }
3337
3338 napi->gro_list = NULL;
4ae5544f 3339 napi->gro_count = 0;
d565b0a1
HX
3340}
3341EXPORT_SYMBOL(netif_napi_del);
3342
df334545 3343#ifdef CONFIG_RPS
0a9627f2
TH
3344/*
3345 * net_rps_action sends any pending IPI's for rps. This is only called from
3346 * softirq and interrupts must be enabled.
3347 */
3348static void net_rps_action(cpumask_t *mask)
3349{
3350 int cpu;
3351
3352 /* Send pending IPI's to kick RPS processing on remote cpus. */
3353 for_each_cpu_mask_nr(cpu, *mask) {
3354 struct softnet_data *queue = &per_cpu(softnet_data, cpu);
3355 if (cpu_online(cpu))
3356 __smp_call_function_single(cpu, &queue->csd, 0);
3357 }
3358 cpus_clear(*mask);
3359}
1e94d72f 3360#endif
1da177e4
LT
3361
3362static void net_rx_action(struct softirq_action *h)
3363{
bea3348e 3364 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
24f8b238 3365 unsigned long time_limit = jiffies + 2;
51b0bded 3366 int budget = netdev_budget;
53fb95d3 3367 void *have;
df334545 3368#ifdef CONFIG_RPS
0a9627f2
TH
3369 int select;
3370 struct rps_remote_softirq_cpus *rcpus;
1e94d72f 3371#endif
53fb95d3 3372
1da177e4
LT
3373 local_irq_disable();
3374
bea3348e
SH
3375 while (!list_empty(list)) {
3376 struct napi_struct *n;
3377 int work, weight;
1da177e4 3378
bea3348e 3379 /* If the softirq window is exhausted then punt.
24f8b238
SH
3380 * Allow this to run for 2 jiffies, which allows
3381 * an average latency of 1.5/HZ.
bea3348e 3382 */
24f8b238 3383 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
1da177e4
LT
3384 goto softnet_break;
3385
3386 local_irq_enable();
3387
bea3348e
SH
3388 /* Even though interrupts have been re-enabled, this
3389 * access is safe because interrupts can only add new
3390 * entries to the tail of this list, and only ->poll()
3391 * calls can remove this head entry from the list.
3392 */
e5e26d75 3393 n = list_first_entry(list, struct napi_struct, poll_list);
1da177e4 3394
bea3348e
SH
3395 have = netpoll_poll_lock(n);
3396
3397 weight = n->weight;
3398
0a7606c1
DM
3399 /* This NAPI_STATE_SCHED test is for avoiding a race
3400 * with netpoll's poll_napi(). Only the entity which
3401 * obtains the lock and sees NAPI_STATE_SCHED set will
3402 * actually make the ->poll() call. Therefore we avoid
3403 * accidentally calling ->poll() when NAPI is not scheduled.
3404 */
3405 work = 0;
4ea7e386 3406 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
0a7606c1 3407 work = n->poll(n, weight);
4ea7e386
NH
3408 trace_napi_poll(n);
3409 }
bea3348e
SH
3410
3411 WARN_ON_ONCE(work > weight);
3412
3413 budget -= work;
3414
3415 local_irq_disable();
3416
3417 /* Drivers must not modify the NAPI state if they
3418 * consume the entire weight. In such cases this code
3419 * still "owns" the NAPI instance and therefore can
3420 * move the instance around on the list at-will.
3421 */
fed17f30 3422 if (unlikely(work == weight)) {
ff780cd8
HX
3423 if (unlikely(napi_disable_pending(n))) {
3424 local_irq_enable();
3425 napi_complete(n);
3426 local_irq_disable();
3427 } else
fed17f30
DM
3428 list_move_tail(&n->poll_list, list);
3429 }
bea3348e
SH
3430
3431 netpoll_poll_unlock(have);
1da177e4
LT
3432 }
3433out:
df334545 3434#ifdef CONFIG_RPS
0a9627f2
TH
3435 rcpus = &__get_cpu_var(rps_remote_softirq_cpus);
3436 select = rcpus->select;
3437 rcpus->select ^= 1;
3438
515e06c4 3439 local_irq_enable();
bea3348e 3440
0a9627f2 3441 net_rps_action(&rcpus->mask[select]);
1e94d72f
TH
3442#else
3443 local_irq_enable();
3444#endif
0a9627f2 3445
db217334
CL
3446#ifdef CONFIG_NET_DMA
3447 /*
3448 * There may not be any more sk_buffs coming right now, so push
3449 * any pending DMA copies to hardware
3450 */
2ba05622 3451 dma_issue_pending_all();
db217334 3452#endif
bea3348e 3453
1da177e4
LT
3454 return;
3455
3456softnet_break:
3457 __get_cpu_var(netdev_rx_stat).time_squeeze++;
3458 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3459 goto out;
3460}
3461
d1b19dff 3462static gifconf_func_t *gifconf_list[NPROTO];
1da177e4
LT
3463
3464/**
3465 * register_gifconf - register a SIOCGIF handler
3466 * @family: Address family
3467 * @gifconf: Function handler
3468 *
3469 * Register protocol dependent address dumping routines. The handler
3470 * that is passed must not be freed or reused until it has been replaced
3471 * by another handler.
3472 */
d1b19dff 3473int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
1da177e4
LT
3474{
3475 if (family >= NPROTO)
3476 return -EINVAL;
3477 gifconf_list[family] = gifconf;
3478 return 0;
3479}
d1b19dff 3480EXPORT_SYMBOL(register_gifconf);
1da177e4
LT
3481
3482
3483/*
3484 * Map an interface index to its name (SIOCGIFNAME)
3485 */
3486
3487/*
3488 * We need this ioctl for efficient implementation of the
3489 * if_indextoname() function required by the IPv6 API. Without
3490 * it, we would have to search all the interfaces to find a
3491 * match. --pb
3492 */
3493
881d966b 3494static int dev_ifname(struct net *net, struct ifreq __user *arg)
1da177e4
LT
3495{
3496 struct net_device *dev;
3497 struct ifreq ifr;
3498
3499 /*
3500 * Fetch the caller's info block.
3501 */
3502
3503 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3504 return -EFAULT;
3505
fb699dfd
ED
3506 rcu_read_lock();
3507 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
1da177e4 3508 if (!dev) {
fb699dfd 3509 rcu_read_unlock();
1da177e4
LT
3510 return -ENODEV;
3511 }
3512
3513 strcpy(ifr.ifr_name, dev->name);
fb699dfd 3514 rcu_read_unlock();
1da177e4
LT
3515
3516 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3517 return -EFAULT;
3518 return 0;
3519}
3520
3521/*
3522 * Perform a SIOCGIFCONF call. This structure will change
3523 * size eventually, and there is nothing I can do about it.
3524 * Thus we will need a 'compatibility mode'.
3525 */
3526
881d966b 3527static int dev_ifconf(struct net *net, char __user *arg)
1da177e4
LT
3528{
3529 struct ifconf ifc;
3530 struct net_device *dev;
3531 char __user *pos;
3532 int len;
3533 int total;
3534 int i;
3535
3536 /*
3537 * Fetch the caller's info block.
3538 */
3539
3540 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
3541 return -EFAULT;
3542
3543 pos = ifc.ifc_buf;
3544 len = ifc.ifc_len;
3545
3546 /*
3547 * Loop over the interfaces, and write an info block for each.
3548 */
3549
3550 total = 0;
881d966b 3551 for_each_netdev(net, dev) {
1da177e4
LT
3552 for (i = 0; i < NPROTO; i++) {
3553 if (gifconf_list[i]) {
3554 int done;
3555 if (!pos)
3556 done = gifconf_list[i](dev, NULL, 0);
3557 else
3558 done = gifconf_list[i](dev, pos + total,
3559 len - total);
3560 if (done < 0)
3561 return -EFAULT;
3562 total += done;
3563 }
3564 }
4ec93edb 3565 }
1da177e4
LT
3566
3567 /*
3568 * All done. Write the updated control block back to the caller.
3569 */
3570 ifc.ifc_len = total;
3571
3572 /*
3573 * Both BSD and Solaris return 0 here, so we do too.
3574 */
3575 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
3576}
3577
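/*
 * For reference, the userspace side of the SIOCGIFNAME ioctl handled
 * by dev_ifname() above -- roughly what if_indextoname() boils down
 * to.  A hedged sketch with error handling trimmed:
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_ifindex = index;
 *	ioctl(fd, SIOCGIFNAME, &ifr);
 *	printf("%s\n", ifr.ifr_name);
 *	close(fd);
 */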
3578#ifdef CONFIG_PROC_FS
3579/*
3580 * This is invoked by the /proc filesystem handler to display a device
3581 * in detail.
3582 */
7562f876 3583void *dev_seq_start(struct seq_file *seq, loff_t *pos)
c6d14c84 3584 __acquires(RCU)
1da177e4 3585{
e372c414 3586 struct net *net = seq_file_net(seq);
7562f876 3587 loff_t off;
1da177e4 3588 struct net_device *dev;
1da177e4 3589
c6d14c84 3590 rcu_read_lock();
7562f876
PE
3591 if (!*pos)
3592 return SEQ_START_TOKEN;
1da177e4 3593
7562f876 3594 off = 1;
c6d14c84 3595 for_each_netdev_rcu(net, dev)
7562f876
PE
3596 if (off++ == *pos)
3597 return dev;
1da177e4 3598
7562f876 3599 return NULL;
1da177e4
LT
3600}
3601
3602void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3603{
c6d14c84
ED
3604 struct net_device *dev = (v == SEQ_START_TOKEN) ?
3605 first_net_device(seq_file_net(seq)) :
3606 next_net_device((struct net_device *)v);
3607
1da177e4 3608 ++*pos;
c6d14c84 3609 return rcu_dereference(dev);
1da177e4
LT
3610}
3611
3612void dev_seq_stop(struct seq_file *seq, void *v)
c6d14c84 3613 __releases(RCU)
1da177e4 3614{
c6d14c84 3615 rcu_read_unlock();
1da177e4
LT
3616}
3617
3618static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3619{
eeda3fd6 3620 const struct net_device_stats *stats = dev_get_stats(dev);
1da177e4 3621
2d13bafe 3622 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
5a1b5898
RR
3623 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
3624 dev->name, stats->rx_bytes, stats->rx_packets,
3625 stats->rx_errors,
3626 stats->rx_dropped + stats->rx_missed_errors,
3627 stats->rx_fifo_errors,
3628 stats->rx_length_errors + stats->rx_over_errors +
3629 stats->rx_crc_errors + stats->rx_frame_errors,
3630 stats->rx_compressed, stats->multicast,
3631 stats->tx_bytes, stats->tx_packets,
3632 stats->tx_errors, stats->tx_dropped,
3633 stats->tx_fifo_errors, stats->collisions,
3634 stats->tx_carrier_errors +
3635 stats->tx_aborted_errors +
3636 stats->tx_window_errors +
3637 stats->tx_heartbeat_errors,
3638 stats->tx_compressed);
1da177e4
LT
3639}
3640
3641/*
3642 * Called from the PROCfs module. This now uses the new arbitrary sized
3643 * /proc/net interface to create /proc/net/dev
3644 */
3645static int dev_seq_show(struct seq_file *seq, void *v)
3646{
3647 if (v == SEQ_START_TOKEN)
3648 seq_puts(seq, "Inter-| Receive "
3649 " | Transmit\n"
3650 " face |bytes packets errs drop fifo frame "
3651 "compressed multicast|bytes packets errs "
3652 "drop fifo colls carrier compressed\n");
3653 else
3654 dev_seq_printf_stats(seq, v);
3655 return 0;
3656}
3657
3658static struct netif_rx_stats *softnet_get_online(loff_t *pos)
3659{
3660 struct netif_rx_stats *rc = NULL;
3661
0c0b0aca 3662 while (*pos < nr_cpu_ids)
4ec93edb 3663 if (cpu_online(*pos)) {
1da177e4
LT
3664 rc = &per_cpu(netdev_rx_stat, *pos);
3665 break;
3666 } else
3667 ++*pos;
3668 return rc;
3669}
3670
3671static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
3672{
3673 return softnet_get_online(pos);
3674}
3675
3676static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3677{
3678 ++*pos;
3679 return softnet_get_online(pos);
3680}
3681
3682static void softnet_seq_stop(struct seq_file *seq, void *v)
3683{
3684}
3685
3686static int softnet_seq_show(struct seq_file *seq, void *v)
3687{
3688 struct netif_rx_stats *s = v;
3689
0a9627f2 3690 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
31aa02c5 3691 s->total, s->dropped, s->time_squeeze, 0,
c1ebcdb8 3692 0, 0, 0, 0, /* was fastroute */
0a9627f2 3693 s->cpu_collision, s->received_rps);
1da177e4
LT
3694 return 0;
3695}
3696
f690808e 3697static const struct seq_operations dev_seq_ops = {
1da177e4
LT
3698 .start = dev_seq_start,
3699 .next = dev_seq_next,
3700 .stop = dev_seq_stop,
3701 .show = dev_seq_show,
3702};
3703
3704static int dev_seq_open(struct inode *inode, struct file *file)
3705{
e372c414
DL
3706 return seq_open_net(inode, file, &dev_seq_ops,
3707 sizeof(struct seq_net_private));
1da177e4
LT
3708}
3709
9a32144e 3710static const struct file_operations dev_seq_fops = {
1da177e4
LT
3711 .owner = THIS_MODULE,
3712 .open = dev_seq_open,
3713 .read = seq_read,
3714 .llseek = seq_lseek,
e372c414 3715 .release = seq_release_net,
1da177e4
LT
3716};
3717
f690808e 3718static const struct seq_operations softnet_seq_ops = {
1da177e4
LT
3719 .start = softnet_seq_start,
3720 .next = softnet_seq_next,
3721 .stop = softnet_seq_stop,
3722 .show = softnet_seq_show,
3723};
3724
3725static int softnet_seq_open(struct inode *inode, struct file *file)
3726{
3727 return seq_open(file, &softnet_seq_ops);
3728}
3729
9a32144e 3730static const struct file_operations softnet_seq_fops = {
1da177e4
LT
3731 .owner = THIS_MODULE,
3732 .open = softnet_seq_open,
3733 .read = seq_read,
3734 .llseek = seq_lseek,
3735 .release = seq_release,
3736};
3737
0e1256ff
SH
3738static void *ptype_get_idx(loff_t pos)
3739{
3740 struct packet_type *pt = NULL;
3741 loff_t i = 0;
3742 int t;
3743
3744 list_for_each_entry_rcu(pt, &ptype_all, list) {
3745 if (i == pos)
3746 return pt;
3747 ++i;
3748 }
3749
82d8a867 3750 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
0e1256ff
SH
3751 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
3752 if (i == pos)
3753 return pt;
3754 ++i;
3755 }
3756 }
3757 return NULL;
3758}
3759
3760static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
72348a42 3761 __acquires(RCU)
0e1256ff
SH
3762{
3763 rcu_read_lock();
3764 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
3765}
3766
3767static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3768{
3769 struct packet_type *pt;
3770 struct list_head *nxt;
3771 int hash;
3772
3773 ++*pos;
3774 if (v == SEQ_START_TOKEN)
3775 return ptype_get_idx(0);
3776
3777 pt = v;
3778 nxt = pt->list.next;
3779 if (pt->type == htons(ETH_P_ALL)) {
3780 if (nxt != &ptype_all)
3781 goto found;
3782 hash = 0;
3783 nxt = ptype_base[0].next;
3784 } else
82d8a867 3785 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
0e1256ff
SH
3786
3787 while (nxt == &ptype_base[hash]) {
82d8a867 3788 if (++hash >= PTYPE_HASH_SIZE)
0e1256ff
SH
3789 return NULL;
3790 nxt = ptype_base[hash].next;
3791 }
3792found:
3793 return list_entry(nxt, struct packet_type, list);
3794}
3795
3796static void ptype_seq_stop(struct seq_file *seq, void *v)
72348a42 3797 __releases(RCU)
0e1256ff
SH
3798{
3799 rcu_read_unlock();
3800}
3801
0e1256ff
SH
3802static int ptype_seq_show(struct seq_file *seq, void *v)
3803{
3804 struct packet_type *pt = v;
3805
3806 if (v == SEQ_START_TOKEN)
3807 seq_puts(seq, "Type Device Function\n");
c346dca1 3808 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
0e1256ff
SH
3809 if (pt->type == htons(ETH_P_ALL))
3810 seq_puts(seq, "ALL ");
3811 else
3812 seq_printf(seq, "%04x", ntohs(pt->type));
3813
908cd2da
AD
3814 seq_printf(seq, " %-8s %pF\n",
3815 pt->dev ? pt->dev->name : "", pt->func);
0e1256ff
SH
3816 }
3817
3818 return 0;
3819}
3820
3821static const struct seq_operations ptype_seq_ops = {
3822 .start = ptype_seq_start,
3823 .next = ptype_seq_next,
3824 .stop = ptype_seq_stop,
3825 .show = ptype_seq_show,
3826};
3827
3828static int ptype_seq_open(struct inode *inode, struct file *file)
3829{
2feb27db
PE
3830 return seq_open_net(inode, file, &ptype_seq_ops,
3831 sizeof(struct seq_net_private));
0e1256ff
SH
3832}
3833
3834static const struct file_operations ptype_seq_fops = {
3835 .owner = THIS_MODULE,
3836 .open = ptype_seq_open,
3837 .read = seq_read,
3838 .llseek = seq_lseek,
2feb27db 3839 .release = seq_release_net,
0e1256ff
SH
3840};
3841
3842
4665079c 3843static int __net_init dev_proc_net_init(struct net *net)
1da177e4
LT
3844{
3845 int rc = -ENOMEM;
3846
881d966b 3847 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
1da177e4 3848 goto out;
881d966b 3849 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
1da177e4 3850 goto out_dev;
881d966b 3851 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
457c4cbc 3852 goto out_softnet;
0e1256ff 3853
881d966b 3854 if (wext_proc_init(net))
457c4cbc 3855 goto out_ptype;
1da177e4
LT
3856 rc = 0;
3857out:
3858 return rc;
457c4cbc 3859out_ptype:
881d966b 3860 proc_net_remove(net, "ptype");
1da177e4 3861out_softnet:
881d966b 3862 proc_net_remove(net, "softnet_stat");
1da177e4 3863out_dev:
881d966b 3864 proc_net_remove(net, "dev");
1da177e4
LT
3865 goto out;
3866}
881d966b 3867
4665079c 3868static void __net_exit dev_proc_net_exit(struct net *net)
881d966b
EB
3869{
3870 wext_proc_exit(net);
3871
3872 proc_net_remove(net, "ptype");
3873 proc_net_remove(net, "softnet_stat");
3874 proc_net_remove(net, "dev");
3875}
3876
022cbae6 3877static struct pernet_operations __net_initdata dev_proc_ops = {
881d966b
EB
3878 .init = dev_proc_net_init,
3879 .exit = dev_proc_net_exit,
3880};
3881
3882static int __init dev_proc_init(void)
3883{
3884 return register_pernet_subsys(&dev_proc_ops);
3885}
1da177e4
LT
3886#else
3887#define dev_proc_init() 0
3888#endif /* CONFIG_PROC_FS */
3889
3890
3891/**
3892 * netdev_set_master - set up master/slave pair
3893 * @slave: slave device
3894 * @master: new master device
3895 *
3896 * Changes the master device of the slave. Pass %NULL to break the
3897 * bonding. The caller must hold the RTNL semaphore. On a failure
3898 * a negative errno code is returned. On success the reference counts
3899 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
3900 * function returns zero.
3901 */
3902int netdev_set_master(struct net_device *slave, struct net_device *master)
3903{
3904 struct net_device *old = slave->master;
3905
3906 ASSERT_RTNL();
3907
3908 if (master) {
3909 if (old)
3910 return -EBUSY;
3911 dev_hold(master);
3912 }
3913
3914 slave->master = master;
4ec93edb 3915
283f2fe8
ED
3916 if (old) {
3917 synchronize_net();
1da177e4 3918 dev_put(old);
283f2fe8 3919 }
1da177e4
LT
3920 if (master)
3921 slave->flags |= IFF_SLAVE;
3922 else
3923 slave->flags &= ~IFF_SLAVE;
3924
3925 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
3926 return 0;
3927}
d1b19dff 3928EXPORT_SYMBOL(netdev_set_master);
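/*
 * Illustrative sketch (an assumption, not code from this file): how a
 * bonding-style driver might pair and unpair devices with
 * netdev_set_master(). The example_* names are hypothetical.
 */
static int example_enslave(struct net_device *bond_dev,
			   struct net_device *slave_dev)
{
	int err;

	/* netdev_set_master() must be called under the RTNL lock */
	rtnl_lock();
	err = netdev_set_master(slave_dev, bond_dev);
	rtnl_unlock();
	return err;
}

static void example_release(struct net_device *slave_dev)
{
	rtnl_lock();
	netdev_set_master(slave_dev, NULL);	/* break the pairing */
	rtnl_unlock();
}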
1da177e4 3929
b6c40d68
PM
3930static void dev_change_rx_flags(struct net_device *dev, int flags)
3931{
d314774c
SH
3932 const struct net_device_ops *ops = dev->netdev_ops;
3933
3934 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
3935 ops->ndo_change_rx_flags(dev, flags);
b6c40d68
PM
3936}
3937
dad9b335 3938static int __dev_set_promiscuity(struct net_device *dev, int inc)
1da177e4
LT
3939{
3940 unsigned short old_flags = dev->flags;
8192b0c4
DH
3941 uid_t uid;
3942 gid_t gid;
1da177e4 3943
24023451
PM
3944 ASSERT_RTNL();
3945
dad9b335
WC
3946 dev->flags |= IFF_PROMISC;
3947 dev->promiscuity += inc;
3948 if (dev->promiscuity == 0) {
3949 /*
3950 * Avoid overflow.
 3951 * If inc would overflow the counter, leave promiscuity untouched and return an error.
3952 */
3953 if (inc < 0)
3954 dev->flags &= ~IFF_PROMISC;
3955 else {
3956 dev->promiscuity -= inc;
 3957 printk(KERN_WARNING "%s: promiscuity counter overflow; "
 3958 "set promiscuity failed, the promiscuity feature "
 3959 "of this device may be unreliable.\n", dev->name);
3960 return -EOVERFLOW;
3961 }
3962 }
52609c0b 3963 if (dev->flags != old_flags) {
1da177e4
LT
3964 printk(KERN_INFO "device %s %s promiscuous mode\n",
3965 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4ec93edb 3966 "left");
8192b0c4
DH
3967 if (audit_enabled) {
3968 current_uid_gid(&uid, &gid);
7759db82
KHK
3969 audit_log(current->audit_context, GFP_ATOMIC,
3970 AUDIT_ANOM_PROMISCUOUS,
3971 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
3972 dev->name, (dev->flags & IFF_PROMISC),
3973 (old_flags & IFF_PROMISC),
3974 audit_get_loginuid(current),
8192b0c4 3975 uid, gid,
7759db82 3976 audit_get_sessionid(current));
8192b0c4 3977 }
24023451 3978
b6c40d68 3979 dev_change_rx_flags(dev, IFF_PROMISC);
1da177e4 3980 }
dad9b335 3981 return 0;
1da177e4
LT
3982}
3983
4417da66
PM
3984/**
3985 * dev_set_promiscuity - update promiscuity count on a device
3986 * @dev: device
3987 * @inc: modifier
3988 *
3989 * Add or remove promiscuity from a device. While the count in the device
3990 * remains above zero the interface remains promiscuous. Once it hits zero
3991 * the device reverts back to normal filtering operation. A negative inc
3992 * value is used to drop promiscuity on the device.
dad9b335 3993 * Return 0 if successful or a negative errno code on error.
4417da66 3994 */
dad9b335 3995int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66
PM
3996{
3997 unsigned short old_flags = dev->flags;
dad9b335 3998 int err;
4417da66 3999
dad9b335 4000 err = __dev_set_promiscuity(dev, inc);
4b5a698e 4001 if (err < 0)
dad9b335 4002 return err;
4417da66
PM
4003 if (dev->flags != old_flags)
4004 dev_set_rx_mode(dev);
dad9b335 4005 return err;
4417da66 4006}
d1b19dff 4007EXPORT_SYMBOL(dev_set_promiscuity);
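/*
 * Illustrative sketch (assumption): because promiscuity is a counter,
 * each user must pair every +1 with a later -1 instead of writing the
 * IFF_PROMISC flag directly. example_capture_* are hypothetical.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one reference */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}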
4417da66 4008
1da177e4
LT
4009/**
4010 * dev_set_allmulti - update allmulti count on a device
4011 * @dev: device
4012 * @inc: modifier
4013 *
4014 * Add or remove reception of all multicast frames to a device. While the
4015 * count in the device remains above zero the interface remains listening
 4016 * to all multicast frames. Once it hits zero the device reverts back to normal
4017 * filtering operation. A negative @inc value is used to drop the counter
4018 * when releasing a resource needing all multicasts.
dad9b335 4019 * Return 0 if successful or a negative errno code on error.
1da177e4
LT
4020 */
4021
dad9b335 4022int dev_set_allmulti(struct net_device *dev, int inc)
1da177e4
LT
4023{
4024 unsigned short old_flags = dev->flags;
4025
24023451
PM
4026 ASSERT_RTNL();
4027
1da177e4 4028 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
4029 dev->allmulti += inc;
4030 if (dev->allmulti == 0) {
4031 /*
4032 * Avoid overflow.
 4033 * If inc would overflow the counter, leave allmulti untouched and return an error.
4034 */
4035 if (inc < 0)
4036 dev->flags &= ~IFF_ALLMULTI;
4037 else {
4038 dev->allmulti -= inc;
 4039 printk(KERN_WARNING "%s: allmulti counter overflow; "
 4040 "set allmulti failed, the allmulti feature of "
 4041 "this device may be unreliable.\n", dev->name);
4042 return -EOVERFLOW;
4043 }
4044 }
24023451 4045 if (dev->flags ^ old_flags) {
b6c40d68 4046 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 4047 dev_set_rx_mode(dev);
24023451 4048 }
dad9b335 4049 return 0;
4417da66 4050}
d1b19dff 4051EXPORT_SYMBOL(dev_set_allmulti);
4417da66
PM
4052
4053/*
4054 * Upload unicast and multicast address lists to device and
4055 * configure RX filtering. When the device doesn't support unicast
53ccaae1 4056 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
4057 * are present.
4058 */
4059void __dev_set_rx_mode(struct net_device *dev)
4060{
d314774c
SH
4061 const struct net_device_ops *ops = dev->netdev_ops;
4062
4417da66
PM
4063 /* dev_open will call this function so the list will stay sane. */
4064 if (!(dev->flags&IFF_UP))
4065 return;
4066
4067 if (!netif_device_present(dev))
40b77c94 4068 return;
4417da66 4069
d314774c
SH
4070 if (ops->ndo_set_rx_mode)
4071 ops->ndo_set_rx_mode(dev);
4417da66
PM
4072 else {
4073 /* Unicast addresses changes may only happen under the rtnl,
4074 * therefore calling __dev_set_promiscuity here is safe.
4075 */
32e7bfc4 4076 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4417da66
PM
4077 __dev_set_promiscuity(dev, 1);
4078 dev->uc_promisc = 1;
32e7bfc4 4079 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4417da66
PM
4080 __dev_set_promiscuity(dev, -1);
4081 dev->uc_promisc = 0;
4082 }
4083
d314774c
SH
4084 if (ops->ndo_set_multicast_list)
4085 ops->ndo_set_multicast_list(dev);
4417da66
PM
4086 }
4087}
4088
4089void dev_set_rx_mode(struct net_device *dev)
4090{
b9e40857 4091 netif_addr_lock_bh(dev);
4417da66 4092 __dev_set_rx_mode(dev);
b9e40857 4093 netif_addr_unlock_bh(dev);
1da177e4
LT
4094}
4095
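/*
 * Illustrative sketch (assumption): what a driver's ndo_set_rx_mode
 * callback typically does with the state maintained above. It is
 * invoked under netif_addr_lock_bh(), so it must not sleep. The
 * example_hw_* helpers are hypothetical hardware accessors.
 */
static void example_hw_accept_all(struct net_device *dev);
static void example_hw_accept_all_mc(struct net_device *dev);
static void example_hw_clear_filters(struct net_device *dev);
static void example_hw_add_mc_filter(struct net_device *dev, const u8 *addr);

static void example_ndo_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		example_hw_accept_all(dev);		/* hypothetical */
		return;
	}
	example_hw_clear_filters(dev);			/* hypothetical */
	netdev_for_each_mc_addr(ha, dev)
		example_hw_add_mc_filter(dev, ha->addr);/* hypothetical */
	if (dev->flags & IFF_ALLMULTI)
		example_hw_accept_all_mc(dev);		/* hypothetical */
}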
f0db275a
SH
4096/**
4097 * dev_get_flags - get flags reported to userspace
4098 * @dev: device
4099 *
4100 * Get the combination of flag bits exported through APIs to userspace.
4101 */
1da177e4
LT
4102unsigned dev_get_flags(const struct net_device *dev)
4103{
4104 unsigned flags;
4105
4106 flags = (dev->flags & ~(IFF_PROMISC |
4107 IFF_ALLMULTI |
b00055aa
SR
4108 IFF_RUNNING |
4109 IFF_LOWER_UP |
4110 IFF_DORMANT)) |
1da177e4
LT
4111 (dev->gflags & (IFF_PROMISC |
4112 IFF_ALLMULTI));
4113
b00055aa
SR
4114 if (netif_running(dev)) {
4115 if (netif_oper_up(dev))
4116 flags |= IFF_RUNNING;
4117 if (netif_carrier_ok(dev))
4118 flags |= IFF_LOWER_UP;
4119 if (netif_dormant(dev))
4120 flags |= IFF_DORMANT;
4121 }
1da177e4
LT
4122
4123 return flags;
4124}
d1b19dff 4125EXPORT_SYMBOL(dev_get_flags);
1da177e4 4126
bd380811 4127int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 4128{
1da177e4 4129 int old_flags = dev->flags;
bd380811 4130 int ret;
1da177e4 4131
24023451
PM
4132 ASSERT_RTNL();
4133
1da177e4
LT
4134 /*
4135 * Set the flags on our device.
4136 */
4137
4138 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4139 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4140 IFF_AUTOMEDIA)) |
4141 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4142 IFF_ALLMULTI));
4143
4144 /*
4145 * Load in the correct multicast list now the flags have changed.
4146 */
4147
b6c40d68
PM
4148 if ((old_flags ^ flags) & IFF_MULTICAST)
4149 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 4150
4417da66 4151 dev_set_rx_mode(dev);
1da177e4
LT
4152
4153 /*
 4154 * Have we downed the interface? We handle IFF_UP ourselves
4155 * according to user attempts to set it, rather than blindly
4156 * setting it.
4157 */
4158
4159 ret = 0;
4160 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
bd380811 4161 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4
LT
4162
4163 if (!ret)
4417da66 4164 dev_set_rx_mode(dev);
1da177e4
LT
4165 }
4166
1da177e4 4167 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff
ED
4168 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4169
1da177e4
LT
4170 dev->gflags ^= IFF_PROMISC;
4171 dev_set_promiscuity(dev, inc);
4172 }
4173
4174 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 4175 is important. Some (broken) drivers set IFF_PROMISC when
 4176 IFF_ALLMULTI is requested, without asking us and without reporting it.
4177 */
4178 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
4179 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4180
1da177e4
LT
4181 dev->gflags ^= IFF_ALLMULTI;
4182 dev_set_allmulti(dev, inc);
4183 }
4184
bd380811
PM
4185 return ret;
4186}
4187
4188void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4189{
4190 unsigned int changes = dev->flags ^ old_flags;
4191
4192 if (changes & IFF_UP) {
4193 if (dev->flags & IFF_UP)
4194 call_netdevice_notifiers(NETDEV_UP, dev);
4195 else
4196 call_netdevice_notifiers(NETDEV_DOWN, dev);
4197 }
4198
4199 if (dev->flags & IFF_UP &&
4200 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4201 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4202}
4203
4204/**
4205 * dev_change_flags - change device settings
4206 * @dev: device
4207 * @flags: device state flags
4208 *
4209 * Change settings on device based state flags. The flags are
4210 * in the userspace exported format.
4211 */
4212int dev_change_flags(struct net_device *dev, unsigned flags)
4213{
4214 int ret, changes;
4215 int old_flags = dev->flags;
4216
4217 ret = __dev_change_flags(dev, flags);
4218 if (ret < 0)
4219 return ret;
4220
4221 changes = old_flags ^ dev->flags;
7c355f53
TG
4222 if (changes)
4223 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
1da177e4 4224
bd380811 4225 __dev_notify_flags(dev, old_flags);
1da177e4
LT
4226 return ret;
4227}
d1b19dff 4228EXPORT_SYMBOL(dev_change_flags);
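/*
 * Illustrative sketch (assumption): the in-kernel equivalent of
 * "ip link set dev ... up/down" via the flags interface.
 */
static int example_set_link_up(struct net_device *dev, bool up)
{
	unsigned flags;
	int err;

	rtnl_lock();
	flags = dev->flags;
	err = dev_change_flags(dev, up ? flags | IFF_UP
				       : flags & ~IFF_UP);
	rtnl_unlock();
	return err;
}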
1da177e4 4229
f0db275a
SH
4230/**
4231 * dev_set_mtu - Change maximum transfer unit
4232 * @dev: device
4233 * @new_mtu: new transfer unit
4234 *
4235 * Change the maximum transfer size of the network device.
4236 */
1da177e4
LT
4237int dev_set_mtu(struct net_device *dev, int new_mtu)
4238{
d314774c 4239 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4240 int err;
4241
4242 if (new_mtu == dev->mtu)
4243 return 0;
4244
 4245 /* MTU must not be negative. */
4246 if (new_mtu < 0)
4247 return -EINVAL;
4248
4249 if (!netif_device_present(dev))
4250 return -ENODEV;
4251
4252 err = 0;
d314774c
SH
4253 if (ops->ndo_change_mtu)
4254 err = ops->ndo_change_mtu(dev, new_mtu);
1da177e4
LT
4255 else
4256 dev->mtu = new_mtu;
d314774c 4257
1da177e4 4258 if (!err && dev->flags & IFF_UP)
056925ab 4259 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
1da177e4
LT
4260 return err;
4261}
d1b19dff 4262EXPORT_SYMBOL(dev_set_mtu);
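/*
 * Illustrative sketch (assumption): a tunnel-style driver shrinking its
 * own MTU to leave room for an encapsulation header on the underlying
 * device. EXAMPLE_HDR_LEN is hypothetical.
 */
#define EXAMPLE_HDR_LEN 8

static int example_fit_mtu(struct net_device *lower, struct net_device *tun)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(tun, lower->mtu - EXAMPLE_HDR_LEN);
	rtnl_unlock();
	return err;
}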
1da177e4 4263
f0db275a
SH
4264/**
4265 * dev_set_mac_address - Change Media Access Control Address
4266 * @dev: device
4267 * @sa: new address
4268 *
4269 * Change the hardware (MAC) address of the device
4270 */
1da177e4
LT
4271int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4272{
d314774c 4273 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
4274 int err;
4275
d314774c 4276 if (!ops->ndo_set_mac_address)
1da177e4
LT
4277 return -EOPNOTSUPP;
4278 if (sa->sa_family != dev->type)
4279 return -EINVAL;
4280 if (!netif_device_present(dev))
4281 return -ENODEV;
d314774c 4282 err = ops->ndo_set_mac_address(dev, sa);
1da177e4 4283 if (!err)
056925ab 4284 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
4285 return err;
4286}
d1b19dff 4287EXPORT_SYMBOL(dev_set_mac_address);
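/*
 * Illustrative sketch (assumption): building the struct sockaddr that
 * dev_set_mac_address() expects. sa_family must match dev->type or the
 * call fails with -EINVAL, as checked above.
 */
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;		/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, mac, dev->addr_len);
	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}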
1da177e4
LT
4288
4289/*
3710becf 4290 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
1da177e4 4291 */
14e3e079 4292static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
1da177e4
LT
4293{
4294 int err;
3710becf 4295 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
1da177e4
LT
4296
4297 if (!dev)
4298 return -ENODEV;
4299
4300 switch (cmd) {
d1b19dff
ED
4301 case SIOCGIFFLAGS: /* Get interface flags */
4302 ifr->ifr_flags = (short) dev_get_flags(dev);
4303 return 0;
1da177e4 4304
d1b19dff
ED
4305 case SIOCGIFMETRIC: /* Get the metric on the interface
4306 (currently unused) */
4307 ifr->ifr_metric = 0;
4308 return 0;
1da177e4 4309
d1b19dff
ED
4310 case SIOCGIFMTU: /* Get the MTU of a device */
4311 ifr->ifr_mtu = dev->mtu;
4312 return 0;
1da177e4 4313
d1b19dff
ED
4314 case SIOCGIFHWADDR:
4315 if (!dev->addr_len)
4316 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4317 else
4318 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4319 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4320 ifr->ifr_hwaddr.sa_family = dev->type;
4321 return 0;
1da177e4 4322
d1b19dff
ED
4323 case SIOCGIFSLAVE:
4324 err = -EINVAL;
4325 break;
14e3e079 4326
d1b19dff
ED
4327 case SIOCGIFMAP:
4328 ifr->ifr_map.mem_start = dev->mem_start;
4329 ifr->ifr_map.mem_end = dev->mem_end;
4330 ifr->ifr_map.base_addr = dev->base_addr;
4331 ifr->ifr_map.irq = dev->irq;
4332 ifr->ifr_map.dma = dev->dma;
4333 ifr->ifr_map.port = dev->if_port;
4334 return 0;
14e3e079 4335
d1b19dff
ED
4336 case SIOCGIFINDEX:
4337 ifr->ifr_ifindex = dev->ifindex;
4338 return 0;
14e3e079 4339
d1b19dff
ED
4340 case SIOCGIFTXQLEN:
4341 ifr->ifr_qlen = dev->tx_queue_len;
4342 return 0;
14e3e079 4343
d1b19dff
ED
4344 default:
4345 /* dev_ioctl() should ensure this case
4346 * is never reached
4347 */
4348 WARN_ON(1);
4349 err = -EINVAL;
4350 break;
14e3e079
JG
4351
4352 }
4353 return err;
4354}
4355
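/*
 * Illustrative userspace sketch (not part of this file): fetching a MAC
 * address through the read-only SIOCGIFHWADDR path above. Alias colon
 * handling ("eth0:1") is done by dev_ioctl() below before we get here.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_arp.h>

static int example_print_mac(int fd, const char *ifname)
{
	struct ifreq ifr;
	unsigned char *a;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFHWADDR, &ifr) < 0)
		return -1;
	if (ifr.ifr_hwaddr.sa_family != ARPHRD_ETHER)
		return -1;			/* not an Ethernet device */
	a = (unsigned char *)ifr.ifr_hwaddr.sa_data;
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       a[0], a[1], a[2], a[3], a[4], a[5]);
	return 0;
}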
4356/*
4357 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4358 */
4359static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4360{
4361 int err;
4362 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5f2f6da7 4363 const struct net_device_ops *ops;
14e3e079
JG
4364
4365 if (!dev)
4366 return -ENODEV;
4367
5f2f6da7
JP
4368 ops = dev->netdev_ops;
4369
14e3e079 4370 switch (cmd) {
d1b19dff
ED
4371 case SIOCSIFFLAGS: /* Set interface flags */
4372 return dev_change_flags(dev, ifr->ifr_flags);
14e3e079 4373
d1b19dff
ED
4374 case SIOCSIFMETRIC: /* Set the metric on the interface
4375 (currently unused) */
4376 return -EOPNOTSUPP;
14e3e079 4377
d1b19dff
ED
4378 case SIOCSIFMTU: /* Set the MTU of a device */
4379 return dev_set_mtu(dev, ifr->ifr_mtu);
1da177e4 4380
d1b19dff
ED
4381 case SIOCSIFHWADDR:
4382 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
1da177e4 4383
d1b19dff
ED
4384 case SIOCSIFHWBROADCAST:
4385 if (ifr->ifr_hwaddr.sa_family != dev->type)
4386 return -EINVAL;
4387 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4388 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4389 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4390 return 0;
1da177e4 4391
d1b19dff
ED
4392 case SIOCSIFMAP:
4393 if (ops->ndo_set_config) {
1da177e4
LT
4394 if (!netif_device_present(dev))
4395 return -ENODEV;
d1b19dff
ED
4396 return ops->ndo_set_config(dev, &ifr->ifr_map);
4397 }
4398 return -EOPNOTSUPP;
1da177e4 4399
d1b19dff
ED
4400 case SIOCADDMULTI:
4401 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4402 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4403 return -EINVAL;
4404 if (!netif_device_present(dev))
4405 return -ENODEV;
22bedad3 4406 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
d1b19dff
ED
4407
4408 case SIOCDELMULTI:
4409 if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
4410 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4411 return -EINVAL;
4412 if (!netif_device_present(dev))
4413 return -ENODEV;
22bedad3 4414 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
1da177e4 4415
d1b19dff
ED
4416 case SIOCSIFTXQLEN:
4417 if (ifr->ifr_qlen < 0)
4418 return -EINVAL;
4419 dev->tx_queue_len = ifr->ifr_qlen;
4420 return 0;
1da177e4 4421
d1b19dff
ED
4422 case SIOCSIFNAME:
4423 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4424 return dev_change_name(dev, ifr->ifr_newname);
1da177e4 4425
d1b19dff
ED
4426 /*
4427 * Unknown or private ioctl
4428 */
4429 default:
4430 if ((cmd >= SIOCDEVPRIVATE &&
4431 cmd <= SIOCDEVPRIVATE + 15) ||
4432 cmd == SIOCBONDENSLAVE ||
4433 cmd == SIOCBONDRELEASE ||
4434 cmd == SIOCBONDSETHWADDR ||
4435 cmd == SIOCBONDSLAVEINFOQUERY ||
4436 cmd == SIOCBONDINFOQUERY ||
4437 cmd == SIOCBONDCHANGEACTIVE ||
4438 cmd == SIOCGMIIPHY ||
4439 cmd == SIOCGMIIREG ||
4440 cmd == SIOCSMIIREG ||
4441 cmd == SIOCBRADDIF ||
4442 cmd == SIOCBRDELIF ||
4443 cmd == SIOCSHWTSTAMP ||
4444 cmd == SIOCWANDEV) {
4445 err = -EOPNOTSUPP;
4446 if (ops->ndo_do_ioctl) {
4447 if (netif_device_present(dev))
4448 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4449 else
4450 err = -ENODEV;
4451 }
4452 } else
4453 err = -EINVAL;
1da177e4
LT
4454
4455 }
4456 return err;
4457}
4458
4459/*
4460 * This function handles all "interface"-type I/O control requests. The actual
4461 * 'doing' part of this is dev_ifsioc above.
4462 */
4463
4464/**
4465 * dev_ioctl - network device ioctl
c4ea43c5 4466 * @net: the applicable net namespace
1da177e4
LT
4467 * @cmd: command to issue
4468 * @arg: pointer to a struct ifreq in user space
4469 *
4470 * Issue ioctl functions to devices. This is normally called by the
4471 * user space syscall interfaces but can sometimes be useful for
4472 * other purposes. The return value is the return from the syscall if
4473 * positive or a negative errno code on error.
4474 */
4475
881d966b 4476int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1da177e4
LT
4477{
4478 struct ifreq ifr;
4479 int ret;
4480 char *colon;
4481
4482 /* One special case: SIOCGIFCONF takes ifconf argument
4483 and requires shared lock, because it sleeps writing
4484 to user space.
4485 */
4486
4487 if (cmd == SIOCGIFCONF) {
6756ae4b 4488 rtnl_lock();
881d966b 4489 ret = dev_ifconf(net, (char __user *) arg);
6756ae4b 4490 rtnl_unlock();
1da177e4
LT
4491 return ret;
4492 }
4493 if (cmd == SIOCGIFNAME)
881d966b 4494 return dev_ifname(net, (struct ifreq __user *)arg);
1da177e4
LT
4495
4496 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4497 return -EFAULT;
4498
4499 ifr.ifr_name[IFNAMSIZ-1] = 0;
4500
4501 colon = strchr(ifr.ifr_name, ':');
4502 if (colon)
4503 *colon = 0;
4504
4505 /*
4506 * See which interface the caller is talking about.
4507 */
4508
4509 switch (cmd) {
d1b19dff
ED
4510 /*
4511 * These ioctl calls:
4512 * - can be done by all.
4513 * - atomic and do not require locking.
4514 * - return a value
4515 */
4516 case SIOCGIFFLAGS:
4517 case SIOCGIFMETRIC:
4518 case SIOCGIFMTU:
4519 case SIOCGIFHWADDR:
4520 case SIOCGIFSLAVE:
4521 case SIOCGIFMAP:
4522 case SIOCGIFINDEX:
4523 case SIOCGIFTXQLEN:
4524 dev_load(net, ifr.ifr_name);
3710becf 4525 rcu_read_lock();
d1b19dff 4526 ret = dev_ifsioc_locked(net, &ifr, cmd);
3710becf 4527 rcu_read_unlock();
d1b19dff
ED
4528 if (!ret) {
4529 if (colon)
4530 *colon = ':';
4531 if (copy_to_user(arg, &ifr,
4532 sizeof(struct ifreq)))
4533 ret = -EFAULT;
4534 }
4535 return ret;
1da177e4 4536
d1b19dff
ED
4537 case SIOCETHTOOL:
4538 dev_load(net, ifr.ifr_name);
4539 rtnl_lock();
4540 ret = dev_ethtool(net, &ifr);
4541 rtnl_unlock();
4542 if (!ret) {
4543 if (colon)
4544 *colon = ':';
4545 if (copy_to_user(arg, &ifr,
4546 sizeof(struct ifreq)))
4547 ret = -EFAULT;
4548 }
4549 return ret;
1da177e4 4550
d1b19dff
ED
4551 /*
4552 * These ioctl calls:
4553 * - require superuser power.
4554 * - require strict serialization.
4555 * - return a value
4556 */
4557 case SIOCGMIIPHY:
4558 case SIOCGMIIREG:
4559 case SIOCSIFNAME:
4560 if (!capable(CAP_NET_ADMIN))
4561 return -EPERM;
4562 dev_load(net, ifr.ifr_name);
4563 rtnl_lock();
4564 ret = dev_ifsioc(net, &ifr, cmd);
4565 rtnl_unlock();
4566 if (!ret) {
4567 if (colon)
4568 *colon = ':';
4569 if (copy_to_user(arg, &ifr,
4570 sizeof(struct ifreq)))
4571 ret = -EFAULT;
4572 }
4573 return ret;
1da177e4 4574
d1b19dff
ED
4575 /*
4576 * These ioctl calls:
4577 * - require superuser power.
4578 * - require strict serialization.
4579 * - do not return a value
4580 */
4581 case SIOCSIFFLAGS:
4582 case SIOCSIFMETRIC:
4583 case SIOCSIFMTU:
4584 case SIOCSIFMAP:
4585 case SIOCSIFHWADDR:
4586 case SIOCSIFSLAVE:
4587 case SIOCADDMULTI:
4588 case SIOCDELMULTI:
4589 case SIOCSIFHWBROADCAST:
4590 case SIOCSIFTXQLEN:
4591 case SIOCSMIIREG:
4592 case SIOCBONDENSLAVE:
4593 case SIOCBONDRELEASE:
4594 case SIOCBONDSETHWADDR:
4595 case SIOCBONDCHANGEACTIVE:
4596 case SIOCBRADDIF:
4597 case SIOCBRDELIF:
4598 case SIOCSHWTSTAMP:
4599 if (!capable(CAP_NET_ADMIN))
4600 return -EPERM;
4601 /* fall through */
4602 case SIOCBONDSLAVEINFOQUERY:
4603 case SIOCBONDINFOQUERY:
4604 dev_load(net, ifr.ifr_name);
4605 rtnl_lock();
4606 ret = dev_ifsioc(net, &ifr, cmd);
4607 rtnl_unlock();
4608 return ret;
4609
4610 case SIOCGIFMEM:
4611 /* Get the per device memory space. We can add this but
4612 * currently do not support it */
4613 case SIOCSIFMEM:
4614 /* Set the per device memory buffer space.
4615 * Not applicable in our case */
4616 case SIOCSIFLINK:
4617 return -EINVAL;
4618
4619 /*
4620 * Unknown or private ioctl.
4621 */
4622 default:
4623 if (cmd == SIOCWANDEV ||
4624 (cmd >= SIOCDEVPRIVATE &&
4625 cmd <= SIOCDEVPRIVATE + 15)) {
881d966b 4626 dev_load(net, ifr.ifr_name);
1da177e4 4627 rtnl_lock();
881d966b 4628 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4 4629 rtnl_unlock();
d1b19dff
ED
4630 if (!ret && copy_to_user(arg, &ifr,
4631 sizeof(struct ifreq)))
4632 ret = -EFAULT;
1da177e4 4633 return ret;
d1b19dff
ED
4634 }
4635 /* Take care of Wireless Extensions */
4636 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
4637 return wext_handle_ioctl(net, &ifr, cmd, arg);
4638 return -EINVAL;
1da177e4
LT
4639 }
4640}
4641
4642
4643/**
4644 * dev_new_index - allocate an ifindex
c4ea43c5 4645 * @net: the applicable net namespace
1da177e4
LT
4646 *
4647 * Returns a suitable unique value for a new device interface
4648 * number. The caller must hold the rtnl semaphore or the
4649 * dev_base_lock to be sure it remains unique.
4650 */
881d966b 4651static int dev_new_index(struct net *net)
1da177e4
LT
4652{
4653 static int ifindex;
4654 for (;;) {
4655 if (++ifindex <= 0)
4656 ifindex = 1;
881d966b 4657 if (!__dev_get_by_index(net, ifindex))
1da177e4
LT
4658 return ifindex;
4659 }
4660}
4661
1da177e4 4662/* Delayed registration/unregistration */
3b5b34fd 4663static LIST_HEAD(net_todo_list);
1da177e4 4664
6f05f629 4665static void net_set_todo(struct net_device *dev)
1da177e4 4666{
1da177e4 4667 list_add_tail(&dev->todo_list, &net_todo_list);
1da177e4
LT
4668}
4669
9b5e383c 4670static void rollback_registered_many(struct list_head *head)
93ee31f1 4671{
e93737b0 4672 struct net_device *dev, *tmp;
9b5e383c 4673
93ee31f1
DL
4674 BUG_ON(dev_boot_phase);
4675 ASSERT_RTNL();
4676
e93737b0 4677 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 4678 /* Some devices get queued here without ever having
e93737b0
KK
 4679 * been registered, as part of initialization unwind.
 4680 * Remove those devices and proceed with the remaining.
9b5e383c
ED
4681 */
4682 if (dev->reg_state == NETREG_UNINITIALIZED) {
4683 pr_debug("unregister_netdevice: device %s/%p never "
4684 "was registered\n", dev->name, dev);
93ee31f1 4685
9b5e383c 4686 WARN_ON(1);
e93737b0
KK
4687 list_del(&dev->unreg_list);
4688 continue;
9b5e383c 4689 }
93ee31f1 4690
9b5e383c 4691 BUG_ON(dev->reg_state != NETREG_REGISTERED);
93ee31f1 4692
9b5e383c
ED
4693 /* If device is running, close it first. */
4694 dev_close(dev);
93ee31f1 4695
9b5e383c
ED
4696 /* And unlink it from device chain. */
4697 unlist_netdevice(dev);
93ee31f1 4698
9b5e383c
ED
4699 dev->reg_state = NETREG_UNREGISTERING;
4700 }
93ee31f1
DL
4701
4702 synchronize_net();
4703
9b5e383c
ED
4704 list_for_each_entry(dev, head, unreg_list) {
4705 /* Shutdown queueing discipline. */
4706 dev_shutdown(dev);
93ee31f1
DL
4707
4708
9b5e383c
ED
4709 /* Notify protocols, that we are about to destroy
4710 this device. They should clean all the things.
4711 */
4712 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 4713
a2835763
PM
4714 if (!dev->rtnl_link_ops ||
4715 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4716 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
4717
9b5e383c
ED
4718 /*
4719 * Flush the unicast and multicast chains
4720 */
a748ee24 4721 dev_uc_flush(dev);
22bedad3 4722 dev_mc_flush(dev);
93ee31f1 4723
9b5e383c
ED
4724 if (dev->netdev_ops->ndo_uninit)
4725 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 4726
9b5e383c
ED
4727 /* Notifier chain MUST detach us from master device. */
4728 WARN_ON(dev->master);
93ee31f1 4729
9b5e383c
ED
4730 /* Remove entries from kobject tree */
4731 netdev_unregister_kobject(dev);
4732 }
93ee31f1 4733
a5ee1551 4734 /* Process any work delayed until the end of the batch */
e5e26d75 4735 dev = list_first_entry(head, struct net_device, unreg_list);
a5ee1551 4736 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
93ee31f1 4737
a5ee1551 4738 synchronize_net();
395264d5 4739
a5ee1551 4740 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
4741 dev_put(dev);
4742}
4743
4744static void rollback_registered(struct net_device *dev)
4745{
4746 LIST_HEAD(single);
4747
4748 list_add(&dev->unreg_list, &single);
4749 rollback_registered_many(&single);
93ee31f1
DL
4750}
4751
e8a0464c
DM
4752static void __netdev_init_queue_locks_one(struct net_device *dev,
4753 struct netdev_queue *dev_queue,
4754 void *_unused)
c773e847
DM
4755{
4756 spin_lock_init(&dev_queue->_xmit_lock);
cf508b12 4757 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
c773e847
DM
4758 dev_queue->xmit_lock_owner = -1;
4759}
4760
4761static void netdev_init_queue_locks(struct net_device *dev)
4762{
e8a0464c
DM
4763 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
4764 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
c773e847
DM
4765}
4766
b63365a2
HX
4767unsigned long netdev_fix_features(unsigned long features, const char *name)
4768{
4769 /* Fix illegal SG+CSUM combinations. */
4770 if ((features & NETIF_F_SG) &&
4771 !(features & NETIF_F_ALL_CSUM)) {
4772 if (name)
4773 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
4774 "checksum feature.\n", name);
4775 features &= ~NETIF_F_SG;
4776 }
4777
4778 /* TSO requires that SG is present as well. */
4779 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
4780 if (name)
4781 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
4782 "SG feature.\n", name);
4783 features &= ~NETIF_F_TSO;
4784 }
4785
4786 if (features & NETIF_F_UFO) {
4787 if (!(features & NETIF_F_GEN_CSUM)) {
4788 if (name)
4789 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4790 "since no NETIF_F_HW_CSUM feature.\n",
4791 name);
4792 features &= ~NETIF_F_UFO;
4793 }
4794
4795 if (!(features & NETIF_F_SG)) {
4796 if (name)
4797 printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
4798 "since no NETIF_F_SG feature.\n", name);
4799 features &= ~NETIF_F_UFO;
4800 }
4801 }
4802
4803 return features;
4804}
4805EXPORT_SYMBOL(netdev_fix_features);
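/*
 * Illustrative sketch (assumption): a driver sanity-checking a feature
 * set before advertising it, so an SG-less configuration cannot end up
 * claiming TSO.
 */
static void example_init_features(struct net_device *dev, bool have_sg)
{
	unsigned long features = NETIF_F_IP_CSUM | NETIF_F_TSO;

	if (have_sg)
		features |= NETIF_F_SG;
	/* drops TSO here when SG is absent, with a KERN_NOTICE message */
	dev->features = netdev_fix_features(features, dev->name);
}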
4806
fc4a7489
PM
4807/**
4808 * netif_stacked_transfer_operstate - transfer operstate
4809 * @rootdev: the root or lower level device to transfer state from
4810 * @dev: the device to transfer operstate to
4811 *
4812 * Transfer operational state from root to device. This is normally
4813 * called when a stacking relationship exists between the root
 4814 * device and the device (a leaf device).
4815 */
4816void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4817 struct net_device *dev)
4818{
4819 if (rootdev->operstate == IF_OPER_DORMANT)
4820 netif_dormant_on(dev);
4821 else
4822 netif_dormant_off(dev);
4823
4824 if (netif_carrier_ok(rootdev)) {
4825 if (!netif_carrier_ok(dev))
4826 netif_carrier_on(dev);
4827 } else {
4828 if (netif_carrier_ok(dev))
4829 netif_carrier_off(dev);
4830 }
4831}
4832EXPORT_SYMBOL(netif_stacked_transfer_operstate);
4833
1da177e4
LT
4834/**
4835 * register_netdevice - register a network device
4836 * @dev: device to register
4837 *
4838 * Take a completed network device structure and add it to the kernel
4839 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4840 * chain. 0 is returned on success. A negative errno code is returned
4841 * on a failure to set up the device, or if the name is a duplicate.
4842 *
4843 * Callers must hold the rtnl semaphore. You may want
4844 * register_netdev() instead of this.
4845 *
4846 * BUGS:
4847 * The locking appears insufficient to guarantee two parallel registers
4848 * will not get the same name.
4849 */
4850
4851int register_netdevice(struct net_device *dev)
4852{
1da177e4 4853 int ret;
d314774c 4854 struct net *net = dev_net(dev);
1da177e4
LT
4855
4856 BUG_ON(dev_boot_phase);
4857 ASSERT_RTNL();
4858
b17a7c17
SH
4859 might_sleep();
4860
1da177e4
LT
4861 /* When net_device's are persistent, this will be fatal. */
4862 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 4863 BUG_ON(!net);
1da177e4 4864
f1f28aa3 4865 spin_lock_init(&dev->addr_list_lock);
cf508b12 4866 netdev_set_addr_lockdep_class(dev);
c773e847 4867 netdev_init_queue_locks(dev);
1da177e4 4868
1da177e4
LT
4869 dev->iflink = -1;
4870
df334545 4871#ifdef CONFIG_RPS
0a9627f2
TH
4872 if (!dev->num_rx_queues) {
4873 /*
4874 * Allocate a single RX queue if driver never called
4875 * alloc_netdev_mq
4876 */
4877
4878 dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
4879 if (!dev->_rx) {
4880 ret = -ENOMEM;
4881 goto out;
4882 }
4883
4884 dev->_rx->first = dev->_rx;
4885 atomic_set(&dev->_rx->count, 1);
4886 dev->num_rx_queues = 1;
4887 }
df334545 4888#endif
1da177e4 4889 /* Init, if this function is available */
d314774c
SH
4890 if (dev->netdev_ops->ndo_init) {
4891 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
4892 if (ret) {
4893 if (ret > 0)
4894 ret = -EIO;
90833aa4 4895 goto out;
1da177e4
LT
4896 }
4897 }
4ec93edb 4898
d9031024
OP
4899 ret = dev_get_valid_name(net, dev->name, dev->name, 0);
4900 if (ret)
7ce1b0ed 4901 goto err_uninit;
1da177e4 4902
881d966b 4903 dev->ifindex = dev_new_index(net);
1da177e4
LT
4904 if (dev->iflink == -1)
4905 dev->iflink = dev->ifindex;
4906
d212f87b
SH
4907 /* Fix illegal checksum combinations */
4908 if ((dev->features & NETIF_F_HW_CSUM) &&
4909 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4910 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
4911 dev->name);
4912 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4913 }
4914
4915 if ((dev->features & NETIF_F_NO_CSUM) &&
4916 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
4917 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4918 dev->name);
4919 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4920 }
4921
b63365a2 4922 dev->features = netdev_fix_features(dev->features, dev->name);
1da177e4 4923
e5a4a72d
LB
4924 /* Enable software GSO if SG is supported. */
4925 if (dev->features & NETIF_F_SG)
4926 dev->features |= NETIF_F_GSO;
4927
aaf8cdc3 4928 netdev_initialize_kobject(dev);
7ffbe3fd
JB
4929
4930 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
4931 ret = notifier_to_errno(ret);
4932 if (ret)
4933 goto err_uninit;
4934
8b41d188 4935 ret = netdev_register_kobject(dev);
b17a7c17 4936 if (ret)
7ce1b0ed 4937 goto err_uninit;
b17a7c17
SH
4938 dev->reg_state = NETREG_REGISTERED;
4939
1da177e4
LT
4940 /*
4941 * Default initial state at registry is that the
4942 * device is present.
4943 */
4944
4945 set_bit(__LINK_STATE_PRESENT, &dev->state);
4946
1da177e4 4947 dev_init_scheduler(dev);
1da177e4 4948 dev_hold(dev);
ce286d32 4949 list_netdevice(dev);
1da177e4
LT
4950
4951 /* Notify protocols, that a new device appeared. */
056925ab 4952 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 4953 ret = notifier_to_errno(ret);
93ee31f1
DL
4954 if (ret) {
4955 rollback_registered(dev);
4956 dev->reg_state = NETREG_UNREGISTERED;
4957 }
d90a909e
EB
4958 /*
4959 * Prevent userspace races by waiting until the network
 4960 * device is fully set up before sending notifications.
4961 */
a2835763
PM
4962 if (!dev->rtnl_link_ops ||
4963 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
4964 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
1da177e4
LT
4965
4966out:
4967 return ret;
7ce1b0ed
HX
4968
4969err_uninit:
d314774c
SH
4970 if (dev->netdev_ops->ndo_uninit)
4971 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 4972 goto out;
1da177e4 4973}
d1b19dff 4974EXPORT_SYMBOL(register_netdevice);
1da177e4 4975
937f1ba5
BH
4976/**
4977 * init_dummy_netdev - init a dummy network device for NAPI
4978 * @dev: device to init
4979 *
 4980 * This takes a network device structure and initializes the minimum
 4981 * set of fields so it can be used to schedule NAPI polls without
 4982 * registering a full-blown interface. This is to be used by drivers
4983 * that need to tie several hardware interfaces to a single NAPI
4984 * poll scheduler due to HW limitations.
4985 */
4986int init_dummy_netdev(struct net_device *dev)
4987{
4988 /* Clear everything. Note we don't initialize spinlocks
 4989 * as they aren't supposed to be taken by any of the
4990 * NAPI code and this dummy netdev is supposed to be
4991 * only ever used for NAPI polls
4992 */
4993 memset(dev, 0, sizeof(struct net_device));
4994
4995 /* make sure we BUG if trying to hit standard
4996 * register/unregister code path
4997 */
4998 dev->reg_state = NETREG_DUMMY;
4999
5000 /* initialize the ref count */
5001 atomic_set(&dev->refcnt, 1);
5002
5003 /* NAPI wants this */
5004 INIT_LIST_HEAD(&dev->napi_list);
5005
5006 /* a dummy interface is started by default */
5007 set_bit(__LINK_STATE_PRESENT, &dev->state);
5008 set_bit(__LINK_STATE_START, &dev->state);
5009
5010 return 0;
5011}
5012EXPORT_SYMBOL_GPL(init_dummy_netdev);
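/*
 * Illustrative sketch (assumption): how a multi-function adapter can
 * hang a NAPI context off one dummy netdev that is never registered.
 * example_adapter and example_poll are hypothetical.
 */
struct example_adapter {
	struct net_device napi_dev;	/* dummy, never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget);

static void example_napi_setup(struct example_adapter *ad)
{
	init_dummy_netdev(&ad->napi_dev);
	netif_napi_add(&ad->napi_dev, &ad->napi, example_poll, 64);
	napi_enable(&ad->napi);
}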
5013
5014
1da177e4
LT
5015/**
5016 * register_netdev - register a network device
5017 * @dev: device to register
5018 *
5019 * Take a completed network device structure and add it to the kernel
5020 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5021 * chain. 0 is returned on success. A negative errno code is returned
5022 * on a failure to set up the device, or if the name is a duplicate.
5023 *
38b4da38 5024 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
5025 * and expands the device name if you passed a format string to
5026 * alloc_netdev.
5027 */
5028int register_netdev(struct net_device *dev)
5029{
5030 int err;
5031
5032 rtnl_lock();
5033
5034 /*
5035 * If the name is a format string the caller wants us to do a
5036 * name allocation.
5037 */
5038 if (strchr(dev->name, '%')) {
5039 err = dev_alloc_name(dev, dev->name);
5040 if (err < 0)
5041 goto out;
5042 }
4ec93edb 5043
1da177e4
LT
5044 err = register_netdevice(dev);
5045out:
5046 rtnl_unlock();
5047 return err;
5048}
5049EXPORT_SYMBOL(register_netdev);
5050
5051/*
5052 * netdev_wait_allrefs - wait until all references are gone.
5053 *
5054 * This is called when unregistering network devices.
5055 *
5056 * Any protocol or device that holds a reference should register
 5057 * for netdevice notification, and clean up and put back the
5058 * reference if they receive an UNREGISTER event.
5059 * We can get stuck here if buggy protocols don't correctly
4ec93edb 5060 * call dev_put.
1da177e4
LT
5061 */
5062static void netdev_wait_allrefs(struct net_device *dev)
5063{
5064 unsigned long rebroadcast_time, warning_time;
5065
e014debe
ED
5066 linkwatch_forget_dev(dev);
5067
1da177e4
LT
5068 rebroadcast_time = warning_time = jiffies;
5069 while (atomic_read(&dev->refcnt) != 0) {
5070 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 5071 rtnl_lock();
1da177e4
LT
5072
5073 /* Rebroadcast unregister notification */
056925ab 5074 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5075 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
395264d5 5076 * should have already handled it the first time */
1da177e4
LT
5077
5078 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5079 &dev->state)) {
5080 /* We must not have linkwatch events
5081 * pending on unregister. If this
5082 * happens, we simply run the queue
5083 * unscheduled, resulting in a noop
5084 * for this device.
5085 */
5086 linkwatch_run_queue();
5087 }
5088
6756ae4b 5089 __rtnl_unlock();
1da177e4
LT
5090
5091 rebroadcast_time = jiffies;
5092 }
5093
5094 msleep(250);
5095
5096 if (time_after(jiffies, warning_time + 10 * HZ)) {
5097 printk(KERN_EMERG "unregister_netdevice: "
5098 "waiting for %s to become free. Usage "
5099 "count = %d\n",
5100 dev->name, atomic_read(&dev->refcnt));
5101 warning_time = jiffies;
5102 }
5103 }
5104}
5105
5106/* The sequence is:
5107 *
5108 * rtnl_lock();
5109 * ...
5110 * register_netdevice(x1);
5111 * register_netdevice(x2);
5112 * ...
5113 * unregister_netdevice(y1);
5114 * unregister_netdevice(y2);
5115 * ...
5116 * rtnl_unlock();
5117 * free_netdev(y1);
5118 * free_netdev(y2);
5119 *
58ec3b4d 5120 * We are invoked by rtnl_unlock().
1da177e4 5121 * This allows us to deal with problems:
b17a7c17 5122 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
5123 * without deadlocking with linkwatch via keventd.
5124 * 2) Since we run with the RTNL semaphore not held, we can sleep
5125 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
5126 *
5127 * We must not return until all unregister events added during
5128 * the interval the lock was held have been completed.
1da177e4 5129 */
1da177e4
LT
5130void netdev_run_todo(void)
5131{
626ab0e6 5132 struct list_head list;
1da177e4 5133
1da177e4 5134 /* Snapshot list, allow later requests */
626ab0e6 5135 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
5136
5137 __rtnl_unlock();
626ab0e6 5138
1da177e4
LT
5139 while (!list_empty(&list)) {
5140 struct net_device *dev
e5e26d75 5141 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
5142 list_del(&dev->todo_list);
5143
b17a7c17
SH
5144 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5145 printk(KERN_ERR "network todo '%s' but state %d\n",
5146 dev->name, dev->reg_state);
5147 dump_stack();
5148 continue;
5149 }
1da177e4 5150
b17a7c17 5151 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 5152
152102c7 5153 on_each_cpu(flush_backlog, dev, 1);
6e583ce5 5154
b17a7c17 5155 netdev_wait_allrefs(dev);
1da177e4 5156
b17a7c17
SH
5157 /* paranoia */
5158 BUG_ON(atomic_read(&dev->refcnt));
547b792c
IJ
5159 WARN_ON(dev->ip_ptr);
5160 WARN_ON(dev->ip6_ptr);
5161 WARN_ON(dev->dn_ptr);
1da177e4 5162
b17a7c17
SH
5163 if (dev->destructor)
5164 dev->destructor(dev);
9093bbb2
SH
5165
5166 /* Free network device */
5167 kobject_put(&dev->dev.kobj);
1da177e4 5168 }
1da177e4
LT
5169}
5170
d83345ad
ED
5171/**
5172 * dev_txq_stats_fold - fold tx_queues stats
5173 * @dev: device to get statistics from
5174 * @stats: struct net_device_stats to hold results
5175 */
5176void dev_txq_stats_fold(const struct net_device *dev,
5177 struct net_device_stats *stats)
5178{
5179 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5180 unsigned int i;
5181 struct netdev_queue *txq;
5182
5183 for (i = 0; i < dev->num_tx_queues; i++) {
5184 txq = netdev_get_tx_queue(dev, i);
5185 tx_bytes += txq->tx_bytes;
5186 tx_packets += txq->tx_packets;
5187 tx_dropped += txq->tx_dropped;
5188 }
5189 if (tx_bytes || tx_packets || tx_dropped) {
5190 stats->tx_bytes = tx_bytes;
5191 stats->tx_packets = tx_packets;
5192 stats->tx_dropped = tx_dropped;
5193 }
5194}
5195EXPORT_SYMBOL(dev_txq_stats_fold);
5196
eeda3fd6
SH
5197/**
5198 * dev_get_stats - get network device statistics
5199 * @dev: device to get statistics from
5200 *
5201 * Get network statistics from device. The device driver may provide
5202 * its own method by setting dev->netdev_ops->get_stats; otherwise
5203 * the internal statistics structure is used.
5204 */
5205const struct net_device_stats *dev_get_stats(struct net_device *dev)
7004bf25 5206{
eeda3fd6
SH
5207 const struct net_device_ops *ops = dev->netdev_ops;
5208
5209 if (ops->ndo_get_stats)
5210 return ops->ndo_get_stats(dev);
d83345ad
ED
5211
5212 dev_txq_stats_fold(dev, &dev->stats);
5213 return &dev->stats;
c45d286e 5214}
eeda3fd6 5215EXPORT_SYMBOL(dev_get_stats);
c45d286e 5216
dc2b4847 5217static void netdev_init_one_queue(struct net_device *dev,
e8a0464c
DM
5218 struct netdev_queue *queue,
5219 void *_unused)
dc2b4847 5220{
dc2b4847
DM
5221 queue->dev = dev;
5222}
5223
bb949fbd
DM
5224static void netdev_init_queues(struct net_device *dev)
5225{
e8a0464c
DM
5226 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
5227 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
c3f26a26 5228 spin_lock_init(&dev->tx_global_lock);
bb949fbd
DM
5229}
5230
1da177e4 5231/**
f25f4e44 5232 * alloc_netdev_mq - allocate network device
1da177e4
LT
5233 * @sizeof_priv: size of private data to allocate space for
5234 * @name: device name format string
5235 * @setup: callback to initialize device
f25f4e44 5236 * @queue_count: the number of subqueues to allocate
1da177e4
LT
5237 *
5238 * Allocates a struct net_device with private data area for driver use
f25f4e44
PWJ
 5239 * and performs basic initialization. Also allocates subqueue structs
5240 * for each queue on the device at the end of the netdevice.
1da177e4 5241 */
f25f4e44
PWJ
5242struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5243 void (*setup)(struct net_device *), unsigned int queue_count)
1da177e4 5244{
e8a0464c 5245 struct netdev_queue *tx;
1da177e4 5246 struct net_device *dev;
7943986c 5247 size_t alloc_size;
1ce8e7b5 5248 struct net_device *p;
df334545
ED
5249#ifdef CONFIG_RPS
5250 struct netdev_rx_queue *rx;
0a9627f2 5251 int i;
df334545 5252#endif
1da177e4 5253
b6fe17d6
SH
5254 BUG_ON(strlen(name) >= sizeof(dev->name));
5255
fd2ea0a7 5256 alloc_size = sizeof(struct net_device);
d1643d24
AD
5257 if (sizeof_priv) {
5258 /* ensure 32-byte alignment of private area */
1ce8e7b5 5259 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
5260 alloc_size += sizeof_priv;
5261 }
5262 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 5263 alloc_size += NETDEV_ALIGN - 1;
1da177e4 5264
31380de9 5265 p = kzalloc(alloc_size, GFP_KERNEL);
1da177e4 5266 if (!p) {
b6fe17d6 5267 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
1da177e4
LT
5268 return NULL;
5269 }
1da177e4 5270
7943986c 5271 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
e8a0464c
DM
5272 if (!tx) {
5273 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5274 "tx qdiscs.\n");
ab9c73cc 5275 goto free_p;
e8a0464c
DM
5276 }
5277
df334545 5278#ifdef CONFIG_RPS
0a9627f2
TH
5279 rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5280 if (!rx) {
5281 printk(KERN_ERR "alloc_netdev: Unable to allocate "
5282 "rx queues.\n");
5283 goto free_tx;
5284 }
5285
5286 atomic_set(&rx->count, queue_count);
5287
5288 /*
5289 * Set a pointer to first element in the array which holds the
5290 * reference count.
5291 */
5292 for (i = 0; i < queue_count; i++)
5293 rx[i].first = rx;
df334545 5294#endif
0a9627f2 5295
1ce8e7b5 5296 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 5297 dev->padded = (char *)dev - (char *)p;
ab9c73cc
JP
5298
5299 if (dev_addr_init(dev))
0a9627f2 5300 goto free_rx;
ab9c73cc 5301
22bedad3 5302 dev_mc_init(dev);
a748ee24 5303 dev_uc_init(dev);
ccffad25 5304
c346dca1 5305 dev_net_set(dev, &init_net);
1da177e4 5306
e8a0464c
DM
5307 dev->_tx = tx;
5308 dev->num_tx_queues = queue_count;
fd2ea0a7 5309 dev->real_num_tx_queues = queue_count;
e8a0464c 5310
df334545 5311#ifdef CONFIG_RPS
0a9627f2
TH
5312 dev->_rx = rx;
5313 dev->num_rx_queues = queue_count;
df334545 5314#endif
0a9627f2 5315
82cc1a7a 5316 dev->gso_max_size = GSO_MAX_SIZE;
1da177e4 5317
bb949fbd
DM
5318 netdev_init_queues(dev);
5319
15682bc4
PWJ
5320 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5321 dev->ethtool_ntuple_list.count = 0;
d565b0a1 5322 INIT_LIST_HEAD(&dev->napi_list);
9fdce099 5323 INIT_LIST_HEAD(&dev->unreg_list);
e014debe 5324 INIT_LIST_HEAD(&dev->link_watch_list);
93f154b5 5325 dev->priv_flags = IFF_XMIT_DST_RELEASE;
1da177e4
LT
5326 setup(dev);
5327 strcpy(dev->name, name);
5328 return dev;
ab9c73cc 5329
0a9627f2 5330free_rx:
df334545 5331#ifdef CONFIG_RPS
0a9627f2 5332 kfree(rx);
ab9c73cc 5333free_tx:
df334545 5334#endif
ab9c73cc 5335 kfree(tx);
ab9c73cc
JP
5336free_p:
5337 kfree(p);
5338 return NULL;
1da177e4 5339}
f25f4e44 5340EXPORT_SYMBOL(alloc_netdev_mq);
1da177e4
LT
5341
5342/**
5343 * free_netdev - free network device
5344 * @dev: device
5345 *
4ec93edb
YH
5346 * This function does the last stage of destroying an allocated device
5347 * interface. The reference to the device object is released.
1da177e4
LT
5348 * If this is the last reference then it will be freed.
5349 */
5350void free_netdev(struct net_device *dev)
5351{
d565b0a1
HX
5352 struct napi_struct *p, *n;
5353
f3005d7f
DL
5354 release_net(dev_net(dev));
5355
e8a0464c
DM
5356 kfree(dev->_tx);
5357
f001fde5
JP
5358 /* Flush device addresses */
5359 dev_addr_flush(dev);
5360
15682bc4
PWJ
5361 /* Clear ethtool n-tuple list */
5362 ethtool_ntuple_flush(dev);
5363
d565b0a1
HX
5364 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5365 netif_napi_del(p);
5366
3041a069 5367 /* Compatibility with error handling in drivers */
1da177e4
LT
5368 if (dev->reg_state == NETREG_UNINITIALIZED) {
5369 kfree((char *)dev - dev->padded);
5370 return;
5371 }
5372
5373 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
5374 dev->reg_state = NETREG_RELEASED;
5375
43cb76d9
GKH
5376 /* will free via device release */
5377 put_device(&dev->dev);
1da177e4 5378}
d1b19dff 5379EXPORT_SYMBOL(free_netdev);
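/*
 * Illustrative sketch (assumption): the canonical lifetime of a netdev,
 * tying together alloc_netdev_mq(), register_netdev(),
 * unregister_netdev() and free_netdev(). example_priv is hypothetical.
 */
struct example_priv {
	int placeholder;
};

static int example_probe(struct net_device **out)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev_mq(sizeof(struct example_priv), "ex%d",
			      ether_setup, 1);
	if (!dev)
		return -ENOMEM;
	err = register_netdev(dev);	/* takes RTNL, expands "ex%d" */
	if (err) {
		free_netdev(dev);	/* safe: state is UNINITIALIZED */
		return err;
	}
	*out = dev;
	return 0;
}

static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);		/* takes RTNL, schedules todo work */
	free_netdev(dev);		/* drops the last reference */
}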
4ec93edb 5380
f0db275a
SH
5381/**
5382 * synchronize_net - Synchronize with packet receive processing
5383 *
5384 * Wait for packets currently being received to be done.
5385 * Does not block later packets from starting.
5386 */
4ec93edb 5387void synchronize_net(void)
1da177e4
LT
5388{
5389 might_sleep();
fbd568a3 5390 synchronize_rcu();
1da177e4 5391}
d1b19dff 5392EXPORT_SYMBOL(synchronize_net);
1da177e4
LT
5393
5394/**
44a0873d 5395 * unregister_netdevice_queue - remove device from the kernel
1da177e4 5396 * @dev: device
44a0873d 5397 * @head: list
6ebfbc06 5398 *
1da177e4 5399 * This function shuts down a device interface and removes it
d59b54b1 5400 * from the kernel tables.
44a0873d 5401 * If head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
5402 *
5403 * Callers must hold the rtnl semaphore. You may want
5404 * unregister_netdev() instead of this.
5405 */
5406
44a0873d 5407void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 5408{
a6620712
HX
5409 ASSERT_RTNL();
5410
44a0873d 5411 if (head) {
9fdce099 5412 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
5413 } else {
5414 rollback_registered(dev);
5415 /* Finish processing unregister after unlock */
5416 net_set_todo(dev);
5417 }
1da177e4 5418}
44a0873d 5419EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 5420
9b5e383c
ED
5421/**
5422 * unregister_netdevice_many - unregister many devices
5423 * @head: list of devices
9b5e383c
ED
5424 */
5425void unregister_netdevice_many(struct list_head *head)
5426{
5427 struct net_device *dev;
5428
5429 if (!list_empty(head)) {
5430 rollback_registered_many(head);
5431 list_for_each_entry(dev, head, unreg_list)
5432 net_set_todo(dev);
5433 }
5434}
63c8099d 5435EXPORT_SYMBOL(unregister_netdevice_many);
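/*
 * Illustrative sketch (assumption): batching unregisters so the costly
 * synchronize_net() calls in rollback_registered_many() are paid once
 * per batch instead of once per device. The caller holds RTNL.
 */
static void example_destroy_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* one rollback pass */
}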
9b5e383c 5436
1da177e4
LT
5437/**
5438 * unregister_netdev - remove device from the kernel
5439 * @dev: device
5440 *
5441 * This function shuts down a device interface and removes it
d59b54b1 5442 * from the kernel tables.
1da177e4
LT
5443 *
5444 * This is just a wrapper for unregister_netdevice that takes
5445 * the rtnl semaphore. In general you want to use this and not
5446 * unregister_netdevice.
5447 */
5448void unregister_netdev(struct net_device *dev)
5449{
5450 rtnl_lock();
5451 unregister_netdevice(dev);
5452 rtnl_unlock();
5453}
1da177e4
LT
5454EXPORT_SYMBOL(unregister_netdev);
5455
ce286d32
EB
5456/**
 5457 * dev_change_net_namespace - move device to a different network namespace
5458 * @dev: device
5459 * @net: network namespace
5460 * @pat: If not NULL name pattern to try if the current device name
5461 * is already taken in the destination network namespace.
5462 *
5463 * This function shuts down a device interface and moves it
5464 * to a new network namespace. On success 0 is returned, on
5465 * a failure a negative errno code is returned.
5466 *
5467 * Callers must hold the rtnl semaphore.
5468 */
5469
5470int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
5471{
ce286d32
EB
5472 int err;
5473
5474 ASSERT_RTNL();
5475
5476 /* Don't allow namespace local devices to be moved. */
5477 err = -EINVAL;
5478 if (dev->features & NETIF_F_NETNS_LOCAL)
5479 goto out;
5480
3891845e
EB
5481#ifdef CONFIG_SYSFS
5482 /* Don't allow real devices to be moved when sysfs
5483 * is enabled.
5484 */
5485 err = -EINVAL;
5486 if (dev->dev.parent)
5487 goto out;
5488#endif
5489
ce286d32
EB
5490 /* Ensure the device has been registered */
5491 err = -EINVAL;
5492 if (dev->reg_state != NETREG_REGISTERED)
5493 goto out;
5494
5495 /* Get out if there is nothing to do */
5496 err = 0;
878628fb 5497 if (net_eq(dev_net(dev), net))
ce286d32
EB
5498 goto out;
5499
5500 /* Pick the destination device name, and ensure
5501 * we can use it in the destination network namespace.
5502 */
5503 err = -EEXIST;
d9031024 5504 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
5505 /* We get here if we can't use the current device name */
5506 if (!pat)
5507 goto out;
d9031024 5508 if (dev_get_valid_name(net, pat, dev->name, 1))
ce286d32
EB
5509 goto out;
5510 }
5511
5512 /*
5513 * And now a mini version of register_netdevice and unregister_netdevice.
5514 */
5515
5516 /* If device is running close it first. */
9b772652 5517 dev_close(dev);
ce286d32
EB
5518
5519 /* And unlink it from device chain */
5520 err = -ENODEV;
5521 unlist_netdevice(dev);
5522
5523 synchronize_net();
5524
5525 /* Shutdown queueing discipline. */
5526 dev_shutdown(dev);
5527
5528 /* Notify protocols that we are about to destroy
5529 this device. They should clean up all their state.
5530 */
5531 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
a5ee1551 5532 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
ce286d32
EB
5533
5534 /*
5535 * Flush the unicast and multicast chains
5536 */
a748ee24 5537 dev_uc_flush(dev);
22bedad3 5538 dev_mc_flush(dev);
ce286d32 5539
3891845e
EB
5540 netdev_unregister_kobject(dev);
5541
ce286d32 5542 /* Actually switch the network namespace */
c346dca1 5543 dev_net_set(dev, net);
ce286d32 5544
ce286d32
EB
5545 /* If there is an ifindex conflict assign a new one */
5546 if (__dev_get_by_index(net, dev->ifindex)) {
5547 int iflink = (dev->iflink == dev->ifindex);
5548 dev->ifindex = dev_new_index(net);
5549 if (iflink)
5550 dev->iflink = dev->ifindex;
5551 }
5552
8b41d188 5553 /* Fixup kobjects */
aaf8cdc3 5554 err = netdev_register_kobject(dev);
8b41d188 5555 WARN_ON(err);
ce286d32
EB
5556
5557 /* Add the device back in the hashes */
5558 list_netdevice(dev);
5559
5560 /* Notify protocols that a new device appeared. */
5561 call_netdevice_notifiers(NETDEV_REGISTER, dev);
5562
d90a909e
EB
5563 /*
5564 * Prevent userspace races by waiting until the network
5565 * device is fully set up before sending notifications.
5566 */
5567 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5568
ce286d32
EB
5569 synchronize_net();
5570 err = 0;
5571out:
5572 return err;
5573}
463d0183 5574EXPORT_SYMBOL_GPL(dev_change_net_namespace);
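A caller typically supplies a pattern such as "eth%d" so that a name clash in the destination namespace is resolved rather than failing with -EEXIST, as default_device_exit() below does with its "dev%d" fallback. A sketch of a caller, assuming target_net is a held namespace reference and move_dev_to() is a hypothetical helper:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static int move_dev_to(struct net_device *dev, struct net *target_net)
    {
            int err;

            rtnl_lock();
            /* "eth%d" lets the name-validation path pick a free name
             * if dev->name is already taken in target_net */
            err = dev_change_net_namespace(dev, target_net, "eth%d");
            rtnl_unlock();
            return err;
    }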
ce286d32 5575
1da177e4
LT
5576static int dev_cpu_callback(struct notifier_block *nfb,
5577 unsigned long action,
5578 void *ocpu)
5579{
5580 struct sk_buff **list_skb;
37437bb2 5581 struct Qdisc **list_net;
1da177e4
LT
5582 struct sk_buff *skb;
5583 unsigned int cpu, oldcpu = (unsigned long)ocpu;
5584 struct softnet_data *sd, *oldsd;
5585
8bb78442 5586 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
5587 return NOTIFY_OK;
5588
5589 local_irq_disable();
5590 cpu = smp_processor_id();
5591 sd = &per_cpu(softnet_data, cpu);
5592 oldsd = &per_cpu(softnet_data, oldcpu);
5593
5594 /* Find end of our completion_queue. */
5595 list_skb = &sd->completion_queue;
5596 while (*list_skb)
5597 list_skb = &(*list_skb)->next;
5598 /* Append completion queue from offline CPU. */
5599 *list_skb = oldsd->completion_queue;
5600 oldsd->completion_queue = NULL;
5601
5602 /* Find end of our output_queue. */
5603 list_net = &sd->output_queue;
5604 while (*list_net)
5605 list_net = &(*list_net)->next_sched;
5606 /* Append output queue from offline CPU. */
5607 *list_net = oldsd->output_queue;
5608 oldsd->output_queue = NULL;
5609
5610 raise_softirq_irqoff(NET_TX_SOFTIRQ);
5611 local_irq_enable();
5612
5613 /* Process offline CPU's input_pkt_queue */
fec5e652 5614 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
1da177e4 5615 netif_rx(skb);
fec5e652
TH
5616 incr_input_queue_head(oldsd);
5617 }
1da177e4
LT
5618
5619 return NOTIFY_OK;
5620}
1da177e4
LT
5621
5622
7f353bf2 5623/**
b63365a2
HX
5624 * netdev_increment_features - increment feature set by one
5625 * @all: current feature set
5626 * @one: new feature set
5627 * @mask: mask feature set
7f353bf2
HX
5628 *
5629 * Computes a new feature set after adding a device with feature set
b63365a2
HX
5630 * @one to the master device with current feature set @all. Will not
5631 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 5632 */
b63365a2
HX
5633unsigned long netdev_increment_features(unsigned long all, unsigned long one,
5634 unsigned long mask)
5635{
5636 /* If device needs checksumming, downgrade to it. */
d1b19dff 5637 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
b63365a2
HX
5638 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
5639 else if (mask & NETIF_F_ALL_CSUM) {
5640 /* If one device supports v4/v6 checksumming, set for all. */
5641 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
5642 !(all & NETIF_F_GEN_CSUM)) {
5643 all &= ~NETIF_F_ALL_CSUM;
5644 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
5645 }
e2a6b852 5646
b63365a2
HX
5647 /* If one device supports hw checksumming, set for all. */
5648 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
5649 all &= ~NETIF_F_ALL_CSUM;
5650 all |= NETIF_F_HW_CSUM;
5651 }
5652 }
7f353bf2 5653
b63365a2 5654 one |= NETIF_F_ALL_CSUM;
7f353bf2 5655
b63365a2 5656 one |= all & NETIF_F_ONE_FOR_ALL;
d9f5950f 5657 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
b63365a2 5658 all |= one & mask & NETIF_F_ONE_FOR_ALL;
7f353bf2
HX
5659
5660 return all;
5661}
b63365a2 5662EXPORT_SYMBOL(netdev_increment_features);
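A master device recomputes its feature set by folding each slave in through this helper, in the style of bonding's bond_compute_features(). A sketch under assumed slave bookkeeping; struct my_slave, my_slave_list, and recompute_master_features() are hypothetical:

    #include <linux/netdevice.h>
    #include <linux/list.h>

    struct my_slave {                       /* hypothetical bookkeeping */
            struct net_device *dev;
            struct list_head list;
    };
    static LIST_HEAD(my_slave_list);

    static void recompute_master_features(struct net_device *master)
    {
            unsigned long features = master->features & ~NETIF_F_ONE_FOR_ALL;
            struct my_slave *s;

            /* Fold each slave's features into the running set */
            list_for_each_entry(s, &my_slave_list, list)
                    features = netdev_increment_features(features,
                                                         s->dev->features,
                                                         NETIF_F_ONE_FOR_ALL);
            master->features = features;
    }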
7f353bf2 5663
30d97d35
PE
5664static struct hlist_head *netdev_create_hash(void)
5665{
5666 int i;
5667 struct hlist_head *hash;
5668
5669 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
5670 if (hash != NULL)
5671 for (i = 0; i < NETDEV_HASHENTRIES; i++)
5672 INIT_HLIST_HEAD(&hash[i]);
5673
5674 return hash;
5675}
5676
881d966b 5677/* Initialize per network namespace state */
4665079c 5678static int __net_init netdev_init(struct net *net)
881d966b 5679{
881d966b 5680 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 5681
30d97d35
PE
5682 net->dev_name_head = netdev_create_hash();
5683 if (net->dev_name_head == NULL)
5684 goto err_name;
881d966b 5685
30d97d35
PE
5686 net->dev_index_head = netdev_create_hash();
5687 if (net->dev_index_head == NULL)
5688 goto err_idx;
881d966b
EB
5689
5690 return 0;
30d97d35
PE
5691
5692err_idx:
5693 kfree(net->dev_name_head);
5694err_name:
5695 return -ENOMEM;
881d966b
EB
5696}
5697
f0db275a
SH
5698/**
5699 * netdev_drivername - network driver for the device
5700 * @dev: network device
5701 * @buffer: buffer for resulting name
5702 * @len: size of buffer
5703 *
5704 * Determine network driver for device.
5705 */
cf04a4c7 5706char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
6579e57b 5707{
cf04a4c7
SH
5708 const struct device_driver *driver;
5709 const struct device *parent;
6579e57b
AV
5710
5711 if (len <= 0 || !buffer)
5712 return buffer;
5713 buffer[0] = 0;
5714
5715 parent = dev->dev.parent;
5716
5717 if (!parent)
5718 return buffer;
5719
5720 driver = parent->driver;
5721 if (driver && driver->name)
5722 strlcpy(buffer, driver->name, len);
5723 return buffer;
5724}
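The helper is mainly a diagnostic aid: given a buffer it fills in the name of the driver bound to the device's parent, or leaves an empty string when there is none. A usage sketch along the lines of the TX watchdog timeout message; report_stuck_queue() is a hypothetical wrapper:

    #include <linux/netdevice.h>

    static void report_stuck_queue(struct net_device *dev)
    {
            char drivername[64];

            printk(KERN_WARNING
                   "NETDEV WATCHDOG: %s (%s): transmit queue timed out\n",
                   dev->name, netdev_drivername(dev, drivername, 64));
    }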
5725
4665079c 5726static void __net_exit netdev_exit(struct net *net)
881d966b
EB
5727{
5728 kfree(net->dev_name_head);
5729 kfree(net->dev_index_head);
5730}
5731
022cbae6 5732static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
5733 .init = netdev_init,
5734 .exit = netdev_exit,
5735};
5736
4665079c 5737static void __net_exit default_device_exit(struct net *net)
ce286d32 5738{
e008b5fc 5739 struct net_device *dev, *aux;
ce286d32 5740 /*
e008b5fc 5741 * Push all migratable network devices back to the
ce286d32
EB
5742 * initial network namespace
5743 */
5744 rtnl_lock();
e008b5fc 5745 for_each_netdev_safe(net, dev, aux) {
ce286d32 5746 int err;
aca51397 5747 char fb_name[IFNAMSIZ];
ce286d32
EB
5748
5749 /* Ignore unmovable devices (e.g. loopback) */
5750 if (dev->features & NETIF_F_NETNS_LOCAL)
5751 continue;
5752
e008b5fc
EB
5753 /* Leave virtual devices for the generic cleanup */
5754 if (dev->rtnl_link_ops)
5755 continue;
d0c082ce 5756
ce286d32 5757 /* Push remaining network devices to init_net */
aca51397
PE
5758 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
5759 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 5760 if (err) {
aca51397 5761 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
ce286d32 5762 __func__, dev->name, err);
aca51397 5763 BUG();
ce286d32
EB
5764 }
5765 }
5766 rtnl_unlock();
5767}
5768
04dc7f6b
EB
5769static void __net_exit default_device_exit_batch(struct list_head *net_list)
5770{
5771 /* At exit, all network devices must be removed from a network
5772 * namespace. Do this in the reverse order of registration.
5773 * Do this across as many network namespaces as possible to
5774 * improve batching efficiency.
5775 */
5776 struct net_device *dev;
5777 struct net *net;
5778 LIST_HEAD(dev_kill_list);
5779
5780 rtnl_lock();
5781 list_for_each_entry(net, net_list, exit_list) {
5782 for_each_netdev_reverse(net, dev) {
5783 if (dev->rtnl_link_ops)
5784 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
5785 else
5786 unregister_netdevice_queue(dev, &dev_kill_list);
5787 }
5788 }
5789 unregister_netdevice_many(&dev_kill_list);
5790 rtnl_unlock();
5791}
5792
022cbae6 5793static struct pernet_operations __net_initdata default_device_ops = {
ce286d32 5794 .exit = default_device_exit,
04dc7f6b 5795 .exit_batch = default_device_exit_batch,
ce286d32
EB
5796};
5797
1da177e4
LT
5798/*
5799 * Initialize the DEV module. At boot time this walks the device list and
5800 * unhooks any devices that fail to initialise (normally hardware not
5801 * present) and leaves us with a valid list of present and active devices.
5802 *
5803 */
5804
5805/*
5806 * This is called single threaded during boot, so no need
5807 * to take the rtnl semaphore.
5808 */
5809static int __init net_dev_init(void)
5810{
5811 int i, rc = -ENOMEM;
5812
5813 BUG_ON(!dev_boot_phase);
5814
1da177e4
LT
5815 if (dev_proc_init())
5816 goto out;
5817
8b41d188 5818 if (netdev_kobject_init())
1da177e4
LT
5819 goto out;
5820
5821 INIT_LIST_HEAD(&ptype_all);
82d8a867 5822 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
5823 INIT_LIST_HEAD(&ptype_base[i]);
5824
881d966b
EB
5825 if (register_pernet_subsys(&netdev_net_ops))
5826 goto out;
1da177e4
LT
5827
5828 /*
5829 * Initialise the packet receive queues.
5830 */
5831
6f912042 5832 for_each_possible_cpu(i) {
1da177e4
LT
5833 struct softnet_data *queue;
5834
5835 queue = &per_cpu(softnet_data, i);
5836 skb_queue_head_init(&queue->input_pkt_queue);
1da177e4
LT
5837 queue->completion_queue = NULL;
5838 INIT_LIST_HEAD(&queue->poll_list);
bea3348e 5839
df334545 5840#ifdef CONFIG_RPS
0a9627f2
TH
5841 queue->csd.func = trigger_softirq;
5842 queue->csd.info = queue;
5843 queue->csd.flags = 0;
1e94d72f 5844#endif
0a9627f2 5845
bea3348e
SH
5846 queue->backlog.poll = process_backlog;
5847 queue->backlog.weight = weight_p;
d565b0a1 5848 queue->backlog.gro_list = NULL;
4ae5544f 5849 queue->backlog.gro_count = 0;
1da177e4
LT
5850 }
5851
1da177e4
LT
5852 dev_boot_phase = 0;
5853
505d4f73
EB
5854 /* The loopback device is special: if any other network device
5855 * is present in a network namespace, the loopback device must
5856 * be present too. Since we now dynamically allocate and free
5857 * the loopback device, ensure this invariant is maintained by
5858 * keeping the loopback device as the first device on the
5859 * list of network devices, so that the loopback device is
5860 * the first device that appears and the last network device
5861 * that disappears.
5862 */
5863 if (register_pernet_device(&loopback_net_ops))
5864 goto out;
5865
5866 if (register_pernet_device(&default_device_ops))
5867 goto out;
5868
962cf36c
CM
5869 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
5870 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
5871
5872 hotcpu_notifier(dev_cpu_callback, 0);
5873 dst_init();
5874 dev_mcast_init();
5875 rc = 0;
5876out:
5877 return rc;
5878}
5879
5880subsys_initcall(net_dev_init);
5881
e88721f8
KK
5882static int __init initialize_hashrnd(void)
5883{
0a9627f2 5884 get_random_bytes(&hashrnd, sizeof(hashrnd));
e88721f8
KK
5885 return 0;
5886}
5887
5888late_initcall_sync(initialize_hashrnd);
5889