/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *	J Hadi Salim		:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>

#include "net-sysfs.h"

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */

#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client	client;
	spinlock_t		lock;
	cpumask_t		channel_mask;
	struct dma_chan		**channels;
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail(&dev->dev_list, &net->dev_base_head);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);
	return 0;
}

/* Device list removal */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del(&dev->dev_list);
	hlist_del(&dev->name_hlist);
	hlist_del(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

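/*
 * Illustrative only (not part of the upstream file): a protocol module
 * would typically embed a struct packet_type and hand it to dev_add_pack()
 * at init time, then remove it again on exit.  The names my_pt and my_rcv
 * below are hypothetical.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev);
 *
 *	static struct packet_type my_pt __read_mostly = {
 *		.type	= __constant_htons(ETH_P_IP),
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);		at module init
 *	dev_remove_pack(&my_pt);	at module exit
 */
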
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

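/*
 * For reference (not part of the upstream file): as parsed above, the
 * "netdev=" boot option takes up to four integers followed by the device
 * name, in the order irq, base_addr, mem_start, mem_end, for example:
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * The integer values land in the struct ifmap built by netdev_boot_setup()
 * and are later applied by netdev_boot_setup_check().
 */
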
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

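/*
 * Illustrative only: a caller that cannot hold the RTNL or dev_base_lock
 * uses the reference-taking lookup and drops the reference when done
 * (the interface name here is just an example):
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 */
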
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

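/*
 * For example, dev_valid_name() accepts names such as "eth0" or "wlan%d"
 * (the '%' is not rejected here; format expansion is validated elsewhere),
 * but rejects "", ".", "..", names of IFNAMSIZ or more characters, and any
 * name containing '/' or whitespace.
 */
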
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/*  avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

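/*
 * Illustrative only: a driver or stack component registering a device with
 * a wildcard name would typically do something like the following while
 * holding the RTNL (hypothetical sketch):
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;
 *
 * On success dev->name holds the expanded name (e.g. "eth2") and the
 * return value is the unit number that was assigned.
 */
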
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device; a format string such as "eth%d" can be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	err = device_rename(&dev->dev, dev->name);
	if (err) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return err;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		if (dev->ifalias) {
			kfree(dev->ifalias);
			dev->ifalias = NULL;
		}
		return 0;
	}

	dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
	if (!dev->ifalias)
		return -ENOMEM;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

void netdev_bonding_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	ASSERT_RTNL();

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);

	if (dev->validate_addr)
		ret = dev->validate_addr(dev);

	if (!ret && dev->open)
		ret = dev->open(dev);

	/*
	 *	If it went open OK then:
	 */

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		call_netdevice_notifiers(NETDEV_UP, dev);
	}

	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	ASSERT_RTNL();

	might_sleep();

	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	dev_deactivate(dev);

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 *	Tell people we are down
	 */
	call_netdevice_notifiers(NETDEV_DOWN, dev);

	return 0;
}

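/*
 * Illustrative only: both dev_open() and dev_close() must be called with
 * the RTNL held, e.g. (hypothetical sketch):
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	...
 *	dev_close(dev);
 *	rtnl_unlock();
 */
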
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
	    dev->ethtool_ops->set_flags) {
		u32 flags = dev->ethtool_ops->get_flags(dev);
		if (flags & ETH_FLAG_LRO) {
			flags &= ~ETH_FLAG_LRO;
			dev->ethtool_ops->set_flags(dev, flags);
		}
	}
	WARN_ON(dev->features & NETIF_F_LRO);
}
EXPORT_SYMBOL(dev_disable_lro);


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}

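/*
 * Illustrative only: a subsystem interested in device events registers a
 * notifier_block whose callback switches on the event type; in this chain
 * the callback's data pointer is the struct net_device itself.  The names
 * below are hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			...
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_notifier);
 */
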
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

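/*
 * Illustrative only: drivers typically pair these calls in their
 * suspend/resume handlers (hypothetical sketch for a PCI driver):
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		struct net_device *dev = pci_get_drvdata(pdev);
 *
 *		netif_device_detach(dev);
 *		... quiesce and power down the hardware ...
 *		return 0;
 *	}
 *
 * and then call netif_device_attach(dev) from the matching resume handler
 * once the hardware has been re-initialised.
 */
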
static bool can_checksum_protocol(unsigned long features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_IP_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_IPV6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)));
}

static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
{
	if (can_checksum_protocol(dev->features, skb->protocol))
		return true;

	if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		if (can_checksum_protocol(dev->features & dev->vlan_features,
					  veh->h_vlan_encapsulated_proto))
			return true;
	}

	return false;
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	BUG_ON(skb_shinfo(skb)->frag_list);

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}

static u32 simple_tx_hashrnd;
static int simple_tx_hashrnd_initialized = 0;

static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 addr1, addr2, ports;
	u32 hash, ihl;
	u8 ip_proto;

	if (unlikely(!simple_tx_hashrnd_initialized)) {
		get_random_bytes(&simple_tx_hashrnd, 4);
		simple_tx_hashrnd_initialized = 1;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip_proto = ip_hdr(skb)->protocol;
		addr1 = ip_hdr(skb)->saddr;
		addr2 = ip_hdr(skb)->daddr;
		ihl = ip_hdr(skb)->ihl;
		break;
	case htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
	default:
		return 0;
	}


	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
		break;

	default:
		ports = 0;
		break;
	}

	hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}

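/*
 * Note on the final mapping above: the 32-bit jhash result is scaled into
 * the range [0, real_num_tx_queues) by the 64-bit multiply and shift,
 * i.e. queue = (hash * nr_queues) / 2^32.  For example, with four TX
 * queues a hash of 0x80000000 selects queue 2.
 */
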
e8a0464c
DM
1747static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1748 struct sk_buff *skb)
1749{
fd2ea0a7
DM
1750 u16 queue_index = 0;
1751
eae792b7
DM
1752 if (dev->select_queue)
1753 queue_index = dev->select_queue(dev, skb);
8f0f2223
DM
1754 else if (dev->real_num_tx_queues > 1)
1755 queue_index = simple_tx_hash(dev, skb);
eae792b7 1756
fd2ea0a7
DM
1757 skb_set_queue_mapping(skb, queue_index);
1758 return netdev_get_tx_queue(dev, queue_index);
e8a0464c
DM
1759}
1760
d29f749e
DJ
1761/**
1762 * dev_queue_xmit - transmit a buffer
1763 * @skb: buffer to transmit
1764 *
1765 * Queue a buffer for transmission to a network device. The caller must
1766 * have set the device and priority and built the buffer before calling
1767 * this function. The function can be called from an interrupt.
1768 *
1769 * A negative errno code is returned on a failure. A success does not
1770 * guarantee the frame will be transmitted as it may be dropped due
1771 * to congestion or traffic shaping.
1772 *
1773 * -----------------------------------------------------------------------------------
1774 * I notice this method can also return errors from the queue disciplines,
1775 * including NET_XMIT_DROP, which is a positive value. So, errors can also
1776 * be positive.
1777 *
1778 * Regardless of the return value, the skb is consumed, so it is currently
1779 * difficult to retry a send to this method. (You can bump the ref count
1780 * before sending to hold a reference for retry if you are careful.)
1781 *
1782 * When calling this method, interrupts MUST be enabled. This is because
1783 * the BH enable code must have IRQs enabled so that it will not deadlock.
1784 * --BLG
1785 */
1da177e4
LT
1786int dev_queue_xmit(struct sk_buff *skb)
1787{
1788 struct net_device *dev = skb->dev;
dc2b4847 1789 struct netdev_queue *txq;
1da177e4
LT
1790 struct Qdisc *q;
1791 int rc = -ENOMEM;
1792
f6a78bfc
HX
1793 /* GSO will handle the following emulations directly. */
1794 if (netif_needs_gso(dev, skb))
1795 goto gso;
1796
1da177e4
LT
1797 if (skb_shinfo(skb)->frag_list &&
1798 !(dev->features & NETIF_F_FRAGLIST) &&
364c6bad 1799 __skb_linearize(skb))
1da177e4
LT
1800 goto out_kfree_skb;
1801
1802 /* Fragmented skb is linearized if device does not support SG,
1803 * or if at least one of fragments is in highmem and device
1804 * does not support DMA from it.
1805 */
1806 if (skb_shinfo(skb)->nr_frags &&
1807 (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
364c6bad 1808 __skb_linearize(skb))
1da177e4
LT
1809 goto out_kfree_skb;
1810
1811 /* If packet is not checksummed and device does not support
1812 * checksumming for this protocol, complete checksumming here.
1813 */
663ead3b
HX
1814 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1815 skb_set_transport_header(skb, skb->csum_start -
1816 skb_headroom(skb));
6de329e2
BH
1817 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
1818 goto out_kfree_skb;
663ead3b 1819 }
1da177e4 1820
f6a78bfc 1821gso:
4ec93edb
YH
1822 /* Disable soft irqs for various locks below. Also
1823 * stops preemption for RCU.
1da177e4 1824 */
4ec93edb 1825 rcu_read_lock_bh();
1da177e4 1826
eae792b7 1827 txq = dev_pick_tx(dev, skb);
b0e1e646 1828 q = rcu_dereference(txq->qdisc);
37437bb2 1829
1da177e4
LT
1830#ifdef CONFIG_NET_CLS_ACT
1831 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
1832#endif
1833 if (q->enqueue) {
5fb66229 1834 spinlock_t *root_lock = qdisc_lock(q);
37437bb2
DM
1835
1836 spin_lock(root_lock);
1837
a9312ae8 1838 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
96d20316 1839 kfree_skb(skb);
a9312ae8 1840 rc = NET_XMIT_DROP;
96d20316
DM
1841 } else {
1842 rc = qdisc_enqueue_root(skb, q);
1843 qdisc_run(q);
a9312ae8 1844 }
37437bb2
DM
1845 spin_unlock(root_lock);
1846
37437bb2 1847 goto out;
1da177e4
LT
1848 }
1849
1850 /* The device has no queue. Common case for software devices:
1851 loopback, all the sorts of tunnels...
1852
932ff279
HX
1853 Really, it is unlikely that netif_tx_lock protection is necessary
 1854 here. (e.g. loopback and IP tunnels are clean, ignoring statistics
1da177e4
LT
1855 counters.)
 1856 However, it is possible that they rely on the protection
 1857 we take here.
 1858
 1859 Check this and take the lock anyway; it is not prone to deadlocks.
 1860 Alternatively, use the noqueue qdisc - that is even simpler 8)
1861 */
1862 if (dev->flags & IFF_UP) {
1863 int cpu = smp_processor_id(); /* ok because BHs are off */
1864
c773e847 1865 if (txq->xmit_lock_owner != cpu) {
1da177e4 1866
c773e847 1867 HARD_TX_LOCK(dev, txq, cpu);
1da177e4 1868
fd2ea0a7 1869 if (!netif_tx_queue_stopped(txq)) {
1da177e4 1870 rc = 0;
fd2ea0a7 1871 if (!dev_hard_start_xmit(skb, dev, txq)) {
c773e847 1872 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
1873 goto out;
1874 }
1875 }
c773e847 1876 HARD_TX_UNLOCK(dev, txq);
1da177e4
LT
1877 if (net_ratelimit())
1878 printk(KERN_CRIT "Virtual device %s asks to "
1879 "queue packet!\n", dev->name);
1880 } else {
 1881 /* Recursion has been detected. It can happen,
 1882 * unfortunately. */
1883 if (net_ratelimit())
1884 printk(KERN_CRIT "Dead loop on virtual device "
1885 "%s, fix it urgently!\n", dev->name);
1886 }
1887 }
1888
1889 rc = -ENETDOWN;
d4828d85 1890 rcu_read_unlock_bh();
1da177e4
LT
1891
1892out_kfree_skb:
1893 kfree_skb(skb);
1894 return rc;
1895out:
d4828d85 1896 rcu_read_unlock_bh();
1da177e4
LT
1897 return rc;
1898}
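
A minimal sketch of the "bump the ref count before sending" idea from the comment block above. The helper name and the single-retry policy are assumptions, and a real caller would also have to consider that a qdisc may already have modified an skb it dropped:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: try dev_queue_xmit() once more if the first attempt failed. */
static int my_xmit_with_one_retry(struct sk_buff *skb)
{
	int rc;

	skb_get(skb);			/* extra reference keeps the skb alive across the attempt */
	rc = dev_queue_xmit(skb);	/* consumes one reference either way */
	if (rc < 0 || rc == NET_XMIT_DROP)
		return dev_queue_xmit(skb);	/* retry, handing over our extra reference */

	kfree_skb(skb);			/* accepted: just drop the extra reference we took */
	return rc;
}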
1899
1900
1901/*=======================================================================
1902 Receiver routines
1903 =======================================================================*/
1904
6b2bedc3
SH
1905int netdev_max_backlog __read_mostly = 1000;
1906int netdev_budget __read_mostly = 300;
1907int weight_p __read_mostly = 64; /* old backlog weight */
1da177e4
LT
1908
1909DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
1910
1911
1da177e4
LT
1912/**
1913 * netif_rx - post buffer to the network code
1914 * @skb: buffer to post
1915 *
1916 * This function receives a packet from a device driver and queues it for
1917 * the upper (protocol) levels to process. It always succeeds. The buffer
1918 * may be dropped during processing for congestion control or by the
1919 * protocol layers.
1920 *
1921 * return values:
1922 * NET_RX_SUCCESS (no congestion)
1da177e4
LT
1923 * NET_RX_DROP (packet was dropped)
1924 *
1925 */
1926
1927int netif_rx(struct sk_buff *skb)
1928{
1da177e4
LT
1929 struct softnet_data *queue;
1930 unsigned long flags;
1931
1932 /* if netpoll wants it, pretend we never saw it */
1933 if (netpoll_rx(skb))
1934 return NET_RX_DROP;
1935
b7aa0bf7 1936 if (!skb->tstamp.tv64)
a61bbcf2 1937 net_timestamp(skb);
1da177e4
LT
1938
1939 /*
 1940 * The code is arranged so that the path is shortest
 1941 * when the CPU is congested but still operating.
1942 */
1943 local_irq_save(flags);
1da177e4
LT
1944 queue = &__get_cpu_var(softnet_data);
1945
1946 __get_cpu_var(netdev_rx_stat).total++;
1947 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
1948 if (queue->input_pkt_queue.qlen) {
1da177e4 1949enqueue:
1da177e4 1950 __skb_queue_tail(&queue->input_pkt_queue, skb);
1da177e4 1951 local_irq_restore(flags);
34008d8c 1952 return NET_RX_SUCCESS;
1da177e4
LT
1953 }
1954
bea3348e 1955 napi_schedule(&queue->backlog);
1da177e4
LT
1956 goto enqueue;
1957 }
1958
1da177e4
LT
1959 __get_cpu_var(netdev_rx_stat).dropped++;
1960 local_irq_restore(flags);
1961
1962 kfree_skb(skb);
1963 return NET_RX_DROP;
1964}
1965
1966int netif_rx_ni(struct sk_buff *skb)
1967{
1968 int err;
1969
1970 preempt_disable();
1971 err = netif_rx(skb);
1972 if (local_softirq_pending())
1973 do_softirq();
1974 preempt_enable();
1975
1976 return err;
1977}
1978
1979EXPORT_SYMBOL(netif_rx_ni);
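
A sketch of the non-NAPI receive path that feeds netif_rx(); the my_driver_* name and the copy-based receive are assumptions for illustration only. From process context (e.g. a loopback-style software device), netif_rx_ni() would be used instead so that pending softirqs still get run:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Hypothetical interrupt-context receive: build an skb and hand it to the stack. */
static void my_driver_rx(struct net_device *dev, const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);	/* hard-IRQ/softirq context; use netif_rx_ni() from process context */
}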
1980
1da177e4
LT
1981static void net_tx_action(struct softirq_action *h)
1982{
1983 struct softnet_data *sd = &__get_cpu_var(softnet_data);
1984
1985 if (sd->completion_queue) {
1986 struct sk_buff *clist;
1987
1988 local_irq_disable();
1989 clist = sd->completion_queue;
1990 sd->completion_queue = NULL;
1991 local_irq_enable();
1992
1993 while (clist) {
1994 struct sk_buff *skb = clist;
1995 clist = clist->next;
1996
547b792c 1997 WARN_ON(atomic_read(&skb->users));
1da177e4
LT
1998 __kfree_skb(skb);
1999 }
2000 }
2001
2002 if (sd->output_queue) {
37437bb2 2003 struct Qdisc *head;
1da177e4
LT
2004
2005 local_irq_disable();
2006 head = sd->output_queue;
2007 sd->output_queue = NULL;
2008 local_irq_enable();
2009
2010 while (head) {
37437bb2
DM
2011 struct Qdisc *q = head;
2012 spinlock_t *root_lock;
2013
1da177e4
LT
2014 head = head->next_sched;
2015
5fb66229 2016 root_lock = qdisc_lock(q);
37437bb2 2017 if (spin_trylock(root_lock)) {
def82a1d
JP
2018 smp_mb__before_clear_bit();
2019 clear_bit(__QDISC_STATE_SCHED,
2020 &q->state);
37437bb2
DM
2021 qdisc_run(q);
2022 spin_unlock(root_lock);
1da177e4 2023 } else {
195648bb 2024 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 2025 &q->state)) {
195648bb 2026 __netif_reschedule(q);
e8a83e10
JP
2027 } else {
2028 smp_mb__before_clear_bit();
2029 clear_bit(__QDISC_STATE_SCHED,
2030 &q->state);
2031 }
1da177e4
LT
2032 }
2033 }
2034 }
2035}
2036
6f05f629
SH
2037static inline int deliver_skb(struct sk_buff *skb,
2038 struct packet_type *pt_prev,
2039 struct net_device *orig_dev)
1da177e4
LT
2040{
2041 atomic_inc(&skb->users);
f2ccd8fa 2042 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4
LT
2043}
2044
2045#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
6229e362 2046/* These hooks are defined here for ATM */
1da177e4
LT
2047struct net_bridge;
2048struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
2049 unsigned char *addr);
6229e362 2050void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
1da177e4 2051
6229e362
SH
2052/*
2053 * If bridge module is loaded call bridging hook.
2054 * returns NULL if packet was consumed.
2055 */
2056struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2057 struct sk_buff *skb) __read_mostly;
2058static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2059 struct packet_type **pt_prev, int *ret,
2060 struct net_device *orig_dev)
1da177e4
LT
2061{
2062 struct net_bridge_port *port;
2063
6229e362
SH
2064 if (skb->pkt_type == PACKET_LOOPBACK ||
2065 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2066 return skb;
1da177e4
LT
2067
2068 if (*pt_prev) {
6229e362 2069 *ret = deliver_skb(skb, *pt_prev, orig_dev);
1da177e4 2070 *pt_prev = NULL;
4ec93edb
YH
2071 }
2072
6229e362 2073 return br_handle_frame_hook(port, skb);
1da177e4
LT
2074}
2075#else
6229e362 2076#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
1da177e4
LT
2077#endif
2078
b863ceb7
PM
2079#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2080struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
2081EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2082
2083static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2084 struct packet_type **pt_prev,
2085 int *ret,
2086 struct net_device *orig_dev)
2087{
2088 if (skb->dev->macvlan_port == NULL)
2089 return skb;
2090
2091 if (*pt_prev) {
2092 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2093 *pt_prev = NULL;
2094 }
2095 return macvlan_handle_frame_hook(skb);
2096}
2097#else
2098#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2099#endif
2100
1da177e4
LT
2101#ifdef CONFIG_NET_CLS_ACT
2102/* TODO: Maybe we should just force sch_ingress to be compiled in
2103 * when CONFIG_NET_CLS_ACT is? Otherwise we currently pay for some useless
2104 * instructions (a compare and two extra stores) when it is not enabled
2105 * but CONFIG_NET_CLS_ACT is.
4ec93edb 2106 * NOTE: This doesn't remove any functionality; if you don't have
1da177e4
LT
2107 * the ingress scheduler, you just can't add policies on ingress.
2108 *
2109 */
4ec93edb 2110static int ing_filter(struct sk_buff *skb)
1da177e4 2111{
1da177e4 2112 struct net_device *dev = skb->dev;
f697c3e8 2113 u32 ttl = G_TC_RTTL(skb->tc_verd);
555353cf
DM
2114 struct netdev_queue *rxq;
2115 int result = TC_ACT_OK;
2116 struct Qdisc *q;
4ec93edb 2117
f697c3e8
HX
2118 if (MAX_RED_LOOP < ttl++) {
2119 printk(KERN_WARNING
2120 "Redir loop detected Dropping packet (%d->%d)\n",
2121 skb->iif, dev->ifindex);
2122 return TC_ACT_SHOT;
2123 }
1da177e4 2124
f697c3e8
HX
2125 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
2126 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
1da177e4 2127
555353cf
DM
2128 rxq = &dev->rx_queue;
2129
83874000 2130 q = rxq->qdisc;
8d50b53d 2131 if (q != &noop_qdisc) {
83874000 2132 spin_lock(qdisc_lock(q));
a9312ae8
DM
2133 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2134 result = qdisc_enqueue_root(skb, q);
83874000
DM
2135 spin_unlock(qdisc_lock(q));
2136 }
f697c3e8
HX
2137
2138 return result;
2139}
86e65da9 2140
f697c3e8
HX
2141static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2142 struct packet_type **pt_prev,
2143 int *ret, struct net_device *orig_dev)
2144{
8d50b53d 2145 if (skb->dev->rx_queue.qdisc == &noop_qdisc)
f697c3e8 2146 goto out;
1da177e4 2147
f697c3e8
HX
2148 if (*pt_prev) {
2149 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2150 *pt_prev = NULL;
2151 } else {
2152 /* Huh? Why does turning on AF_PACKET affect this? */
2153 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
1da177e4
LT
2154 }
2155
f697c3e8
HX
2156 switch (ing_filter(skb)) {
2157 case TC_ACT_SHOT:
2158 case TC_ACT_STOLEN:
2159 kfree_skb(skb);
2160 return NULL;
2161 }
2162
2163out:
2164 skb->tc_verd = 0;
2165 return skb;
1da177e4
LT
2166}
2167#endif
2168
bc1d0411
PM
2169/*
2170 * netif_nit_deliver - deliver received packets to network taps
2171 * @skb: buffer
2172 *
2173 * This function is used to deliver incoming packets to network
2174 * taps. It should be used when the normal netif_receive_skb path
2175 * is bypassed, for example because of VLAN acceleration.
2176 */
2177void netif_nit_deliver(struct sk_buff *skb)
2178{
2179 struct packet_type *ptype;
2180
2181 if (list_empty(&ptype_all))
2182 return;
2183
2184 skb_reset_network_header(skb);
2185 skb_reset_transport_header(skb);
2186 skb->mac_len = skb->network_header - skb->mac_header;
2187
2188 rcu_read_lock();
2189 list_for_each_entry_rcu(ptype, &ptype_all, list) {
2190 if (!ptype->dev || ptype->dev == skb->dev)
2191 deliver_skb(skb, ptype, skb->dev);
2192 }
2193 rcu_read_unlock();
2194}
2195
3b582cc1
SH
2196/**
2197 * netif_receive_skb - process receive buffer from network
2198 * @skb: buffer to process
2199 *
2200 * netif_receive_skb() is the main receive data processing function.
2201 * It always succeeds. The buffer may be dropped during processing
2202 * for congestion control or by the protocol layers.
2203 *
2204 * This function may only be called from softirq context and interrupts
2205 * should be enabled.
2206 *
2207 * Return values (usually ignored):
2208 * NET_RX_SUCCESS: no congestion
2209 * NET_RX_DROP: packet was dropped
2210 */
1da177e4
LT
2211int netif_receive_skb(struct sk_buff *skb)
2212{
2213 struct packet_type *ptype, *pt_prev;
f2ccd8fa 2214 struct net_device *orig_dev;
0d7a3681 2215 struct net_device *null_or_orig;
1da177e4 2216 int ret = NET_RX_DROP;
252e3346 2217 __be16 type;
1da177e4
LT
2218
2219 /* if we've gotten here through NAPI, check netpoll */
bea3348e 2220 if (netpoll_receive_skb(skb))
1da177e4
LT
2221 return NET_RX_DROP;
2222
b7aa0bf7 2223 if (!skb->tstamp.tv64)
a61bbcf2 2224 net_timestamp(skb);
1da177e4 2225
c01003c2
PM
2226 if (!skb->iif)
2227 skb->iif = skb->dev->ifindex;
86e65da9 2228
0d7a3681 2229 null_or_orig = NULL;
cc9bd5ce
JE
2230 orig_dev = skb->dev;
2231 if (orig_dev->master) {
0d7a3681
JE
2232 if (skb_bond_should_drop(skb))
2233 null_or_orig = orig_dev; /* deliver only exact match */
2234 else
2235 skb->dev = orig_dev->master;
cc9bd5ce 2236 }
8f903c70 2237
1da177e4
LT
2238 __get_cpu_var(netdev_rx_stat).total++;
2239
c1d2bbe1 2240 skb_reset_network_header(skb);
badff6d0 2241 skb_reset_transport_header(skb);
b0e380b1 2242 skb->mac_len = skb->network_header - skb->mac_header;
1da177e4
LT
2243
2244 pt_prev = NULL;
2245
2246 rcu_read_lock();
2247
b9f75f45
EB
2248 /* Don't receive packets in an exiting network namespace */
2249 if (!net_alive(dev_net(skb->dev)))
2250 goto out;
2251
1da177e4
LT
2252#ifdef CONFIG_NET_CLS_ACT
2253 if (skb->tc_verd & TC_NCLS) {
2254 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
2255 goto ncls;
2256 }
2257#endif
2258
2259 list_for_each_entry_rcu(ptype, &ptype_all, list) {
f982307f
JE
2260 if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2261 ptype->dev == orig_dev) {
4ec93edb 2262 if (pt_prev)
f2ccd8fa 2263 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
2264 pt_prev = ptype;
2265 }
2266 }
2267
2268#ifdef CONFIG_NET_CLS_ACT
f697c3e8
HX
2269 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
2270 if (!skb)
1da177e4 2271 goto out;
1da177e4
LT
2272ncls:
2273#endif
2274
6229e362 2275 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
b863ceb7
PM
2276 if (!skb)
2277 goto out;
2278 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
6229e362 2279 if (!skb)
1da177e4
LT
2280 goto out;
2281
2282 type = skb->protocol;
82d8a867
PE
2283 list_for_each_entry_rcu(ptype,
2284 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
1da177e4 2285 if (ptype->type == type &&
f982307f
JE
2286 (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
2287 ptype->dev == orig_dev)) {
4ec93edb 2288 if (pt_prev)
f2ccd8fa 2289 ret = deliver_skb(skb, pt_prev, orig_dev);
1da177e4
LT
2290 pt_prev = ptype;
2291 }
2292 }
2293
2294 if (pt_prev) {
f2ccd8fa 2295 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1da177e4
LT
2296 } else {
2297 kfree_skb(skb);
 2298 /* Jamal, now you will not be able to escape explaining
 2299 * to me how you were going to use this. :-)
2300 */
2301 ret = NET_RX_DROP;
2302 }
2303
2304out:
2305 rcu_read_unlock();
2306 return ret;
2307}
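
A sketch of the softirq-context caller the kernel-doc above has in mind: a NAPI ->poll() handler pushing frames into netif_receive_skb(). The my_priv layout and the my_fetch_rx_skb() stub are hypothetical stand-ins for driver-specific RX ring handling:

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>

struct my_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

/* Hypothetical helper: a real driver would pull the next completed frame off its RX ring. */
static struct sk_buff *my_fetch_rx_skb(struct my_priv *priv)
{
	return NULL;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = my_fetch_rx_skb(priv);

		if (!skb)
			break;
		skb->protocol = eth_type_trans(skb, priv->dev);
		netif_receive_skb(skb);		/* softirq context, interrupts enabled */
		work_done++;
	}
	if (work_done < budget)
		napi_complete(napi);		/* ring drained: allow rescheduling */
	return work_done;
}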
2308
6e583ce5
SH
2309/* Network device is going away, flush any packets still pending */
2310static void flush_backlog(void *arg)
2311{
2312 struct net_device *dev = arg;
2313 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2314 struct sk_buff *skb, *tmp;
2315
2316 skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2317 if (skb->dev == dev) {
2318 __skb_unlink(skb, &queue->input_pkt_queue);
2319 kfree_skb(skb);
2320 }
2321}
2322
bea3348e 2323static int process_backlog(struct napi_struct *napi, int quota)
1da177e4
LT
2324{
2325 int work = 0;
1da177e4
LT
2326 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2327 unsigned long start_time = jiffies;
2328
bea3348e
SH
2329 napi->weight = weight_p;
2330 do {
1da177e4 2331 struct sk_buff *skb;
1da177e4
LT
2332
2333 local_irq_disable();
2334 skb = __skb_dequeue(&queue->input_pkt_queue);
bea3348e
SH
2335 if (!skb) {
2336 __napi_complete(napi);
2337 local_irq_enable();
2338 break;
2339 }
1da177e4
LT
2340 local_irq_enable();
2341
1da177e4 2342 netif_receive_skb(skb);
bea3348e 2343 } while (++work < quota && jiffies == start_time);
1da177e4 2344
bea3348e
SH
2345 return work;
2346}
1da177e4 2347
bea3348e
SH
2348/**
2349 * __napi_schedule - schedule for receive
c4ea43c5 2350 * @n: entry to schedule
bea3348e
SH
2351 *
2352 * The entry's receive function will be scheduled to run
2353 */
b5606c2d 2354void __napi_schedule(struct napi_struct *n)
bea3348e
SH
2355{
2356 unsigned long flags;
1da177e4 2357
bea3348e
SH
2358 local_irq_save(flags);
2359 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2360 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2361 local_irq_restore(flags);
1da177e4 2362}
bea3348e
SH
2363EXPORT_SYMBOL(__napi_schedule);
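
The companion piece to a ->poll() handler like the sketch after netif_receive_skb(): the interrupt handler only masks its RX interrupt and schedules NAPI, which funnels into __napi_schedule() above. All my_* names, including the register-masking stub and the weight of 64, are assumptions:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct net_device *dev;
	struct napi_struct napi;
};

/* Hypothetical hardware helper: a real driver would poke its interrupt mask register. */
static void my_mask_rx_irq(struct my_priv *priv)
{
}

static int my_poll(struct napi_struct *napi, int budget);	/* as sketched earlier */

static irqreturn_t my_interrupt(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	my_mask_rx_irq(priv);		/* stop further RX interrupts */
	napi_schedule(&priv->napi);	/* ends up in __napi_schedule() */
	return IRQ_HANDLED;
}

static void my_setup_napi(struct net_device *dev, struct my_priv *priv)
{
	priv->dev = dev;
	netif_napi_add(dev, &priv->napi, my_poll, 64);
}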
2364
1da177e4
LT
2365
2366static void net_rx_action(struct softirq_action *h)
2367{
bea3348e 2368 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
1da177e4 2369 unsigned long start_time = jiffies;
51b0bded 2370 int budget = netdev_budget;
53fb95d3
MM
2371 void *have;
2372
1da177e4
LT
2373 local_irq_disable();
2374
bea3348e
SH
2375 while (!list_empty(list)) {
2376 struct napi_struct *n;
2377 int work, weight;
1da177e4 2378
bea3348e
SH
 2379 /* If softirq window is exhausted then punt.
2380 *
2381 * Note that this is a slight policy change from the
2382 * previous NAPI code, which would allow up to 2
2383 * jiffies to pass before breaking out. The test
2384 * used to be "jiffies - start_time > 1".
2385 */
2386 if (unlikely(budget <= 0 || jiffies != start_time))
1da177e4
LT
2387 goto softnet_break;
2388
2389 local_irq_enable();
2390
bea3348e
SH
2391 /* Even though interrupts have been re-enabled, this
2392 * access is safe because interrupts can only add new
2393 * entries to the tail of this list, and only ->poll()
2394 * calls can remove this head entry from the list.
2395 */
2396 n = list_entry(list->next, struct napi_struct, poll_list);
1da177e4 2397
bea3348e
SH
2398 have = netpoll_poll_lock(n);
2399
2400 weight = n->weight;
2401
0a7606c1
DM
2402 /* This NAPI_STATE_SCHED test is for avoiding a race
2403 * with netpoll's poll_napi(). Only the entity which
2404 * obtains the lock and sees NAPI_STATE_SCHED set will
2405 * actually make the ->poll() call. Therefore we avoid
 2406 * accidentally calling ->poll() when NAPI is not scheduled.
2407 */
2408 work = 0;
2409 if (test_bit(NAPI_STATE_SCHED, &n->state))
2410 work = n->poll(n, weight);
bea3348e
SH
2411
2412 WARN_ON_ONCE(work > weight);
2413
2414 budget -= work;
2415
2416 local_irq_disable();
2417
2418 /* Drivers must not modify the NAPI state if they
2419 * consume the entire weight. In such cases this code
2420 * still "owns" the NAPI instance and therefore can
2421 * move the instance around on the list at-will.
2422 */
fed17f30
DM
2423 if (unlikely(work == weight)) {
2424 if (unlikely(napi_disable_pending(n)))
2425 __napi_complete(n);
2426 else
2427 list_move_tail(&n->poll_list, list);
2428 }
bea3348e
SH
2429
2430 netpoll_poll_unlock(have);
1da177e4
LT
2431 }
2432out:
515e06c4 2433 local_irq_enable();
bea3348e 2434
db217334
CL
2435#ifdef CONFIG_NET_DMA
2436 /*
2437 * There may not be any more sk_buffs coming right now, so push
2438 * any pending DMA copies to hardware
2439 */
d379b01e
DW
2440 if (!cpus_empty(net_dma.channel_mask)) {
2441 int chan_idx;
0e12f848 2442 for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
d379b01e
DW
2443 struct dma_chan *chan = net_dma.channels[chan_idx];
2444 if (chan)
2445 dma_async_memcpy_issue_pending(chan);
2446 }
db217334
CL
2447 }
2448#endif
bea3348e 2449
1da177e4
LT
2450 return;
2451
2452softnet_break:
2453 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2454 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2455 goto out;
2456}
2457
2458static gifconf_func_t * gifconf_list [NPROTO];
2459
2460/**
2461 * register_gifconf - register a SIOCGIF handler
2462 * @family: Address family
2463 * @gifconf: Function handler
2464 *
2465 * Register protocol dependent address dumping routines. The handler
2466 * that is passed must not be freed or reused until it has been replaced
2467 * by another handler.
2468 */
2469int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2470{
2471 if (family >= NPROTO)
2472 return -EINVAL;
2473 gifconf_list[family] = gifconf;
2474 return 0;
2475}
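
A bare-bones sketch of a protocol hooking into SIOCGIFCONF via the registration function above; the handler body and my_proto_* names are placeholders, and the address family is left as a parameter rather than guessing a real one:

#include <linux/netdevice.h>

/* Hypothetical handler: with a NULL buffer, only report the space needed;
 * otherwise write one record per address this protocol owns on @dev. */
static int my_proto_gifconf(struct net_device *dev, char __user *buf, int len)
{
	return 0;	/* nothing to report in this stub */
}

static int my_proto_hook_gifconf(unsigned int family)
{
	return register_gifconf(family, my_proto_gifconf);
}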
2476
2477
2478/*
2479 * Map an interface index to its name (SIOCGIFNAME)
2480 */
2481
2482/*
2483 * We need this ioctl for efficient implementation of the
2484 * if_indextoname() function required by the IPv6 API. Without
2485 * it, we would have to search all the interfaces to find a
2486 * match. --pb
2487 */
2488
881d966b 2489static int dev_ifname(struct net *net, struct ifreq __user *arg)
1da177e4
LT
2490{
2491 struct net_device *dev;
2492 struct ifreq ifr;
2493
2494 /*
2495 * Fetch the caller's info block.
2496 */
2497
2498 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2499 return -EFAULT;
2500
2501 read_lock(&dev_base_lock);
881d966b 2502 dev = __dev_get_by_index(net, ifr.ifr_ifindex);
1da177e4
LT
2503 if (!dev) {
2504 read_unlock(&dev_base_lock);
2505 return -ENODEV;
2506 }
2507
2508 strcpy(ifr.ifr_name, dev->name);
2509 read_unlock(&dev_base_lock);
2510
2511 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2512 return -EFAULT;
2513 return 0;
2514}
2515
2516/*
2517 * Perform a SIOCGIFCONF call. This structure will change
2518 * size eventually, and there is nothing I can do about it.
2519 * Thus we will need a 'compatibility mode'.
2520 */
2521
881d966b 2522static int dev_ifconf(struct net *net, char __user *arg)
1da177e4
LT
2523{
2524 struct ifconf ifc;
2525 struct net_device *dev;
2526 char __user *pos;
2527 int len;
2528 int total;
2529 int i;
2530
2531 /*
2532 * Fetch the caller's info block.
2533 */
2534
2535 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2536 return -EFAULT;
2537
2538 pos = ifc.ifc_buf;
2539 len = ifc.ifc_len;
2540
2541 /*
2542 * Loop over the interfaces, and write an info block for each.
2543 */
2544
2545 total = 0;
881d966b 2546 for_each_netdev(net, dev) {
1da177e4
LT
2547 for (i = 0; i < NPROTO; i++) {
2548 if (gifconf_list[i]) {
2549 int done;
2550 if (!pos)
2551 done = gifconf_list[i](dev, NULL, 0);
2552 else
2553 done = gifconf_list[i](dev, pos + total,
2554 len - total);
2555 if (done < 0)
2556 return -EFAULT;
2557 total += done;
2558 }
2559 }
4ec93edb 2560 }
1da177e4
LT
2561
2562 /*
2563 * All done. Write the updated control block back to the caller.
2564 */
2565 ifc.ifc_len = total;
2566
2567 /*
2568 * Both BSD and Solaris return 0 here, so we do too.
2569 */
2570 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2571}
2572
2573#ifdef CONFIG_PROC_FS
2574/*
2575 * This is invoked by the /proc filesystem handler to display a device
2576 * in detail.
2577 */
7562f876 2578void *dev_seq_start(struct seq_file *seq, loff_t *pos)
9a429c49 2579 __acquires(dev_base_lock)
1da177e4 2580{
e372c414 2581 struct net *net = seq_file_net(seq);
7562f876 2582 loff_t off;
1da177e4 2583 struct net_device *dev;
1da177e4 2584
7562f876
PE
2585 read_lock(&dev_base_lock);
2586 if (!*pos)
2587 return SEQ_START_TOKEN;
1da177e4 2588
7562f876 2589 off = 1;
881d966b 2590 for_each_netdev(net, dev)
7562f876
PE
2591 if (off++ == *pos)
2592 return dev;
1da177e4 2593
7562f876 2594 return NULL;
1da177e4
LT
2595}
2596
2597void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2598{
e372c414 2599 struct net *net = seq_file_net(seq);
1da177e4 2600 ++*pos;
7562f876 2601 return v == SEQ_START_TOKEN ?
881d966b 2602 first_net_device(net) : next_net_device((struct net_device *)v);
1da177e4
LT
2603}
2604
2605void dev_seq_stop(struct seq_file *seq, void *v)
9a429c49 2606 __releases(dev_base_lock)
1da177e4
LT
2607{
2608 read_unlock(&dev_base_lock);
2609}
2610
2611static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2612{
c45d286e 2613 struct net_device_stats *stats = dev->get_stats(dev);
1da177e4 2614
5a1b5898
RR
2615 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2616 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2617 dev->name, stats->rx_bytes, stats->rx_packets,
2618 stats->rx_errors,
2619 stats->rx_dropped + stats->rx_missed_errors,
2620 stats->rx_fifo_errors,
2621 stats->rx_length_errors + stats->rx_over_errors +
2622 stats->rx_crc_errors + stats->rx_frame_errors,
2623 stats->rx_compressed, stats->multicast,
2624 stats->tx_bytes, stats->tx_packets,
2625 stats->tx_errors, stats->tx_dropped,
2626 stats->tx_fifo_errors, stats->collisions,
2627 stats->tx_carrier_errors +
2628 stats->tx_aborted_errors +
2629 stats->tx_window_errors +
2630 stats->tx_heartbeat_errors,
2631 stats->tx_compressed);
1da177e4
LT
2632}
2633
2634/*
2635 * Called from the PROCfs module. This now uses the new arbitrary sized
2636 * /proc/net interface to create /proc/net/dev
2637 */
2638static int dev_seq_show(struct seq_file *seq, void *v)
2639{
2640 if (v == SEQ_START_TOKEN)
2641 seq_puts(seq, "Inter-| Receive "
2642 " | Transmit\n"
2643 " face |bytes packets errs drop fifo frame "
2644 "compressed multicast|bytes packets errs "
2645 "drop fifo colls carrier compressed\n");
2646 else
2647 dev_seq_printf_stats(seq, v);
2648 return 0;
2649}
2650
2651static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2652{
2653 struct netif_rx_stats *rc = NULL;
2654
0c0b0aca 2655 while (*pos < nr_cpu_ids)
4ec93edb 2656 if (cpu_online(*pos)) {
1da177e4
LT
2657 rc = &per_cpu(netdev_rx_stat, *pos);
2658 break;
2659 } else
2660 ++*pos;
2661 return rc;
2662}
2663
2664static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2665{
2666 return softnet_get_online(pos);
2667}
2668
2669static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2670{
2671 ++*pos;
2672 return softnet_get_online(pos);
2673}
2674
2675static void softnet_seq_stop(struct seq_file *seq, void *v)
2676{
2677}
2678
2679static int softnet_seq_show(struct seq_file *seq, void *v)
2680{
2681 struct netif_rx_stats *s = v;
2682
2683 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
31aa02c5 2684 s->total, s->dropped, s->time_squeeze, 0,
c1ebcdb8
SH
2685 0, 0, 0, 0, /* was fastroute */
2686 s->cpu_collision );
1da177e4
LT
2687 return 0;
2688}
2689
f690808e 2690static const struct seq_operations dev_seq_ops = {
1da177e4
LT
2691 .start = dev_seq_start,
2692 .next = dev_seq_next,
2693 .stop = dev_seq_stop,
2694 .show = dev_seq_show,
2695};
2696
2697static int dev_seq_open(struct inode *inode, struct file *file)
2698{
e372c414
DL
2699 return seq_open_net(inode, file, &dev_seq_ops,
2700 sizeof(struct seq_net_private));
1da177e4
LT
2701}
2702
9a32144e 2703static const struct file_operations dev_seq_fops = {
1da177e4
LT
2704 .owner = THIS_MODULE,
2705 .open = dev_seq_open,
2706 .read = seq_read,
2707 .llseek = seq_lseek,
e372c414 2708 .release = seq_release_net,
1da177e4
LT
2709};
2710
f690808e 2711static const struct seq_operations softnet_seq_ops = {
1da177e4
LT
2712 .start = softnet_seq_start,
2713 .next = softnet_seq_next,
2714 .stop = softnet_seq_stop,
2715 .show = softnet_seq_show,
2716};
2717
2718static int softnet_seq_open(struct inode *inode, struct file *file)
2719{
2720 return seq_open(file, &softnet_seq_ops);
2721}
2722
9a32144e 2723static const struct file_operations softnet_seq_fops = {
1da177e4
LT
2724 .owner = THIS_MODULE,
2725 .open = softnet_seq_open,
2726 .read = seq_read,
2727 .llseek = seq_lseek,
2728 .release = seq_release,
2729};
2730
0e1256ff
SH
2731static void *ptype_get_idx(loff_t pos)
2732{
2733 struct packet_type *pt = NULL;
2734 loff_t i = 0;
2735 int t;
2736
2737 list_for_each_entry_rcu(pt, &ptype_all, list) {
2738 if (i == pos)
2739 return pt;
2740 ++i;
2741 }
2742
82d8a867 2743 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
0e1256ff
SH
2744 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2745 if (i == pos)
2746 return pt;
2747 ++i;
2748 }
2749 }
2750 return NULL;
2751}
2752
2753static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
72348a42 2754 __acquires(RCU)
0e1256ff
SH
2755{
2756 rcu_read_lock();
2757 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2758}
2759
2760static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2761{
2762 struct packet_type *pt;
2763 struct list_head *nxt;
2764 int hash;
2765
2766 ++*pos;
2767 if (v == SEQ_START_TOKEN)
2768 return ptype_get_idx(0);
2769
2770 pt = v;
2771 nxt = pt->list.next;
2772 if (pt->type == htons(ETH_P_ALL)) {
2773 if (nxt != &ptype_all)
2774 goto found;
2775 hash = 0;
2776 nxt = ptype_base[0].next;
2777 } else
82d8a867 2778 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
0e1256ff
SH
2779
2780 while (nxt == &ptype_base[hash]) {
82d8a867 2781 if (++hash >= PTYPE_HASH_SIZE)
0e1256ff
SH
2782 return NULL;
2783 nxt = ptype_base[hash].next;
2784 }
2785found:
2786 return list_entry(nxt, struct packet_type, list);
2787}
2788
2789static void ptype_seq_stop(struct seq_file *seq, void *v)
72348a42 2790 __releases(RCU)
0e1256ff
SH
2791{
2792 rcu_read_unlock();
2793}
2794
2795static void ptype_seq_decode(struct seq_file *seq, void *sym)
2796{
2797#ifdef CONFIG_KALLSYMS
2798 unsigned long offset = 0, symsize;
2799 const char *symname;
2800 char *modname;
2801 char namebuf[128];
2802
2803 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2804 &modname, namebuf);
2805
2806 if (symname) {
2807 char *delim = ":";
2808
2809 if (!modname)
2810 modname = delim = "";
2811 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2812 symname, offset);
2813 return;
2814 }
2815#endif
2816
2817 seq_printf(seq, "[%p]", sym);
2818}
2819
2820static int ptype_seq_show(struct seq_file *seq, void *v)
2821{
2822 struct packet_type *pt = v;
2823
2824 if (v == SEQ_START_TOKEN)
2825 seq_puts(seq, "Type Device Function\n");
c346dca1 2826 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
0e1256ff
SH
2827 if (pt->type == htons(ETH_P_ALL))
2828 seq_puts(seq, "ALL ");
2829 else
2830 seq_printf(seq, "%04x", ntohs(pt->type));
2831
2832 seq_printf(seq, " %-8s ",
2833 pt->dev ? pt->dev->name : "");
2834 ptype_seq_decode(seq, pt->func);
2835 seq_putc(seq, '\n');
2836 }
2837
2838 return 0;
2839}
2840
2841static const struct seq_operations ptype_seq_ops = {
2842 .start = ptype_seq_start,
2843 .next = ptype_seq_next,
2844 .stop = ptype_seq_stop,
2845 .show = ptype_seq_show,
2846};
2847
2848static int ptype_seq_open(struct inode *inode, struct file *file)
2849{
2feb27db
PE
2850 return seq_open_net(inode, file, &ptype_seq_ops,
2851 sizeof(struct seq_net_private));
0e1256ff
SH
2852}
2853
2854static const struct file_operations ptype_seq_fops = {
2855 .owner = THIS_MODULE,
2856 .open = ptype_seq_open,
2857 .read = seq_read,
2858 .llseek = seq_lseek,
2feb27db 2859 .release = seq_release_net,
0e1256ff
SH
2860};
2861
2862
4665079c 2863static int __net_init dev_proc_net_init(struct net *net)
1da177e4
LT
2864{
2865 int rc = -ENOMEM;
2866
881d966b 2867 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
1da177e4 2868 goto out;
881d966b 2869 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
1da177e4 2870 goto out_dev;
881d966b 2871 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
457c4cbc 2872 goto out_softnet;
0e1256ff 2873
881d966b 2874 if (wext_proc_init(net))
457c4cbc 2875 goto out_ptype;
1da177e4
LT
2876 rc = 0;
2877out:
2878 return rc;
457c4cbc 2879out_ptype:
881d966b 2880 proc_net_remove(net, "ptype");
1da177e4 2881out_softnet:
881d966b 2882 proc_net_remove(net, "softnet_stat");
1da177e4 2883out_dev:
881d966b 2884 proc_net_remove(net, "dev");
1da177e4
LT
2885 goto out;
2886}
881d966b 2887
4665079c 2888static void __net_exit dev_proc_net_exit(struct net *net)
881d966b
EB
2889{
2890 wext_proc_exit(net);
2891
2892 proc_net_remove(net, "ptype");
2893 proc_net_remove(net, "softnet_stat");
2894 proc_net_remove(net, "dev");
2895}
2896
022cbae6 2897static struct pernet_operations __net_initdata dev_proc_ops = {
881d966b
EB
2898 .init = dev_proc_net_init,
2899 .exit = dev_proc_net_exit,
2900};
2901
2902static int __init dev_proc_init(void)
2903{
2904 return register_pernet_subsys(&dev_proc_ops);
2905}
1da177e4
LT
2906#else
2907#define dev_proc_init() 0
2908#endif /* CONFIG_PROC_FS */
2909
2910
2911/**
2912 * netdev_set_master - set up master/slave pair
2913 * @slave: slave device
2914 * @master: new master device
2915 *
2916 * Changes the master device of the slave. Pass %NULL to break the
2917 * bonding. The caller must hold the RTNL semaphore. On a failure
2918 * a negative errno code is returned. On success the reference counts
2919 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2920 * function returns zero.
2921 */
2922int netdev_set_master(struct net_device *slave, struct net_device *master)
2923{
2924 struct net_device *old = slave->master;
2925
2926 ASSERT_RTNL();
2927
2928 if (master) {
2929 if (old)
2930 return -EBUSY;
2931 dev_hold(master);
2932 }
2933
2934 slave->master = master;
4ec93edb 2935
1da177e4
LT
2936 synchronize_net();
2937
2938 if (old)
2939 dev_put(old);
2940
2941 if (master)
2942 slave->flags |= IFF_SLAVE;
2943 else
2944 slave->flags &= ~IFF_SLAVE;
2945
2946 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2947 return 0;
2948}
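
A short sketch of the bonding-style caller the kernel-doc above describes; the my_bond_* names are assumptions, and the key point is only that the RTNL semaphore is already held when netdev_set_master() is called:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical enslave path, invoked with the RTNL semaphore held. */
static int my_bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
{
	int err;

	ASSERT_RTNL();

	err = netdev_set_master(slave_dev, bond_dev);
	if (err)
		return err;
	/* driver-private slave bookkeeping would follow here */
	return 0;
}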
2949
dad9b335 2950static int __dev_set_promiscuity(struct net_device *dev, int inc)
1da177e4
LT
2951{
2952 unsigned short old_flags = dev->flags;
2953
24023451
PM
2954 ASSERT_RTNL();
2955
dad9b335
WC
2956 dev->flags |= IFF_PROMISC;
2957 dev->promiscuity += inc;
2958 if (dev->promiscuity == 0) {
2959 /*
2960 * Avoid overflow.
2961 * If inc causes overflow, untouch promisc and return error.
2962 */
2963 if (inc < 0)
2964 dev->flags &= ~IFF_PROMISC;
2965 else {
2966 dev->promiscuity -= inc;
2967 printk(KERN_WARNING "%s: promiscuity touches roof, "
2968 "set promiscuity failed, promiscuity feature "
2969 "of device might be broken.\n", dev->name);
2970 return -EOVERFLOW;
2971 }
2972 }
52609c0b 2973 if (dev->flags != old_flags) {
1da177e4
LT
2974 printk(KERN_INFO "device %s %s promiscuous mode\n",
2975 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
4ec93edb 2976 "left");
7759db82
KHK
2977 if (audit_enabled)
2978 audit_log(current->audit_context, GFP_ATOMIC,
2979 AUDIT_ANOM_PROMISCUOUS,
2980 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
2981 dev->name, (dev->flags & IFF_PROMISC),
2982 (old_flags & IFF_PROMISC),
2983 audit_get_loginuid(current),
2984 current->uid, current->gid,
2985 audit_get_sessionid(current));
24023451
PM
2986
2987 if (dev->change_rx_flags)
2988 dev->change_rx_flags(dev, IFF_PROMISC);
1da177e4 2989 }
dad9b335 2990 return 0;
1da177e4
LT
2991}
2992
4417da66
PM
2993/**
2994 * dev_set_promiscuity - update promiscuity count on a device
2995 * @dev: device
2996 * @inc: modifier
2997 *
2998 * Add or remove promiscuity from a device. While the count in the device
2999 * remains above zero the interface remains promiscuous. Once it hits zero
 3000 * the device reverts to normal filtering operation. A negative inc
3001 * value is used to drop promiscuity on the device.
dad9b335 3002 * Return 0 if successful or a negative errno code on error.
4417da66 3003 */
dad9b335 3004int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66
PM
3005{
3006 unsigned short old_flags = dev->flags;
dad9b335 3007 int err;
4417da66 3008
dad9b335 3009 err = __dev_set_promiscuity(dev, inc);
4b5a698e 3010 if (err < 0)
dad9b335 3011 return err;
4417da66
PM
3012 if (dev->flags != old_flags)
3013 dev_set_rx_mode(dev);
dad9b335 3014 return err;
4417da66
PM
3015}
3016
1da177e4
LT
3017/**
3018 * dev_set_allmulti - update allmulti count on a device
3019 * @dev: device
3020 * @inc: modifier
3021 *
3022 * Add or remove reception of all multicast frames to a device. While the
 3023 * count in the device remains above zero the interface keeps listening
 3024 * to all multicast frames. Once it hits zero the device reverts to normal
3025 * filtering operation. A negative @inc value is used to drop the counter
3026 * when releasing a resource needing all multicasts.
dad9b335 3027 * Return 0 if successful or a negative errno code on error.
1da177e4
LT
3028 */
3029
dad9b335 3030int dev_set_allmulti(struct net_device *dev, int inc)
1da177e4
LT
3031{
3032 unsigned short old_flags = dev->flags;
3033
24023451
PM
3034 ASSERT_RTNL();
3035
1da177e4 3036 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
3037 dev->allmulti += inc;
3038 if (dev->allmulti == 0) {
3039 /*
3040 * Avoid overflow.
3041 * If inc causes overflow, untouch allmulti and return error.
3042 */
3043 if (inc < 0)
3044 dev->flags &= ~IFF_ALLMULTI;
3045 else {
3046 dev->allmulti -= inc;
3047 printk(KERN_WARNING "%s: allmulti touches roof, "
3048 "set allmulti failed, allmulti feature of "
3049 "device might be broken.\n", dev->name);
3050 return -EOVERFLOW;
3051 }
3052 }
24023451
PM
3053 if (dev->flags ^ old_flags) {
3054 if (dev->change_rx_flags)
3055 dev->change_rx_flags(dev, IFF_ALLMULTI);
4417da66 3056 dev_set_rx_mode(dev);
24023451 3057 }
dad9b335 3058 return 0;
4417da66
PM
3059}
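
Both counters above are reference counts, so a user only ever passes a +1/-1 delta and never writes dev->flags directly. A sketch of a packet-capture style user with hypothetical my_capture_* names; dev_set_allmulti() is driven in exactly the same way:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int my_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one promiscuity reference */
	rtnl_unlock();
	return err;
}

static void my_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference again */
	rtnl_unlock();
}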
3060
3061/*
3062 * Upload unicast and multicast address lists to device and
3063 * configure RX filtering. When the device doesn't support unicast
53ccaae1 3064 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
3065 * are present.
3066 */
3067void __dev_set_rx_mode(struct net_device *dev)
3068{
3069 /* dev_open will call this function so the list will stay sane. */
3070 if (!(dev->flags&IFF_UP))
3071 return;
3072
3073 if (!netif_device_present(dev))
40b77c94 3074 return;
4417da66
PM
3075
3076 if (dev->set_rx_mode)
3077 dev->set_rx_mode(dev);
3078 else {
3079 /* Unicast addresses changes may only happen under the rtnl,
3080 * therefore calling __dev_set_promiscuity here is safe.
3081 */
3082 if (dev->uc_count > 0 && !dev->uc_promisc) {
3083 __dev_set_promiscuity(dev, 1);
3084 dev->uc_promisc = 1;
3085 } else if (dev->uc_count == 0 && dev->uc_promisc) {
3086 __dev_set_promiscuity(dev, -1);
3087 dev->uc_promisc = 0;
3088 }
3089
3090 if (dev->set_multicast_list)
3091 dev->set_multicast_list(dev);
3092 }
3093}
3094
3095void dev_set_rx_mode(struct net_device *dev)
3096{
b9e40857 3097 netif_addr_lock_bh(dev);
4417da66 3098 __dev_set_rx_mode(dev);
b9e40857 3099 netif_addr_unlock_bh(dev);
1da177e4
LT
3100}
3101
61cbc2fc
PM
3102int __dev_addr_delete(struct dev_addr_list **list, int *count,
3103 void *addr, int alen, int glbl)
bf742482
PM
3104{
3105 struct dev_addr_list *da;
3106
3107 for (; (da = *list) != NULL; list = &da->next) {
3108 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3109 alen == da->da_addrlen) {
3110 if (glbl) {
3111 int old_glbl = da->da_gusers;
3112 da->da_gusers = 0;
3113 if (old_glbl == 0)
3114 break;
3115 }
3116 if (--da->da_users)
3117 return 0;
3118
3119 *list = da->next;
3120 kfree(da);
61cbc2fc 3121 (*count)--;
bf742482
PM
3122 return 0;
3123 }
3124 }
3125 return -ENOENT;
3126}
3127
61cbc2fc
PM
3128int __dev_addr_add(struct dev_addr_list **list, int *count,
3129 void *addr, int alen, int glbl)
bf742482
PM
3130{
3131 struct dev_addr_list *da;
3132
3133 for (da = *list; da != NULL; da = da->next) {
3134 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
3135 da->da_addrlen == alen) {
3136 if (glbl) {
3137 int old_glbl = da->da_gusers;
3138 da->da_gusers = 1;
3139 if (old_glbl)
3140 return 0;
3141 }
3142 da->da_users++;
3143 return 0;
3144 }
3145 }
3146
12aa343a 3147 da = kzalloc(sizeof(*da), GFP_ATOMIC);
bf742482
PM
3148 if (da == NULL)
3149 return -ENOMEM;
3150 memcpy(da->da_addr, addr, alen);
3151 da->da_addrlen = alen;
3152 da->da_users = 1;
3153 da->da_gusers = glbl ? 1 : 0;
3154 da->next = *list;
3155 *list = da;
61cbc2fc 3156 (*count)++;
bf742482
PM
3157 return 0;
3158}
3159
4417da66
PM
3160/**
3161 * dev_unicast_delete - Release secondary unicast address.
3162 * @dev: device
0ed72ec4
RD
3163 * @addr: address to delete
3164 * @alen: length of @addr
4417da66
PM
3165 *
3166 * Release reference to a secondary unicast address and remove it
0ed72ec4 3167 * from the device if the reference count drops to zero.
4417da66
PM
3168 *
3169 * The caller must hold the rtnl_mutex.
3170 */
3171int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
3172{
3173 int err;
3174
3175 ASSERT_RTNL();
3176
b9e40857 3177 netif_addr_lock_bh(dev);
61cbc2fc
PM
3178 err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3179 if (!err)
4417da66 3180 __dev_set_rx_mode(dev);
b9e40857 3181 netif_addr_unlock_bh(dev);
4417da66
PM
3182 return err;
3183}
3184EXPORT_SYMBOL(dev_unicast_delete);
3185
3186/**
3187 * dev_unicast_add - add a secondary unicast address
3188 * @dev: device
5dbaec5d 3189 * @addr: address to add
0ed72ec4 3190 * @alen: length of @addr
4417da66
PM
3191 *
3192 * Add a secondary unicast address to the device or increase
3193 * the reference count if it already exists.
3194 *
3195 * The caller must hold the rtnl_mutex.
3196 */
3197int dev_unicast_add(struct net_device *dev, void *addr, int alen)
3198{
3199 int err;
3200
3201 ASSERT_RTNL();
3202
b9e40857 3203 netif_addr_lock_bh(dev);
61cbc2fc
PM
3204 err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
3205 if (!err)
4417da66 3206 __dev_set_rx_mode(dev);
b9e40857 3207 netif_addr_unlock_bh(dev);
4417da66
PM
3208 return err;
3209}
3210EXPORT_SYMBOL(dev_unicast_add);
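
A sketch of a caller managing a secondary unicast address, for instance one owned by an upper-layer device; the my_* helpers and the const-cast are only illustrative, and the rtnl_mutex requirement from the kernel-doc is what matters:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int my_claim_secondary_addr(struct net_device *dev, const u8 *mac)
{
	int err;

	rtnl_lock();
	err = dev_unicast_add(dev, (void *)mac, dev->addr_len);
	rtnl_unlock();
	return err;
}

static void my_release_secondary_addr(struct net_device *dev, const u8 *mac)
{
	rtnl_lock();
	dev_unicast_delete(dev, (void *)mac, dev->addr_len);
	rtnl_unlock();
}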
3211
e83a2ea8
CL
3212int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
3213 struct dev_addr_list **from, int *from_count)
3214{
3215 struct dev_addr_list *da, *next;
3216 int err = 0;
3217
3218 da = *from;
3219 while (da != NULL) {
3220 next = da->next;
3221 if (!da->da_synced) {
3222 err = __dev_addr_add(to, to_count,
3223 da->da_addr, da->da_addrlen, 0);
3224 if (err < 0)
3225 break;
3226 da->da_synced = 1;
3227 da->da_users++;
3228 } else if (da->da_users == 1) {
3229 __dev_addr_delete(to, to_count,
3230 da->da_addr, da->da_addrlen, 0);
3231 __dev_addr_delete(from, from_count,
3232 da->da_addr, da->da_addrlen, 0);
3233 }
3234 da = next;
3235 }
3236 return err;
3237}
3238
3239void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
3240 struct dev_addr_list **from, int *from_count)
3241{
3242 struct dev_addr_list *da, *next;
3243
3244 da = *from;
3245 while (da != NULL) {
3246 next = da->next;
3247 if (da->da_synced) {
3248 __dev_addr_delete(to, to_count,
3249 da->da_addr, da->da_addrlen, 0);
3250 da->da_synced = 0;
3251 __dev_addr_delete(from, from_count,
3252 da->da_addr, da->da_addrlen, 0);
3253 }
3254 da = next;
3255 }
3256}
3257
3258/**
3259 * dev_unicast_sync - Synchronize device's unicast list to another device
3260 * @to: destination device
3261 * @from: source device
3262 *
3263 * Add newly added addresses to the destination device and release
3264 * addresses that have no users left. The source device must be
 3265 * locked by netif_addr_lock_bh.
3266 *
3267 * This function is intended to be called from the dev->set_rx_mode
3268 * function of layered software devices.
3269 */
3270int dev_unicast_sync(struct net_device *to, struct net_device *from)
3271{
3272 int err = 0;
3273
b9e40857 3274 netif_addr_lock_bh(to);
e83a2ea8
CL
3275 err = __dev_addr_sync(&to->uc_list, &to->uc_count,
3276 &from->uc_list, &from->uc_count);
3277 if (!err)
3278 __dev_set_rx_mode(to);
b9e40857 3279 netif_addr_unlock_bh(to);
e83a2ea8
CL
3280 return err;
3281}
3282EXPORT_SYMBOL(dev_unicast_sync);
3283
3284/**
bc2cda1e 3285 * dev_unicast_unsync - Remove synchronized addresses from the destination device
e83a2ea8
CL
3286 * @to: destination device
3287 * @from: source device
3288 *
3289 * Remove all addresses that were added to the destination device by
3290 * dev_unicast_sync(). This function is intended to be called from the
3291 * dev->stop function of layered software devices.
3292 */
3293void dev_unicast_unsync(struct net_device *to, struct net_device *from)
3294{
b9e40857 3295 netif_addr_lock_bh(from);
e308a5d8 3296 netif_addr_lock(to);
e83a2ea8
CL
3297
3298 __dev_addr_unsync(&to->uc_list, &to->uc_count,
3299 &from->uc_list, &from->uc_count);
3300 __dev_set_rx_mode(to);
3301
e308a5d8 3302 netif_addr_unlock(to);
b9e40857 3303 netif_addr_unlock_bh(from);
e83a2ea8
CL
3304}
3305EXPORT_SYMBOL(dev_unicast_unsync);
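
A sketch of the layered-device usage both kernel-docs describe: the upper device mirrors its unicast list onto the lower device from ->set_rx_mode and tears it down in ->stop. The my_upper_priv layout and its lowerdev field are assumptions:

#include <linux/netdevice.h>

struct my_upper_priv {
	struct net_device *lowerdev;	/* the real device underneath */
};

static void my_upper_set_rx_mode(struct net_device *dev)
{
	struct my_upper_priv *priv = netdev_priv(dev);

	dev_unicast_sync(priv->lowerdev, dev);
}

static int my_upper_stop(struct net_device *dev)
{
	struct my_upper_priv *priv = netdev_priv(dev);

	dev_unicast_unsync(priv->lowerdev, dev);
	return 0;
}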
3306
12972621
DC
3307static void __dev_addr_discard(struct dev_addr_list **list)
3308{
3309 struct dev_addr_list *tmp;
3310
3311 while (*list != NULL) {
3312 tmp = *list;
3313 *list = tmp->next;
3314 if (tmp->da_users > tmp->da_gusers)
3315 printk("__dev_addr_discard: address leakage! "
3316 "da_users=%d\n", tmp->da_users);
3317 kfree(tmp);
3318 }
3319}
3320
26cc2522 3321static void dev_addr_discard(struct net_device *dev)
4417da66 3322{
b9e40857 3323 netif_addr_lock_bh(dev);
26cc2522 3324
4417da66
PM
3325 __dev_addr_discard(&dev->uc_list);
3326 dev->uc_count = 0;
4417da66 3327
456ad75c
DC
3328 __dev_addr_discard(&dev->mc_list);
3329 dev->mc_count = 0;
26cc2522 3330
b9e40857 3331 netif_addr_unlock_bh(dev);
456ad75c
DC
3332}
3333
1da177e4
LT
3334unsigned dev_get_flags(const struct net_device *dev)
3335{
3336 unsigned flags;
3337
3338 flags = (dev->flags & ~(IFF_PROMISC |
3339 IFF_ALLMULTI |
b00055aa
SR
3340 IFF_RUNNING |
3341 IFF_LOWER_UP |
3342 IFF_DORMANT)) |
1da177e4
LT
3343 (dev->gflags & (IFF_PROMISC |
3344 IFF_ALLMULTI));
3345
b00055aa
SR
3346 if (netif_running(dev)) {
3347 if (netif_oper_up(dev))
3348 flags |= IFF_RUNNING;
3349 if (netif_carrier_ok(dev))
3350 flags |= IFF_LOWER_UP;
3351 if (netif_dormant(dev))
3352 flags |= IFF_DORMANT;
3353 }
1da177e4
LT
3354
3355 return flags;
3356}
3357
3358int dev_change_flags(struct net_device *dev, unsigned flags)
3359{
7c355f53 3360 int ret, changes;
1da177e4
LT
3361 int old_flags = dev->flags;
3362
24023451
PM
3363 ASSERT_RTNL();
3364
1da177e4
LT
3365 /*
3366 * Set the flags on our device.
3367 */
3368
3369 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
3370 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
3371 IFF_AUTOMEDIA)) |
3372 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
3373 IFF_ALLMULTI));
3374
3375 /*
3376 * Load in the correct multicast list now the flags have changed.
3377 */
3378
0e91796e 3379 if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
24023451
PM
3380 dev->change_rx_flags(dev, IFF_MULTICAST);
3381
4417da66 3382 dev_set_rx_mode(dev);
1da177e4
LT
3383
3384 /*
 3385 * Have we downed the interface? We handle IFF_UP ourselves
3386 * according to user attempts to set it, rather than blindly
3387 * setting it.
3388 */
3389
3390 ret = 0;
3391 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
3392 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
3393
3394 if (!ret)
4417da66 3395 dev_set_rx_mode(dev);
1da177e4
LT
3396 }
3397
3398 if (dev->flags & IFF_UP &&
3399 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
3400 IFF_VOLATILE)))
056925ab 3401 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1da177e4
LT
3402
3403 if ((flags ^ dev->gflags) & IFF_PROMISC) {
3404 int inc = (flags & IFF_PROMISC) ? +1 : -1;
3405 dev->gflags ^= IFF_PROMISC;
3406 dev_set_promiscuity(dev, inc);
3407 }
3408
3409 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
 3410 is important. Some (broken) drivers set IFF_PROMISC when
 3411 IFF_ALLMULTI is requested, without asking us and without reporting it.
3412 */
3413 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3414 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3415 dev->gflags ^= IFF_ALLMULTI;
3416 dev_set_allmulti(dev, inc);
3417 }
3418
7c355f53
TG
3419 /* Exclude state transition flags, already notified */
3420 changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3421 if (changes)
3422 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
1da177e4
LT
3423
3424 return ret;
3425}
3426
3427int dev_set_mtu(struct net_device *dev, int new_mtu)
3428{
3429 int err;
3430
3431 if (new_mtu == dev->mtu)
3432 return 0;
3433
3434 /* MTU must be positive. */
3435 if (new_mtu < 0)
3436 return -EINVAL;
3437
3438 if (!netif_device_present(dev))
3439 return -ENODEV;
3440
3441 err = 0;
3442 if (dev->change_mtu)
3443 err = dev->change_mtu(dev, new_mtu);
3444 else
3445 dev->mtu = new_mtu;
3446 if (!err && dev->flags & IFF_UP)
056925ab 3447 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
1da177e4
LT
3448 return err;
3449}
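
An in-kernel caller sketch for dev_set_mtu(); the lookup-by-name helper is hypothetical, and dev_change_flags() above is driven the same way, i.e. under the RTNL lock (dev_ifsioc() below is the real user of both):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/errno.h>

/* Hypothetical helper: change the MTU of a named device in @net. */
static int my_set_mtu_by_name(struct net *net, const char *name, int new_mtu)
{
	struct net_device *dev;
	int err;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	err = dev ? dev_set_mtu(dev, new_mtu) : -ENODEV;
	rtnl_unlock();
	return err;
}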
3450
3451int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3452{
3453 int err;
3454
3455 if (!dev->set_mac_address)
3456 return -EOPNOTSUPP;
3457 if (sa->sa_family != dev->type)
3458 return -EINVAL;
3459 if (!netif_device_present(dev))
3460 return -ENODEV;
3461 err = dev->set_mac_address(dev, sa);
3462 if (!err)
056925ab 3463 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
3464 return err;
3465}
3466
3467/*
14e3e079 3468 * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
1da177e4 3469 */
14e3e079 3470static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
1da177e4
LT
3471{
3472 int err;
881d966b 3473 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
1da177e4
LT
3474
3475 if (!dev)
3476 return -ENODEV;
3477
3478 switch (cmd) {
3479 case SIOCGIFFLAGS: /* Get interface flags */
3480 ifr->ifr_flags = dev_get_flags(dev);
3481 return 0;
3482
1da177e4
LT
3483 case SIOCGIFMETRIC: /* Get the metric on the interface
3484 (currently unused) */
3485 ifr->ifr_metric = 0;
3486 return 0;
3487
1da177e4
LT
3488 case SIOCGIFMTU: /* Get the MTU of a device */
3489 ifr->ifr_mtu = dev->mtu;
3490 return 0;
3491
1da177e4
LT
3492 case SIOCGIFHWADDR:
3493 if (!dev->addr_len)
3494 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3495 else
3496 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3497 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3498 ifr->ifr_hwaddr.sa_family = dev->type;
3499 return 0;
3500
14e3e079
JG
3501 case SIOCGIFSLAVE:
3502 err = -EINVAL;
3503 break;
3504
3505 case SIOCGIFMAP:
3506 ifr->ifr_map.mem_start = dev->mem_start;
3507 ifr->ifr_map.mem_end = dev->mem_end;
3508 ifr->ifr_map.base_addr = dev->base_addr;
3509 ifr->ifr_map.irq = dev->irq;
3510 ifr->ifr_map.dma = dev->dma;
3511 ifr->ifr_map.port = dev->if_port;
3512 return 0;
3513
3514 case SIOCGIFINDEX:
3515 ifr->ifr_ifindex = dev->ifindex;
3516 return 0;
3517
3518 case SIOCGIFTXQLEN:
3519 ifr->ifr_qlen = dev->tx_queue_len;
3520 return 0;
3521
3522 default:
3523 /* dev_ioctl() should ensure this case
3524 * is never reached
3525 */
3526 WARN_ON(1);
3527 err = -EINVAL;
3528 break;
3529
3530 }
3531 return err;
3532}
3533
3534/*
3535 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
3536 */
3537static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3538{
3539 int err;
3540 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3541
3542 if (!dev)
3543 return -ENODEV;
3544
3545 switch (cmd) {
3546 case SIOCSIFFLAGS: /* Set interface flags */
3547 return dev_change_flags(dev, ifr->ifr_flags);
3548
3549 case SIOCSIFMETRIC: /* Set the metric on the interface
3550 (currently unused) */
3551 return -EOPNOTSUPP;
3552
3553 case SIOCSIFMTU: /* Set the MTU of a device */
3554 return dev_set_mtu(dev, ifr->ifr_mtu);
3555
1da177e4
LT
3556 case SIOCSIFHWADDR:
3557 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3558
3559 case SIOCSIFHWBROADCAST:
3560 if (ifr->ifr_hwaddr.sa_family != dev->type)
3561 return -EINVAL;
3562 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3563 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
056925ab 3564 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
1da177e4
LT
3565 return 0;
3566
1da177e4
LT
3567 case SIOCSIFMAP:
3568 if (dev->set_config) {
3569 if (!netif_device_present(dev))
3570 return -ENODEV;
3571 return dev->set_config(dev, &ifr->ifr_map);
3572 }
3573 return -EOPNOTSUPP;
3574
3575 case SIOCADDMULTI:
61ee6bd4 3576 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
1da177e4
LT
3577 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3578 return -EINVAL;
3579 if (!netif_device_present(dev))
3580 return -ENODEV;
3581 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3582 dev->addr_len, 1);
3583
3584 case SIOCDELMULTI:
61ee6bd4 3585 if ((!dev->set_multicast_list && !dev->set_rx_mode) ||
1da177e4
LT
3586 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3587 return -EINVAL;
3588 if (!netif_device_present(dev))
3589 return -ENODEV;
3590 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3591 dev->addr_len, 1);
3592
1da177e4
LT
3593 case SIOCSIFTXQLEN:
3594 if (ifr->ifr_qlen < 0)
3595 return -EINVAL;
3596 dev->tx_queue_len = ifr->ifr_qlen;
3597 return 0;
3598
3599 case SIOCSIFNAME:
3600 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3601 return dev_change_name(dev, ifr->ifr_newname);
3602
3603 /*
3604 * Unknown or private ioctl
3605 */
3606
3607 default:
3608 if ((cmd >= SIOCDEVPRIVATE &&
3609 cmd <= SIOCDEVPRIVATE + 15) ||
3610 cmd == SIOCBONDENSLAVE ||
3611 cmd == SIOCBONDRELEASE ||
3612 cmd == SIOCBONDSETHWADDR ||
3613 cmd == SIOCBONDSLAVEINFOQUERY ||
3614 cmd == SIOCBONDINFOQUERY ||
3615 cmd == SIOCBONDCHANGEACTIVE ||
3616 cmd == SIOCGMIIPHY ||
3617 cmd == SIOCGMIIREG ||
3618 cmd == SIOCSMIIREG ||
3619 cmd == SIOCBRADDIF ||
3620 cmd == SIOCBRDELIF ||
3621 cmd == SIOCWANDEV) {
3622 err = -EOPNOTSUPP;
3623 if (dev->do_ioctl) {
3624 if (netif_device_present(dev))
3625 err = dev->do_ioctl(dev, ifr,
3626 cmd);
3627 else
3628 err = -ENODEV;
3629 }
3630 } else
3631 err = -EINVAL;
3632
3633 }
3634 return err;
3635}
3636
3637/*
3638 * This function handles all "interface"-type I/O control requests. The actual
3639 * 'doing' part of this is dev_ifsioc above.
3640 */
3641
3642/**
3643 * dev_ioctl - network device ioctl
c4ea43c5 3644 * @net: the applicable net namespace
1da177e4
LT
3645 * @cmd: command to issue
3646 * @arg: pointer to a struct ifreq in user space
3647 *
3648 * Issue ioctl functions to devices. This is normally called by the
3649 * user space syscall interfaces but can sometimes be useful for
3650 * other purposes. The return value is the return from the syscall if
3651 * positive or a negative errno code on error.
3652 */
3653
881d966b 3654int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
1da177e4
LT
3655{
3656 struct ifreq ifr;
3657 int ret;
3658 char *colon;
3659
3660 /* One special case: SIOCGIFCONF takes ifconf argument
3661 and requires shared lock, because it sleeps writing
3662 to user space.
3663 */
3664
3665 if (cmd == SIOCGIFCONF) {
6756ae4b 3666 rtnl_lock();
881d966b 3667 ret = dev_ifconf(net, (char __user *) arg);
6756ae4b 3668 rtnl_unlock();
1da177e4
LT
3669 return ret;
3670 }
3671 if (cmd == SIOCGIFNAME)
881d966b 3672 return dev_ifname(net, (struct ifreq __user *)arg);
1da177e4
LT
3673
3674 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3675 return -EFAULT;
3676
3677 ifr.ifr_name[IFNAMSIZ-1] = 0;
3678
3679 colon = strchr(ifr.ifr_name, ':');
3680 if (colon)
3681 *colon = 0;
3682
3683 /*
3684 * See which interface the caller is talking about.
3685 */
3686
3687 switch (cmd) {
3688 /*
3689 * These ioctl calls:
3690 * - can be done by all.
3691 * - atomic and do not require locking.
3692 * - return a value
3693 */
3694 case SIOCGIFFLAGS:
3695 case SIOCGIFMETRIC:
3696 case SIOCGIFMTU:
3697 case SIOCGIFHWADDR:
3698 case SIOCGIFSLAVE:
3699 case SIOCGIFMAP:
3700 case SIOCGIFINDEX:
3701 case SIOCGIFTXQLEN:
881d966b 3702 dev_load(net, ifr.ifr_name);
1da177e4 3703 read_lock(&dev_base_lock);
14e3e079 3704 ret = dev_ifsioc_locked(net, &ifr, cmd);
1da177e4
LT
3705 read_unlock(&dev_base_lock);
3706 if (!ret) {
3707 if (colon)
3708 *colon = ':';
3709 if (copy_to_user(arg, &ifr,
3710 sizeof(struct ifreq)))
3711 ret = -EFAULT;
3712 }
3713 return ret;
3714
3715 case SIOCETHTOOL:
881d966b 3716 dev_load(net, ifr.ifr_name);
1da177e4 3717 rtnl_lock();
881d966b 3718 ret = dev_ethtool(net, &ifr);
1da177e4
LT
3719 rtnl_unlock();
3720 if (!ret) {
3721 if (colon)
3722 *colon = ':';
3723 if (copy_to_user(arg, &ifr,
3724 sizeof(struct ifreq)))
3725 ret = -EFAULT;
3726 }
3727 return ret;
3728
3729 /*
3730 * These ioctl calls:
3731 * - require superuser power.
3732 * - require strict serialization.
3733 * - return a value
3734 */
3735 case SIOCGMIIPHY:
3736 case SIOCGMIIREG:
3737 case SIOCSIFNAME:
3738 if (!capable(CAP_NET_ADMIN))
3739 return -EPERM;
881d966b 3740 dev_load(net, ifr.ifr_name);
1da177e4 3741 rtnl_lock();
881d966b 3742 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4
LT
3743 rtnl_unlock();
3744 if (!ret) {
3745 if (colon)
3746 *colon = ':';
3747 if (copy_to_user(arg, &ifr,
3748 sizeof(struct ifreq)))
3749 ret = -EFAULT;
3750 }
3751 return ret;
3752
3753 /*
3754 * These ioctl calls:
3755 * - require superuser power.
3756 * - require strict serialization.
3757 * - do not return a value
3758 */
3759 case SIOCSIFFLAGS:
3760 case SIOCSIFMETRIC:
3761 case SIOCSIFMTU:
3762 case SIOCSIFMAP:
3763 case SIOCSIFHWADDR:
3764 case SIOCSIFSLAVE:
3765 case SIOCADDMULTI:
3766 case SIOCDELMULTI:
3767 case SIOCSIFHWBROADCAST:
3768 case SIOCSIFTXQLEN:
3769 case SIOCSMIIREG:
3770 case SIOCBONDENSLAVE:
3771 case SIOCBONDRELEASE:
3772 case SIOCBONDSETHWADDR:
1da177e4
LT
3773 case SIOCBONDCHANGEACTIVE:
3774 case SIOCBRADDIF:
3775 case SIOCBRDELIF:
3776 if (!capable(CAP_NET_ADMIN))
3777 return -EPERM;
cabcac0b
TG
3778 /* fall through */
3779 case SIOCBONDSLAVEINFOQUERY:
3780 case SIOCBONDINFOQUERY:
881d966b 3781 dev_load(net, ifr.ifr_name);
1da177e4 3782 rtnl_lock();
881d966b 3783 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4
LT
3784 rtnl_unlock();
3785 return ret;
3786
3787 case SIOCGIFMEM:
3788 /* Get the per device memory space. We can add this but
3789 * currently do not support it */
3790 case SIOCSIFMEM:
3791 /* Set the per device memory buffer space.
3792 * Not applicable in our case */
3793 case SIOCSIFLINK:
3794 return -EINVAL;
3795
3796 /*
3797 * Unknown or private ioctl.
3798 */
3799 default:
3800 if (cmd == SIOCWANDEV ||
3801 (cmd >= SIOCDEVPRIVATE &&
3802 cmd <= SIOCDEVPRIVATE + 15)) {
881d966b 3803 dev_load(net, ifr.ifr_name);
1da177e4 3804 rtnl_lock();
881d966b 3805 ret = dev_ifsioc(net, &ifr, cmd);
1da177e4
LT
3806 rtnl_unlock();
3807 if (!ret && copy_to_user(arg, &ifr,
3808 sizeof(struct ifreq)))
3809 ret = -EFAULT;
3810 return ret;
3811 }
1da177e4 3812 /* Take care of Wireless Extensions */
295f4a1f 3813 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
881d966b 3814 return wext_handle_ioctl(net, &ifr, cmd, arg);
1da177e4
LT
3815 return -EINVAL;
3816 }
3817}
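
/*
 * Illustrative sketch (user-space code, not part of this file): how the
 * dispatch above is typically reached.  The interface name "eth0" and the
 * minimal error handling are assumptions made for the example; it needs
 * <sys/socket.h>, <sys/ioctl.h>, <net/if.h>, <string.h>, <stdio.h> and
 * <unistd.h>.
 *
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	if (fd >= 0 && ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
 *		printf("%s is %s\n", ifr.ifr_name,
 *		       (ifr.ifr_flags & IFF_UP) ? "up" : "down");
 *	close(fd);
 */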
3818
3819
3820/**
3821 * dev_new_index - allocate an ifindex
c4ea43c5 3822 * @net: the applicable net namespace
1da177e4
LT
3823 *
3824 * Returns a suitable unique value for a new device interface
3825 * number. The caller must hold the rtnl semaphore or the
3826 * dev_base_lock to be sure it remains unique.
3827 */
881d966b 3828static int dev_new_index(struct net *net)
1da177e4
LT
3829{
3830 static int ifindex;
3831 for (;;) {
3832 if (++ifindex <= 0)
3833 ifindex = 1;
881d966b 3834 if (!__dev_get_by_index(net, ifindex))
1da177e4
LT
3835 return ifindex;
3836 }
3837}
3838
1da177e4
LT
 3839/* Delayed registration/unregistration */
3840static DEFINE_SPINLOCK(net_todo_list_lock);
3b5b34fd 3841static LIST_HEAD(net_todo_list);
1da177e4 3842
6f05f629 3843static void net_set_todo(struct net_device *dev)
1da177e4
LT
3844{
3845 spin_lock(&net_todo_list_lock);
3846 list_add_tail(&dev->todo_list, &net_todo_list);
3847 spin_unlock(&net_todo_list_lock);
3848}
3849
93ee31f1
DL
3850static void rollback_registered(struct net_device *dev)
3851{
3852 BUG_ON(dev_boot_phase);
3853 ASSERT_RTNL();
3854
 3855	/* Some devices call this without having registered, as part of initialization unwind. */
3856 if (dev->reg_state == NETREG_UNINITIALIZED) {
3857 printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
3858 "was registered\n", dev->name, dev);
3859
3860 WARN_ON(1);
3861 return;
3862 }
3863
3864 BUG_ON(dev->reg_state != NETREG_REGISTERED);
3865
3866 /* If device is running, close it first. */
3867 dev_close(dev);
3868
3869 /* And unlink it from device chain. */
3870 unlist_netdevice(dev);
3871
3872 dev->reg_state = NETREG_UNREGISTERING;
3873
3874 synchronize_net();
3875
3876 /* Shutdown queueing discipline. */
3877 dev_shutdown(dev);
3878
3879
 3880	/* Notify protocols that we are about to destroy
 3881	   this device. They should clean up all their state.
3882 */
3883 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
3884
3885 /*
3886 * Flush the unicast and multicast chains
3887 */
3888 dev_addr_discard(dev);
3889
3890 if (dev->uninit)
3891 dev->uninit(dev);
3892
3893 /* Notifier chain MUST detach us from master device. */
547b792c 3894 WARN_ON(dev->master);
93ee31f1
DL
3895
3896 /* Remove entries from kobject tree */
3897 netdev_unregister_kobject(dev);
3898
3899 synchronize_net();
3900
3901 dev_put(dev);
3902}
3903
e8a0464c
DM
3904static void __netdev_init_queue_locks_one(struct net_device *dev,
3905 struct netdev_queue *dev_queue,
3906 void *_unused)
c773e847
DM
3907{
3908 spin_lock_init(&dev_queue->_xmit_lock);
cf508b12 3909 netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
c773e847
DM
3910 dev_queue->xmit_lock_owner = -1;
3911}
3912
3913static void netdev_init_queue_locks(struct net_device *dev)
3914{
e8a0464c
DM
3915 netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
3916 __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
c773e847
DM
3917}
3918
1da177e4
LT
3919/**
3920 * register_netdevice - register a network device
3921 * @dev: device to register
3922 *
3923 * Take a completed network device structure and add it to the kernel
3924 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3925 * chain. 0 is returned on success. A negative errno code is returned
3926 * on a failure to set up the device, or if the name is a duplicate.
3927 *
3928 * Callers must hold the rtnl semaphore. You may want
3929 * register_netdev() instead of this.
3930 *
3931 * BUGS:
3932 * The locking appears insufficient to guarantee two parallel registers
3933 * will not get the same name.
3934 */
3935
3936int register_netdevice(struct net_device *dev)
3937{
3938 struct hlist_head *head;
3939 struct hlist_node *p;
3940 int ret;
881d966b 3941 struct net *net;
1da177e4
LT
3942
3943 BUG_ON(dev_boot_phase);
3944 ASSERT_RTNL();
3945
b17a7c17
SH
3946 might_sleep();
3947
1da177e4
LT
 3948	/* When net_devices are persistent, this will be fatal. */
3949 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
c346dca1
YH
3950 BUG_ON(!dev_net(dev));
3951 net = dev_net(dev);
1da177e4 3952
f1f28aa3 3953 spin_lock_init(&dev->addr_list_lock);
cf508b12 3954 netdev_set_addr_lockdep_class(dev);
c773e847 3955 netdev_init_queue_locks(dev);
1da177e4 3956
1da177e4
LT
3957 dev->iflink = -1;
3958
3959 /* Init, if this function is available */
3960 if (dev->init) {
3961 ret = dev->init(dev);
3962 if (ret) {
3963 if (ret > 0)
3964 ret = -EIO;
90833aa4 3965 goto out;
1da177e4
LT
3966 }
3967 }
4ec93edb 3968
1da177e4
LT
3969 if (!dev_valid_name(dev->name)) {
3970 ret = -EINVAL;
7ce1b0ed 3971 goto err_uninit;
1da177e4
LT
3972 }
3973
881d966b 3974 dev->ifindex = dev_new_index(net);
1da177e4
LT
3975 if (dev->iflink == -1)
3976 dev->iflink = dev->ifindex;
3977
3978 /* Check for existence of name */
881d966b 3979 head = dev_name_hash(net, dev->name);
1da177e4
LT
3980 hlist_for_each(p, head) {
3981 struct net_device *d
3982 = hlist_entry(p, struct net_device, name_hlist);
3983 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3984 ret = -EEXIST;
7ce1b0ed 3985 goto err_uninit;
1da177e4 3986 }
4ec93edb 3987 }
1da177e4 3988
d212f87b
SH
3989 /* Fix illegal checksum combinations */
3990 if ((dev->features & NETIF_F_HW_CSUM) &&
3991 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3992 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3993 dev->name);
3994 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3995 }
3996
3997 if ((dev->features & NETIF_F_NO_CSUM) &&
3998 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3999 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
4000 dev->name);
4001 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
4002 }
4003
4004
1da177e4
LT
4005 /* Fix illegal SG+CSUM combinations. */
4006 if ((dev->features & NETIF_F_SG) &&
8648b305 4007 !(dev->features & NETIF_F_ALL_CSUM)) {
5a8da02b 4008 printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
1da177e4
LT
4009 dev->name);
4010 dev->features &= ~NETIF_F_SG;
4011 }
4012
4013 /* TSO requires that SG is present as well. */
4014 if ((dev->features & NETIF_F_TSO) &&
4015 !(dev->features & NETIF_F_SG)) {
5a8da02b 4016 printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
1da177e4
LT
4017 dev->name);
4018 dev->features &= ~NETIF_F_TSO;
4019 }
e89e9cf5
AR
4020 if (dev->features & NETIF_F_UFO) {
4021 if (!(dev->features & NETIF_F_HW_CSUM)) {
4022 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
4023 "NETIF_F_HW_CSUM feature.\n",
4024 dev->name);
4025 dev->features &= ~NETIF_F_UFO;
4026 }
4027 if (!(dev->features & NETIF_F_SG)) {
4028 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
4029 "NETIF_F_SG feature.\n",
4030 dev->name);
4031 dev->features &= ~NETIF_F_UFO;
4032 }
4033 }
1da177e4 4034
e5a4a72d
LB
4035 /* Enable software GSO if SG is supported. */
4036 if (dev->features & NETIF_F_SG)
4037 dev->features |= NETIF_F_GSO;
4038
aaf8cdc3 4039 netdev_initialize_kobject(dev);
8b41d188 4040 ret = netdev_register_kobject(dev);
b17a7c17 4041 if (ret)
7ce1b0ed 4042 goto err_uninit;
b17a7c17
SH
4043 dev->reg_state = NETREG_REGISTERED;
4044
1da177e4
LT
4045 /*
 4046	 *	Default initial state at registration is that the
4047 * device is present.
4048 */
4049
4050 set_bit(__LINK_STATE_PRESENT, &dev->state);
4051
1da177e4 4052 dev_init_scheduler(dev);
1da177e4 4053 dev_hold(dev);
ce286d32 4054 list_netdevice(dev);
1da177e4
LT
4055
4056 /* Notify protocols, that a new device appeared. */
056925ab 4057 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 4058 ret = notifier_to_errno(ret);
93ee31f1
DL
4059 if (ret) {
4060 rollback_registered(dev);
4061 dev->reg_state = NETREG_UNREGISTERED;
4062 }
1da177e4
LT
4063
4064out:
4065 return ret;
7ce1b0ed
HX
4066
4067err_uninit:
4068 if (dev->uninit)
4069 dev->uninit(dev);
4070 goto out;
1da177e4
LT
4071}
4072
4073/**
4074 * register_netdev - register a network device
4075 * @dev: device to register
4076 *
4077 * Take a completed network device structure and add it to the kernel
4078 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
4079 * chain. 0 is returned on success. A negative errno code is returned
4080 * on a failure to set up the device, or if the name is a duplicate.
4081 *
38b4da38 4082 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
4083 * and expands the device name if you passed a format string to
4084 * alloc_netdev.
4085 */
4086int register_netdev(struct net_device *dev)
4087{
4088 int err;
4089
4090 rtnl_lock();
4091
4092 /*
4093 * If the name is a format string the caller wants us to do a
4094 * name allocation.
4095 */
4096 if (strchr(dev->name, '%')) {
4097 err = dev_alloc_name(dev, dev->name);
4098 if (err < 0)
4099 goto out;
4100 }
4ec93edb 4101
1da177e4
LT
4102 err = register_netdevice(dev);
4103out:
4104 rtnl_unlock();
4105 return err;
4106}
4107EXPORT_SYMBOL(register_netdev);
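
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * letting register_netdev() pick the final name from a format string.
 * "mydev%d", struct mydev_priv and mydev_setup() are assumed names.
 *
 *	struct net_device *dev;
 *	int err;
 *
 *	dev = alloc_netdev(sizeof(struct mydev_priv), "mydev%d", mydev_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 *
 * Here register_netdev() expands "%d" to the lowest free unit number
 * (mydev0, mydev1, ...) before calling register_netdevice() under the
 * rtnl lock.
 */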
4108
4109/*
4110 * netdev_wait_allrefs - wait until all references are gone.
4111 *
4112 * This is called when unregistering network devices.
4113 *
4114 * Any protocol or device that holds a reference should register
 4115 * for netdevice notification, and clean up and put back the
4116 * reference if they receive an UNREGISTER event.
4117 * We can get stuck here if buggy protocols don't correctly
4ec93edb 4118 * call dev_put.
1da177e4
LT
4119 */
4120static void netdev_wait_allrefs(struct net_device *dev)
4121{
4122 unsigned long rebroadcast_time, warning_time;
4123
4124 rebroadcast_time = warning_time = jiffies;
4125 while (atomic_read(&dev->refcnt) != 0) {
4126 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 4127 rtnl_lock();
1da177e4
LT
4128
4129 /* Rebroadcast unregister notification */
056925ab 4130 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4
LT
4131
4132 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
4133 &dev->state)) {
4134 /* We must not have linkwatch events
4135 * pending on unregister. If this
4136 * happens, we simply run the queue
4137 * unscheduled, resulting in a noop
4138 * for this device.
4139 */
4140 linkwatch_run_queue();
4141 }
4142
6756ae4b 4143 __rtnl_unlock();
1da177e4
LT
4144
4145 rebroadcast_time = jiffies;
4146 }
4147
4148 msleep(250);
4149
4150 if (time_after(jiffies, warning_time + 10 * HZ)) {
4151 printk(KERN_EMERG "unregister_netdevice: "
4152 "waiting for %s to become free. Usage "
4153 "count = %d\n",
4154 dev->name, atomic_read(&dev->refcnt));
4155 warning_time = jiffies;
4156 }
4157 }
4158}
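
/*
 * Illustrative sketch (not part of this file): the kind of notifier a
 * long-lived reference holder is expected to register so that
 * netdev_wait_allrefs() can finish.  my_cached_dev, my_netdev_event and
 * my_notifier are hypothetical names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UNREGISTER && dev == my_cached_dev) {
 *			my_cached_dev = NULL;
 *			dev_put(dev);
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_notifier = {
 *		.notifier_call	= my_netdev_event,
 *	};
 *
 * Registering it with register_netdevice_notifier() ensures the rebroadcast
 * UNREGISTER event above gives the holder a chance to drop its dev_hold().
 */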
4159
4160/* The sequence is:
4161 *
4162 * rtnl_lock();
4163 * ...
4164 * register_netdevice(x1);
4165 * register_netdevice(x2);
4166 * ...
4167 * unregister_netdevice(y1);
4168 * unregister_netdevice(y2);
4169 * ...
4170 * rtnl_unlock();
4171 * free_netdev(y1);
4172 * free_netdev(y2);
4173 *
4174 * We are invoked by rtnl_unlock() after it drops the semaphore.
4175 * This allows us to deal with problems:
b17a7c17 4176 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
4177 * without deadlocking with linkwatch via keventd.
4178 * 2) Since we run with the RTNL semaphore not held, we can sleep
4179 * safely in order to wait for the netdev refcnt to drop to zero.
4180 */
4a3e2f71 4181static DEFINE_MUTEX(net_todo_run_mutex);
1da177e4
LT
4182void netdev_run_todo(void)
4183{
626ab0e6 4184 struct list_head list;
1da177e4
LT
4185
 4186	/* Need to guard against multiple CPUs getting out of order. */
4a3e2f71 4187 mutex_lock(&net_todo_run_mutex);
1da177e4
LT
4188
4189 /* Not safe to do outside the semaphore. We must not return
4190 * until all unregister events invoked by the local processor
4191 * have been completed (either by this todo run, or one on
4192 * another cpu).
4193 */
4194 if (list_empty(&net_todo_list))
4195 goto out;
4196
4197 /* Snapshot list, allow later requests */
4198 spin_lock(&net_todo_list_lock);
626ab0e6 4199 list_replace_init(&net_todo_list, &list);
1da177e4 4200 spin_unlock(&net_todo_list_lock);
626ab0e6 4201
1da177e4
LT
4202 while (!list_empty(&list)) {
4203 struct net_device *dev
4204 = list_entry(list.next, struct net_device, todo_list);
4205 list_del(&dev->todo_list);
4206
b17a7c17
SH
4207 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
4208 printk(KERN_ERR "network todo '%s' but state %d\n",
4209 dev->name, dev->reg_state);
4210 dump_stack();
4211 continue;
4212 }
1da177e4 4213
b17a7c17 4214 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 4215
6e583ce5
SH
4216 on_each_cpu(flush_backlog, dev, 1);
4217
b17a7c17 4218 netdev_wait_allrefs(dev);
1da177e4 4219
b17a7c17
SH
4220 /* paranoia */
4221 BUG_ON(atomic_read(&dev->refcnt));
547b792c
IJ
4222 WARN_ON(dev->ip_ptr);
4223 WARN_ON(dev->ip6_ptr);
4224 WARN_ON(dev->dn_ptr);
1da177e4 4225
b17a7c17
SH
4226 if (dev->destructor)
4227 dev->destructor(dev);
9093bbb2
SH
4228
4229 /* Free network device */
4230 kobject_put(&dev->dev.kobj);
1da177e4
LT
4231 }
4232
4233out:
4a3e2f71 4234 mutex_unlock(&net_todo_run_mutex);
1da177e4
LT
4235}
4236
5a1b5898 4237static struct net_device_stats *internal_stats(struct net_device *dev)
c45d286e 4238{
5a1b5898 4239 return &dev->stats;
c45d286e
RR
4240}
4241
dc2b4847 4242static void netdev_init_one_queue(struct net_device *dev,
e8a0464c
DM
4243 struct netdev_queue *queue,
4244 void *_unused)
dc2b4847 4245{
dc2b4847
DM
4246 queue->dev = dev;
4247}
4248
bb949fbd
DM
4249static void netdev_init_queues(struct net_device *dev)
4250{
e8a0464c
DM
4251 netdev_init_one_queue(dev, &dev->rx_queue, NULL);
4252 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
c3f26a26 4253 spin_lock_init(&dev->tx_global_lock);
bb949fbd
DM
4254}
4255
1da177e4 4256/**
f25f4e44 4257 * alloc_netdev_mq - allocate network device
1da177e4
LT
4258 * @sizeof_priv: size of private data to allocate space for
4259 * @name: device name format string
4260 * @setup: callback to initialize device
f25f4e44 4261 * @queue_count: the number of subqueues to allocate
1da177e4
LT
4262 *
4263 * Allocates a struct net_device with private data area for driver use
f25f4e44
PWJ
 4264 * and performs basic initialization. Also allocates subqueue structs
4265 * for each queue on the device at the end of the netdevice.
1da177e4 4266 */
f25f4e44
PWJ
4267struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
4268 void (*setup)(struct net_device *), unsigned int queue_count)
1da177e4 4269{
e8a0464c 4270 struct netdev_queue *tx;
1da177e4 4271 struct net_device *dev;
7943986c 4272 size_t alloc_size;
e8a0464c 4273 void *p;
1da177e4 4274
b6fe17d6
SH
4275 BUG_ON(strlen(name) >= sizeof(dev->name));
4276
fd2ea0a7 4277 alloc_size = sizeof(struct net_device);
d1643d24
AD
4278 if (sizeof_priv) {
4279 /* ensure 32-byte alignment of private area */
4280 alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
4281 alloc_size += sizeof_priv;
4282 }
4283 /* ensure 32-byte alignment of whole construct */
4284 alloc_size += NETDEV_ALIGN_CONST;
1da177e4 4285
31380de9 4286 p = kzalloc(alloc_size, GFP_KERNEL);
1da177e4 4287 if (!p) {
b6fe17d6 4288 printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
1da177e4
LT
4289 return NULL;
4290 }
1da177e4 4291
7943986c 4292 tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
e8a0464c
DM
4293 if (!tx) {
4294 printk(KERN_ERR "alloc_netdev: Unable to allocate "
4295 "tx qdiscs.\n");
4296 kfree(p);
4297 return NULL;
4298 }
4299
1da177e4
LT
4300 dev = (struct net_device *)
4301 (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
4302 dev->padded = (char *)dev - (char *)p;
c346dca1 4303 dev_net_set(dev, &init_net);
1da177e4 4304
e8a0464c
DM
4305 dev->_tx = tx;
4306 dev->num_tx_queues = queue_count;
fd2ea0a7 4307 dev->real_num_tx_queues = queue_count;
e8a0464c 4308
f25f4e44
PWJ
4309 if (sizeof_priv) {
4310 dev->priv = ((char *)dev +
fd2ea0a7 4311 ((sizeof(struct net_device) + NETDEV_ALIGN_CONST)
f25f4e44
PWJ
4312 & ~NETDEV_ALIGN_CONST));
4313 }
4314
82cc1a7a 4315 dev->gso_max_size = GSO_MAX_SIZE;
1da177e4 4316
bb949fbd
DM
4317 netdev_init_queues(dev);
4318
5a1b5898 4319 dev->get_stats = internal_stats;
bea3348e 4320 netpoll_netdev_init(dev);
1da177e4
LT
4321 setup(dev);
4322 strcpy(dev->name, name);
4323 return dev;
4324}
f25f4e44 4325EXPORT_SYMBOL(alloc_netdev_mq);
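
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * allocating a device with a private area and four TX subqueues.
 * struct my_priv, "myq%d" and my_setup() are assumed names.
 *
 *	struct net_device *dev;
 *	struct my_priv *priv;
 *
 *	dev = alloc_netdev_mq(sizeof(struct my_priv), "myq%d", my_setup, 4);
 *	if (!dev)
 *		return -ENOMEM;
 *	priv = netdev_priv(dev);
 *
 * The private area sits in the same allocation as the net_device itself,
 * which is why netdev_priv() is just pointer arithmetic on dev.
 */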
1da177e4
LT
4326
4327/**
4328 * free_netdev - free network device
4329 * @dev: device
4330 *
4ec93edb
YH
4331 * This function does the last stage of destroying an allocated device
4332 * interface. The reference to the device object is released.
1da177e4
LT
4333 * If this is the last reference then it will be freed.
4334 */
4335void free_netdev(struct net_device *dev)
4336{
f3005d7f
DL
4337 release_net(dev_net(dev));
4338
e8a0464c
DM
4339 kfree(dev->_tx);
4340
3041a069 4341 /* Compatibility with error handling in drivers */
1da177e4
LT
4342 if (dev->reg_state == NETREG_UNINITIALIZED) {
4343 kfree((char *)dev - dev->padded);
4344 return;
4345 }
4346
4347 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
4348 dev->reg_state = NETREG_RELEASED;
4349
43cb76d9
GKH
4350 /* will free via device release */
4351 put_device(&dev->dev);
1da177e4 4352}
4ec93edb 4353
1da177e4 4354/* Synchronize with packet receive processing. */
4ec93edb 4355void synchronize_net(void)
1da177e4
LT
4356{
4357 might_sleep();
fbd568a3 4358 synchronize_rcu();
1da177e4
LT
4359}
4360
4361/**
4362 * unregister_netdevice - remove device from the kernel
4363 * @dev: device
4364 *
4365 * This function shuts down a device interface and removes it
d59b54b1 4366 * from the kernel tables.
1da177e4
LT
4367 *
4368 * Callers must hold the rtnl semaphore. You may want
4369 * unregister_netdev() instead of this.
4370 */
4371
22f8cde5 4372void unregister_netdevice(struct net_device *dev)
1da177e4 4373{
a6620712
HX
4374 ASSERT_RTNL();
4375
93ee31f1 4376 rollback_registered(dev);
1da177e4
LT
4377 /* Finish processing unregister after unlock */
4378 net_set_todo(dev);
1da177e4
LT
4379}
4380
4381/**
4382 * unregister_netdev - remove device from the kernel
4383 * @dev: device
4384 *
4385 * This function shuts down a device interface and removes it
d59b54b1 4386 * from the kernel tables.
1da177e4
LT
4387 *
4388 * This is just a wrapper for unregister_netdevice that takes
4389 * the rtnl semaphore. In general you want to use this and not
4390 * unregister_netdevice.
4391 */
4392void unregister_netdev(struct net_device *dev)
4393{
4394 rtnl_lock();
4395 unregister_netdevice(dev);
4396 rtnl_unlock();
4397}
4398
4399EXPORT_SYMBOL(unregister_netdev);
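
/*
 * Illustrative sketch (hypothetical driver teardown, not part of this file):
 * the usual pairing with the registration helpers above.
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 *
 * unregister_netdev() takes and drops the rtnl lock itself; rtnl_unlock()
 * runs netdev_run_todo(), so by the time it returns the device has reached
 * NETREG_UNREGISTERED and free_netdev() is safe to call.
 */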
4400
ce286d32
EB
4401/**
 4402 *	dev_change_net_namespace - move device to a different network namespace
4403 * @dev: device
4404 * @net: network namespace
4405 * @pat: If not NULL name pattern to try if the current device name
4406 * is already taken in the destination network namespace.
4407 *
4408 * This function shuts down a device interface and moves it
4409 * to a new network namespace. On success 0 is returned, on
 4410 * a failure a negative errno code is returned.
4411 *
4412 * Callers must hold the rtnl semaphore.
4413 */
4414
4415int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
4416{
4417 char buf[IFNAMSIZ];
4418 const char *destname;
4419 int err;
4420
4421 ASSERT_RTNL();
4422
4423 /* Don't allow namespace local devices to be moved. */
4424 err = -EINVAL;
4425 if (dev->features & NETIF_F_NETNS_LOCAL)
4426 goto out;
4427
 4428	/* Ensure the device has been registered */
4429 err = -EINVAL;
4430 if (dev->reg_state != NETREG_REGISTERED)
4431 goto out;
4432
 4433	/* Get out if there is nothing to do */
4434 err = 0;
878628fb 4435 if (net_eq(dev_net(dev), net))
ce286d32
EB
4436 goto out;
4437
4438 /* Pick the destination device name, and ensure
4439 * we can use it in the destination network namespace.
4440 */
4441 err = -EEXIST;
4442 destname = dev->name;
4443 if (__dev_get_by_name(net, destname)) {
4444 /* We get here if we can't use the current device name */
4445 if (!pat)
4446 goto out;
4447 if (!dev_valid_name(pat))
4448 goto out;
4449 if (strchr(pat, '%')) {
4450 if (__dev_alloc_name(net, pat, buf) < 0)
4451 goto out;
4452 destname = buf;
4453 } else
4454 destname = pat;
4455 if (__dev_get_by_name(net, destname))
4456 goto out;
4457 }
4458
4459 /*
 4460	 * And now a mini version of register_netdevice and unregister_netdevice.
4461 */
4462
4463 /* If device is running close it first. */
9b772652 4464 dev_close(dev);
ce286d32
EB
4465
4466 /* And unlink it from device chain */
4467 err = -ENODEV;
4468 unlist_netdevice(dev);
4469
4470 synchronize_net();
4471
4472 /* Shutdown queueing discipline. */
4473 dev_shutdown(dev);
4474
 4475	/* Notify protocols that we are about to destroy
 4476	   this device. They should clean up all their state.
4477 */
4478 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
4479
4480 /*
4481 * Flush the unicast and multicast chains
4482 */
4483 dev_addr_discard(dev);
4484
4485 /* Actually switch the network namespace */
c346dca1 4486 dev_net_set(dev, net);
ce286d32
EB
4487
4488 /* Assign the new device name */
4489 if (destname != dev->name)
4490 strcpy(dev->name, destname);
4491
4492 /* If there is an ifindex conflict assign a new one */
4493 if (__dev_get_by_index(net, dev->ifindex)) {
4494 int iflink = (dev->iflink == dev->ifindex);
4495 dev->ifindex = dev_new_index(net);
4496 if (iflink)
4497 dev->iflink = dev->ifindex;
4498 }
4499
8b41d188 4500 /* Fixup kobjects */
aaf8cdc3
DL
4501 netdev_unregister_kobject(dev);
4502 err = netdev_register_kobject(dev);
8b41d188 4503 WARN_ON(err);
ce286d32
EB
4504
4505 /* Add the device back in the hashes */
4506 list_netdevice(dev);
4507
4508 /* Notify protocols, that a new device appeared. */
4509 call_netdevice_notifiers(NETDEV_REGISTER, dev);
4510
4511 synchronize_net();
4512 err = 0;
4513out:
4514 return err;
4515}
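
/*
 * Illustrative sketch (hypothetical caller, not part of this file): moving
 * a device into another namespace, falling back to a "moved%d" pattern if
 * its name is already taken there.  "other_net" is an assumed, already
 * obtained struct net pointer.
 *
 *	rtnl_lock();
 *	err = dev_change_net_namespace(dev, other_net, "moved%d");
 *	rtnl_unlock();
 */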
4516
1da177e4
LT
4517static int dev_cpu_callback(struct notifier_block *nfb,
4518 unsigned long action,
4519 void *ocpu)
4520{
4521 struct sk_buff **list_skb;
37437bb2 4522 struct Qdisc **list_net;
1da177e4
LT
4523 struct sk_buff *skb;
4524 unsigned int cpu, oldcpu = (unsigned long)ocpu;
4525 struct softnet_data *sd, *oldsd;
4526
8bb78442 4527 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
4528 return NOTIFY_OK;
4529
4530 local_irq_disable();
4531 cpu = smp_processor_id();
4532 sd = &per_cpu(softnet_data, cpu);
4533 oldsd = &per_cpu(softnet_data, oldcpu);
4534
4535 /* Find end of our completion_queue. */
4536 list_skb = &sd->completion_queue;
4537 while (*list_skb)
4538 list_skb = &(*list_skb)->next;
4539 /* Append completion queue from offline CPU. */
4540 *list_skb = oldsd->completion_queue;
4541 oldsd->completion_queue = NULL;
4542
4543 /* Find end of our output_queue. */
4544 list_net = &sd->output_queue;
4545 while (*list_net)
4546 list_net = &(*list_net)->next_sched;
4547 /* Append output queue from offline CPU. */
4548 *list_net = oldsd->output_queue;
4549 oldsd->output_queue = NULL;
4550
4551 raise_softirq_irqoff(NET_TX_SOFTIRQ);
4552 local_irq_enable();
4553
4554 /* Process offline CPU's input_pkt_queue */
4555 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
4556 netif_rx(skb);
4557
4558 return NOTIFY_OK;
4559}
1da177e4 4560
db217334
CL
4561#ifdef CONFIG_NET_DMA
4562/**
0ed72ec4
RD
4563 * net_dma_rebalance - try to maintain one DMA channel per CPU
4564 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
4565 *
4566 * This is called when the number of channels allocated to the net_dma client
4567 * changes. The net_dma client tries to have one DMA channel per CPU.
db217334 4568 */
d379b01e
DW
4569
4570static void net_dma_rebalance(struct net_dma *net_dma)
db217334 4571{
d379b01e 4572 unsigned int cpu, i, n, chan_idx;
db217334
CL
4573 struct dma_chan *chan;
4574
d379b01e 4575 if (cpus_empty(net_dma->channel_mask)) {
db217334 4576 for_each_online_cpu(cpu)
29bbd72d 4577 rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
db217334
CL
4578 return;
4579 }
4580
4581 i = 0;
4582 cpu = first_cpu(cpu_online_map);
4583
0e12f848 4584 for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
d379b01e
DW
4585 chan = net_dma->channels[chan_idx];
4586
4587 n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
4588 + (i < (num_online_cpus() %
4589 cpus_weight(net_dma->channel_mask)) ? 1 : 0));
db217334
CL
4590
 4591		while (n) {
29bbd72d 4592 per_cpu(softnet_data, cpu).net_dma = chan;
db217334
CL
4593 cpu = next_cpu(cpu, cpu_online_map);
4594 n--;
4595 }
4596 i++;
4597 }
db217334
CL
4598}
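
/*
 * Worked example of the split above (illustrative): with 8 online CPUs and
 * 3 channels in channel_mask, 8 % 3 == 2, so the first two channels get
 * 8/3 + 1 = 3 CPUs each and the third gets 8/3 = 2, covering all 8 CPUs.
 */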
4599
4600/**
4601 * netdev_dma_event - event callback for the net_dma_client
4602 * @client: should always be net_dma_client
f4b8ea78 4603 * @chan: DMA channel for the event
0ed72ec4 4604 * @state: DMA state to be handled
db217334 4605 */
d379b01e
DW
4606static enum dma_state_client
4607netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
4608 enum dma_state state)
4609{
4610 int i, found = 0, pos = -1;
4611 struct net_dma *net_dma =
4612 container_of(client, struct net_dma, client);
4613 enum dma_state_client ack = DMA_DUP; /* default: take no action */
4614
4615 spin_lock(&net_dma->lock);
4616 switch (state) {
4617 case DMA_RESOURCE_AVAILABLE:
0c0b0aca 4618 for (i = 0; i < nr_cpu_ids; i++)
d379b01e
DW
4619 if (net_dma->channels[i] == chan) {
4620 found = 1;
4621 break;
4622 } else if (net_dma->channels[i] == NULL && pos < 0)
4623 pos = i;
4624
4625 if (!found && pos >= 0) {
4626 ack = DMA_ACK;
4627 net_dma->channels[pos] = chan;
4628 cpu_set(pos, net_dma->channel_mask);
4629 net_dma_rebalance(net_dma);
4630 }
db217334
CL
4631 break;
4632 case DMA_RESOURCE_REMOVED:
0c0b0aca 4633 for (i = 0; i < nr_cpu_ids; i++)
d379b01e
DW
4634 if (net_dma->channels[i] == chan) {
4635 found = 1;
4636 pos = i;
4637 break;
4638 }
4639
4640 if (found) {
4641 ack = DMA_ACK;
4642 cpu_clear(pos, net_dma->channel_mask);
4643 net_dma->channels[i] = NULL;
4644 net_dma_rebalance(net_dma);
4645 }
db217334
CL
4646 break;
4647 default:
4648 break;
4649 }
d379b01e
DW
4650 spin_unlock(&net_dma->lock);
4651
4652 return ack;
db217334
CL
4653}
4654
4655/**
 4656 * netdev_dma_register - register the networking subsystem as a DMA client
4657 */
4658static int __init netdev_dma_register(void)
4659{
0c0b0aca
MT
4660 net_dma.channels = kzalloc(nr_cpu_ids * sizeof(struct net_dma),
4661 GFP_KERNEL);
4662 if (unlikely(!net_dma.channels)) {
4663 printk(KERN_NOTICE
4664 "netdev_dma: no memory for net_dma.channels\n");
4665 return -ENOMEM;
4666 }
d379b01e
DW
4667 spin_lock_init(&net_dma.lock);
4668 dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
4669 dma_async_client_register(&net_dma.client);
4670 dma_async_client_chan_request(&net_dma.client);
db217334
CL
4671 return 0;
4672}
4673
4674#else
4675static int __init netdev_dma_register(void) { return -ENODEV; }
4676#endif /* CONFIG_NET_DMA */
1da177e4 4677
7f353bf2
HX
4678/**
 4679 * netdev_compute_features - compute conjunction of two feature sets
4680 * @all: first feature set
4681 * @one: second feature set
4682 *
4683 * Computes a new feature set after adding a device with feature set
4684 * @one to the master device with current feature set @all. Returns
4685 * the new feature set.
4686 */
4687int netdev_compute_features(unsigned long all, unsigned long one)
4688{
4689 /* if device needs checksumming, downgrade to hw checksumming */
4690 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
4691 all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;
4692
4693 /* if device can't do all checksum, downgrade to ipv4/ipv6 */
4694 if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
4695 all ^= NETIF_F_HW_CSUM
4696 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4697
4698 if (one & NETIF_F_GSO)
4699 one |= NETIF_F_GSO_SOFTWARE;
4700 one |= NETIF_F_GSO;
4701
e2a6b852
HX
4702 /*
4703 * If even one device supports a GSO protocol with software fallback,
4704 * enable it for all.
4705 */
4706 all |= one & NETIF_F_GSO_SOFTWARE;
4707
7f353bf2
HX
4708 /* If even one device supports robust GSO, enable it for all. */
4709 if (one & NETIF_F_GSO_ROBUST)
4710 all |= NETIF_F_GSO_ROBUST;
4711
4712 all &= one | NETIF_F_LLTX;
4713
4714 if (!(all & NETIF_F_ALL_CSUM))
4715 all &= ~NETIF_F_SG;
4716 if (!(all & NETIF_F_SG))
4717 all &= ~NETIF_F_GSO_MASK;
4718
4719 return all;
4720}
4721EXPORT_SYMBOL(netdev_compute_features);
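
/*
 * Illustrative sketch (modelled on how a bonding-style master might use the
 * helper above; not part of this file).  bond, bond_dev, i and the
 * bond_for_each_slave() iterator are assumed to come from the bonding driver.
 *
 *	unsigned long features = bond_dev->features;
 *	struct slave *slave;
 *
 *	bond_for_each_slave(bond, slave, i)
 *		features = netdev_compute_features(features,
 *						   slave->dev->features);
 *	bond_dev->features = features;
 */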
4722
30d97d35
PE
4723static struct hlist_head *netdev_create_hash(void)
4724{
4725 int i;
4726 struct hlist_head *hash;
4727
4728 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
4729 if (hash != NULL)
4730 for (i = 0; i < NETDEV_HASHENTRIES; i++)
4731 INIT_HLIST_HEAD(&hash[i]);
4732
4733 return hash;
4734}
4735
881d966b 4736/* Initialize per network namespace state */
4665079c 4737static int __net_init netdev_init(struct net *net)
881d966b 4738{
881d966b 4739 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 4740
30d97d35
PE
4741 net->dev_name_head = netdev_create_hash();
4742 if (net->dev_name_head == NULL)
4743 goto err_name;
881d966b 4744
30d97d35
PE
4745 net->dev_index_head = netdev_create_hash();
4746 if (net->dev_index_head == NULL)
4747 goto err_idx;
881d966b
EB
4748
4749 return 0;
30d97d35
PE
4750
4751err_idx:
4752 kfree(net->dev_name_head);
4753err_name:
4754 return -ENOMEM;
881d966b
EB
4755}
4756
6579e57b
AV
4757char *netdev_drivername(struct net_device *dev, char *buffer, int len)
4758{
4759 struct device_driver *driver;
4760 struct device *parent;
4761
4762 if (len <= 0 || !buffer)
4763 return buffer;
4764 buffer[0] = 0;
4765
4766 parent = dev->dev.parent;
4767
4768 if (!parent)
4769 return buffer;
4770
4771 driver = parent->driver;
4772 if (driver && driver->name)
4773 strlcpy(buffer, driver->name, len);
4774 return buffer;
4775}
4776
4665079c 4777static void __net_exit netdev_exit(struct net *net)
881d966b
EB
4778{
4779 kfree(net->dev_name_head);
4780 kfree(net->dev_index_head);
4781}
4782
022cbae6 4783static struct pernet_operations __net_initdata netdev_net_ops = {
881d966b
EB
4784 .init = netdev_init,
4785 .exit = netdev_exit,
4786};
4787
4665079c 4788static void __net_exit default_device_exit(struct net *net)
ce286d32
EB
4789{
4790 struct net_device *dev, *next;
4791 /*
 4792	 * Push all migratable network devices back to the
4793 * initial network namespace
4794 */
4795 rtnl_lock();
4796 for_each_netdev_safe(net, dev, next) {
4797 int err;
aca51397 4798 char fb_name[IFNAMSIZ];
ce286d32
EB
4799
 4800		/* Ignore unmovable devices (e.g. loopback) */
4801 if (dev->features & NETIF_F_NETNS_LOCAL)
4802 continue;
4803
 4804		/* Push remaining network devices to init_net */
aca51397
PE
4805 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
4806 err = dev_change_net_namespace(dev, &init_net, fb_name);
ce286d32 4807 if (err) {
aca51397 4808 printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
ce286d32 4809 __func__, dev->name, err);
aca51397 4810 BUG();
ce286d32
EB
4811 }
4812 }
4813 rtnl_unlock();
4814}
4815
022cbae6 4816static struct pernet_operations __net_initdata default_device_ops = {
ce286d32
EB
4817 .exit = default_device_exit,
4818};
4819
1da177e4
LT
4820/*
4821 * Initialize the DEV module. At boot time this walks the device list and
4822 * unhooks any devices that fail to initialise (normally hardware not
4823 * present) and leaves us with a valid list of present and active devices.
4824 *
4825 */
4826
4827/*
4828 * This is called single threaded during boot, so no need
4829 * to take the rtnl semaphore.
4830 */
4831static int __init net_dev_init(void)
4832{
4833 int i, rc = -ENOMEM;
4834
4835 BUG_ON(!dev_boot_phase);
4836
1da177e4
LT
4837 if (dev_proc_init())
4838 goto out;
4839
8b41d188 4840 if (netdev_kobject_init())
1da177e4
LT
4841 goto out;
4842
4843 INIT_LIST_HEAD(&ptype_all);
82d8a867 4844 for (i = 0; i < PTYPE_HASH_SIZE; i++)
1da177e4
LT
4845 INIT_LIST_HEAD(&ptype_base[i]);
4846
881d966b
EB
4847 if (register_pernet_subsys(&netdev_net_ops))
4848 goto out;
1da177e4 4849
ce286d32
EB
4850 if (register_pernet_device(&default_device_ops))
4851 goto out;
4852
1da177e4
LT
4853 /*
4854 * Initialise the packet receive queues.
4855 */
4856
6f912042 4857 for_each_possible_cpu(i) {
1da177e4
LT
4858 struct softnet_data *queue;
4859
4860 queue = &per_cpu(softnet_data, i);
4861 skb_queue_head_init(&queue->input_pkt_queue);
1da177e4
LT
4862 queue->completion_queue = NULL;
4863 INIT_LIST_HEAD(&queue->poll_list);
bea3348e
SH
4864
4865 queue->backlog.poll = process_backlog;
4866 queue->backlog.weight = weight_p;
1da177e4
LT
4867 }
4868
db217334
CL
4869 netdev_dma_register();
4870
1da177e4
LT
4871 dev_boot_phase = 0;
4872
962cf36c
CM
4873 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
4874 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
1da177e4
LT
4875
4876 hotcpu_notifier(dev_cpu_callback, 0);
4877 dst_init();
4878 dev_mcast_init();
4879 rc = 0;
4880out:
4881 return rc;
4882}
4883
4884subsys_initcall(net_dev_init);
4885
4886EXPORT_SYMBOL(__dev_get_by_index);
4887EXPORT_SYMBOL(__dev_get_by_name);
4888EXPORT_SYMBOL(__dev_remove_pack);
c2373ee9 4889EXPORT_SYMBOL(dev_valid_name);
1da177e4
LT
4890EXPORT_SYMBOL(dev_add_pack);
4891EXPORT_SYMBOL(dev_alloc_name);
4892EXPORT_SYMBOL(dev_close);
4893EXPORT_SYMBOL(dev_get_by_flags);
4894EXPORT_SYMBOL(dev_get_by_index);
4895EXPORT_SYMBOL(dev_get_by_name);
1da177e4
LT
4896EXPORT_SYMBOL(dev_open);
4897EXPORT_SYMBOL(dev_queue_xmit);
4898EXPORT_SYMBOL(dev_remove_pack);
4899EXPORT_SYMBOL(dev_set_allmulti);
4900EXPORT_SYMBOL(dev_set_promiscuity);
4901EXPORT_SYMBOL(dev_change_flags);
4902EXPORT_SYMBOL(dev_set_mtu);
4903EXPORT_SYMBOL(dev_set_mac_address);
4904EXPORT_SYMBOL(free_netdev);
4905EXPORT_SYMBOL(netdev_boot_setup_check);
4906EXPORT_SYMBOL(netdev_set_master);
4907EXPORT_SYMBOL(netdev_state_change);
4908EXPORT_SYMBOL(netif_receive_skb);
4909EXPORT_SYMBOL(netif_rx);
4910EXPORT_SYMBOL(register_gifconf);
4911EXPORT_SYMBOL(register_netdevice);
4912EXPORT_SYMBOL(register_netdevice_notifier);
4913EXPORT_SYMBOL(skb_checksum_help);
4914EXPORT_SYMBOL(synchronize_net);
4915EXPORT_SYMBOL(unregister_netdevice);
4916EXPORT_SYMBOL(unregister_netdevice_notifier);
4917EXPORT_SYMBOL(net_enable_timestamp);
4918EXPORT_SYMBOL(net_disable_timestamp);
4919EXPORT_SYMBOL(dev_get_flags);
4920
4921#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
4922EXPORT_SYMBOL(br_handle_frame_hook);
4923EXPORT_SYMBOL(br_fdb_get_hook);
4924EXPORT_SYMBOL(br_fdb_put_hook);
4925#endif
4926
4927#ifdef CONFIG_KMOD
4928EXPORT_SYMBOL(dev_load);
4929#endif
4930
4931EXPORT_PER_CPU_SYMBOL(softnet_data);