/*
 * net/core/dev.c (net-next-2.6)
 * Commit: [NET]: Make the device list and device lookups per namespace.
 */
/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/if_bridge.h>
#include <linux/if_macvlan.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 *	       --BLG
 *
 *		0800	IP
 *		8100	802.1Q VLAN
 *		0001	802.3
 *		0002	AX.25
 *		0004	802.2
 *		8035	RARP
 *		0005	SNAP
 *		0805	X.25
 *		0806	ARP
 *		8137	IPX
 *		0009	Localtalk
 *		86DD	IPv6
 */

static DEFINE_SPINLOCK(ptype_lock);
static struct list_head ptype_base[16] __read_mostly;	/* 16 way hashed list */
static struct list_head ptype_all __read_mostly;	/* Taps */

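/*
 * Illustration (editor's note, not original kernel text): a handler's
 * bucket in ptype_base[] is derived from the low nibble of the
 * host-order protocol value, exactly as dev_add_pack() below computes
 * with "ntohs(pt->type) & 15".  For example:
 *
 *	ntohs(htons(0x0800)) & 15 == 0	// IPv4  -> bucket 0
 *	ntohs(htons(0x8035)) & 15 == 5	// RARP  -> bucket 5
 *
 * ETH_P_ALL handlers bypass the hash entirely and live on ptype_all.
 */
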
#ifdef CONFIG_NET_DMA
struct net_dma {
	struct dma_client client;
	spinlock_t lock;
	cpumask_t channel_mask;
	struct dma_chan *channels[NR_CPUS];
};

static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
	enum dma_state state);

static struct net_dma net_dma = {
	.client = {
		.event_callback = netdev_dma_event,
	},
};
#endif

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading.
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);

EXPORT_SYMBOL(dev_base_lock);

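/*
 * Editor's sketch of the pure-reader pattern described above
 * (hypothetical caller, not part of this file): hold dev_base_lock for
 * reading while walking the per-namespace device list, and take a
 * reference before dropping the lock if the device will be used later.
 * some_match() is an illustrative name.
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev) {
 *		if (some_match(dev)) {
 *			dev_hold(dev);
 *			break;
 *		}
 *	}
 *	read_unlock(&dev_base_lock);
 */
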
#define NETDEV_HASHBITS	8
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
	return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU(struct softnet_data, softnet_data);

#ifdef CONFIG_SYSFS
extern int netdev_sysfs_init(void);
extern int netdev_register_sysfs(struct net_device *);
extern void netdev_unregister_sysfs(struct net_device *);
#else
#define netdev_sysfs_init()		(0)
#define netdev_register_sysfs(dev)	(0)
#define netdev_unregister_sysfs(dev)	do { } while(0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * register_netdevice() inits dev->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
	 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_VOID,
	 ARPHRD_NONE};

static const char *netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
	 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_VOID",
	 "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_lockdep_class(spinlock_t *lock,
					    unsigned short dev_type)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet is
 *	cloned and should be copied-on-write; it would change the packet
 *	and subsequent readers would get a broken packet.
 *							--ANK (980803)
 */

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	int hash;

	spin_lock_bh(&ptype_lock);
	if (pt->type == htons(ETH_P_ALL))
		list_add_rcu(&pt->list, &ptype_all);
	else {
		hash = ntohs(pt->type) & 15;
		list_add_rcu(&pt->list, &ptype_base[hash]);
	}
	spin_unlock_bh(&ptype_lock);
}

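/*
 * Editor's sketch (hypothetical module code, not part of this file): a
 * minimal tap registered with dev_add_pack() and torn down with
 * dev_remove_pack().  The names my_pt and my_rcv are illustrative.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);		// consume our clone of the packet
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_pt = {
 *		.type = __constant_htons(ETH_P_ALL),	// tap every packet
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_pt);
 *	...
 *	dev_remove_pack(&my_pt);	// sleeps; see below
 */
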
/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head;
	struct packet_type *pt1;

	spin_lock_bh(&ptype_lock);

	if (pt->type == htons(ETH_P_ALL))
		head = &ptype_all;
	else
		head = &ptype_base[ntohs(pt->type) & 15];

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
	spin_unlock_bh(&ptype_lock);
}
/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strcpy(s[i].name, name);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings were found, 1 if they were.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strncmp(dev->name, s[i].name, strlen(s[i].name))) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}

/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings were found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

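/*
 * Editor's note: get_options() above consumes up to four leading
 * integers and leaves the remainder of the string as the device name,
 * so the kernel command line syntax is (values here are illustrative):
 *
 *	netdev=<irq>,<base_addr>,<mem_start>,<mem_end>,<name>
 *	netdev=5,0x340,0xd0000,0xd4000,eth0
 *
 * Trailing fields may be omitted; only the integers actually supplied
 * are copied into the saved struct ifmap.
 */
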
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_name_hash(net, name)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, name_hlist);
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;
	}
	return NULL;
}

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

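/*
 * Editor's sketch (hypothetical caller, not part of this file): the
 * refcounted lookup pairs with dev_put() once the device is no longer
 * needed.  init_net and "eth0" are used only for illustration.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *	if (dev) {
 *		// ... use dev ...
 *		dev_put(dev);
 *	}
 */
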
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;

	hlist_for_each(p, dev_index_hash(net, ifindex)) {
		struct net_device *dev
			= hlist_entry(p, struct net_device, index_hlist);
		if (dev->ifindex == ifindex)
			return dev;
	}
	return NULL;
}


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_index(net, ifindex);
	if (dev)
		dev_hold(dev);
	read_unlock(&dev_base_lock);
	return dev;
}

/**
 *	dev_getbyhwaddr - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device. The caller must hold the
 *	rtnl semaphore. The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 *	BUGS:
 *	If the API was consistent this would be __dev_get_by_hwaddr
 */

struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
{
	struct net_device *dev;

	ASSERT_RTNL();

	/* search the passed namespace, not init_net, so the @net argument
	 * is honoured */
	for_each_netdev(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}

EXPORT_SYMBOL(dev_getbyhwaddr);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}

EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	rtnl_lock();
	dev = __dev_getfirstbyhwtype(net, type);
	if (dev)
		dev_hold(dev);
	rtnl_unlock();
	return dev;
}

EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer to
 *	the first matching device, or NULL if no such device is found. The
 *	device returned has had a reference added and the pointer is safe
 *	until the user calls dev_put to indicate they have finished with it.
 */

struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	read_lock(&dev_base_lock);
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	}
	read_unlock(&dev_base_lock);
	return ret;
}

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
int dev_valid_name(const char *name)
{
	if (*name == '\0')
		return 0;
	if (strlen(name) >= IFNAMSIZ)
		return 0;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return 0;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return 0;
		name++;
	}
	return 1;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" - it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	int i = 0;
	char buf[IFNAMSIZ];
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	long *inuse;
	struct net_device *d;
	struct net *net;

	BUG_ON(!dev->nd_net);
	net = dev->nd_net;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, sizeof(buf), name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, sizeof(buf), name, i);
	if (!__dev_get_by_name(net, buf)) {
		strlcpy(dev->name, buf, IFNAMSIZ);
		return i;
	}

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

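/*
 * Editor's worked example (hypothetical caller, not part of this file):
 * with eth0 and eth1 already registered in dev->nd_net, the scan above
 * marks bits 0 and 1 in the in-use map, so this call writes "eth2" into
 * dev->name and returns 2.
 *
 *	int unit = dev_alloc_name(dev, "eth%d");	// unit == 2
 *	if (unit < 0)
 *		return unit;		// -EINVAL or -ENFILE on failure
 */
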
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device. Format strings such as "eth%d" can be
 *	passed for wildcarding.
 */
int dev_change_name(struct net_device *dev, char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev->nd_net);

	net = dev->nd_net;
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!dev_valid_name(newname))
		return -EINVAL;

	memcpy(oldname, dev->name, IFNAMSIZ);

	if (strchr(newname, '%')) {
		err = dev_alloc_name(dev, newname);
		if (err < 0)
			return err;
		strcpy(newname, dev->name);
	}
	else if (__dev_get_by_name(net, newname))
		return -EEXIST;
	else
		strlcpy(dev->name, newname, IFNAMSIZ);

rollback:
	device_rename(&dev->dev, dev->name);

	write_lock_bh(&dev_base_lock);
	hlist_del(&dev->name_hlist);
	hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = raw_notifier_call_chain(&netdev_chain, NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		if (err) {
			printk(KERN_ERR
			       "%s: name change rollback failed: %d.\n",
			       dev->name, ret);
		} else {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		}
	}

	return err;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	raw_notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		raw_notifier_call_chain(&netdev_chain,
				NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}

/**
 *	dev_load	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	read_lock(&dev_base_lock);
	dev = __dev_get_by_name(net, name);
	read_unlock(&dev_base_lock);

	if (!dev && capable(CAP_SYS_MODULE))
		request_module("%s", name);
}

static int default_rebuild_header(struct sk_buff *skb)
{
	printk(KERN_DEBUG "%s: default_rebuild_header called -- BUG!\n",
	       skb->dev ? skb->dev->name : "NULL!!!");
	kfree_skb(skb);
	return 1;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret = 0;

	/*
	 *	Is it already up?
	 */

	if (dev->flags & IFF_UP)
		return 0;

	/*
	 *	Is it even present?
	 */
	if (!netif_device_present(dev))
		return -ENODEV;

	/*
	 *	Call device private open method
	 */
	set_bit(__LINK_STATE_START, &dev->state);
	if (dev->open) {
		ret = dev->open(dev);
		if (ret)
			clear_bit(__LINK_STATE_START, &dev->state);
	}

	/*
	 *	If it went open OK then:
	 */

	if (!ret) {
		/*
		 *	Set the flags.
		 */
		dev->flags |= IFF_UP;

		/*
		 *	Initialize multicasting status
		 */
		dev_set_rx_mode(dev);

		/*
		 *	Wakeup transmit queue engine
		 */
		dev_activate(dev);

		/*
		 *	... and announce new interface.
		 */
		raw_notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
	}
	return ret;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (!(dev->flags & IFF_UP))
		return 0;

	/*
	 *	Tell people we are going down, so that they can
	 *	prepare for death while the device is still operating.
	 */
	raw_notifier_call_chain(&netdev_chain, NETDEV_GOING_DOWN, dev);

	dev_deactivate(dev);

	clear_bit(__LINK_STATE_START, &dev->state);

	/* Synchronize to scheduled poll. We cannot touch poll list,
	 * it can be even on different cpu. So just clear netif_running().
	 *
	 * dev->stop() will invoke napi_disable() on all of its
	 * napi_struct instances on this device.
	 */
	smp_mb__after_clear_bit(); /* Commit netif_running(). */

	/*
	 *	Call the device specific close. This cannot fail.
	 *	Only if device is UP
	 *
	 *	We allow it to be called even after a DETACH hot-plug
	 *	event.
	 */
	if (dev->stop)
		dev->stop(dev);

	/*
	 *	Device is now down.
	 */

	dev->flags &= ~IFF_UP;

	/*
	 * Tell people we are down
	 */
	raw_notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);

	return 0;
}


static int dev_boot_phase = 1;

/*
 *	Device change register/unregister. These are not inline or static
 *	as we export them to the world.
 */

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the caller to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				break;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
	goto unlock;
}

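/*
 * Editor's sketch (hypothetical module code, not part of this file): a
 * notifier that logs devices coming up.  Because of the replay above, a
 * listener registered late still sees NETDEV_REGISTER/NETDEV_UP for
 * devices that already exist.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			printk(KERN_INFO "%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */
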
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	rtnl_unlock();
	return err;
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@v:   pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&netdev_chain, val, v);
}

/* When > 0 there are consumers of rx skb time stamps */
static atomic_t netstamp_needed = ATOMIC_INIT(0);

void net_enable_timestamp(void)
{
	atomic_inc(&netstamp_needed);
}

void net_disable_timestamp(void)
{
	atomic_dec(&netstamp_needed);
}

static inline void net_timestamp(struct sk_buff *skb)
{
	if (atomic_read(&netstamp_needed))
		__net_timestamp(skb);
	else
		skb->tstamp.tv64 = 0;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;

	net_timestamp(skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (ptype->af_packet_priv == NULL ||
		     (struct sock *)ptype->af_packet_priv != skb->sk)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			/* skb->nh should be correctly
			   set by sender, so that the second statement is
			   just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				if (net_ratelimit())
					printk(KERN_CRIT "protocol %04x is "
					       "buggy, dev %s\n",
					       skb2->protocol, dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			ptype->func(skb2, skb->dev, ptype, skb->dev);
		}
	}
	rcu_read_unlock();
}


void __netif_schedule(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
		unsigned long flags;
		struct softnet_data *sd;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		dev->next_sched = sd->output_queue;
		sd->output_queue = dev;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__netif_schedule);

void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_stop_queue(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_wake_queue(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);


/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		/* Let GSO fix up the checksum. */
		goto out_set_summed;
	}

	if (skb_cloned(skb)) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	offset = skb->csum_start - skb_headroom(skb);
	BUG_ON(offset > (int)skb->len);
	csum = skb_checksum(skb, offset, skb->len-offset, 0);

	offset = skb_headlen(skb) - offset;
	BUG_ON(offset <= 0);
	BUG_ON(skb->csum_offset + 2 > offset);

	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) =
		csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}

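/*
 * Editor's worked example (values illustrative): for a CHECKSUM_PARTIAL
 * TCP/IPv4 frame with a 14-byte Ethernet header and a 20-byte IP header,
 * csum_start points at the TCP header, i.e. skb_headroom(skb) + 34, and
 * csum_offset is 16 (offsetof(struct tcphdr, check)).  The code above
 * then sums everything from the TCP header to the end of the packet and
 * folds the 32-bit result into the 16-bit field at
 * skb->head + skb->csum_start + skb->csum_offset.
 */
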
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_type *ptype;
	__be16 type = skb->protocol;
	int err;

	BUG_ON(skb_shinfo(skb)->frag_list);

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (WARN_ON(skb->ip_summed != CHECKSUM_PARTIAL)) {
		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) {
		if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}

EXPORT_SYMBOL(skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		printk(KERN_ERR "%s: hw csum failure.\n",
			dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (dev->features & NETIF_F_HIGHDMA)
		return 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
			return 1;

#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;
		kfree_skb(nskb);
	} while (skb->next);

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct sk_buff *segs;
	int features = dev->features & ~(illegal_highdma(dev, skb) ?
					 NETIF_F_SG : 0);

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (unlikely(IS_ERR(segs)))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (likely(!skb->next)) {
		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		if (netif_needs_gso(dev, skb)) {
			if (unlikely(dev_gso_segment(skb)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		}

		return dev->hard_start_xmit(skb, dev);
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;
		int rc;

		skb->next = nskb->next;
		nskb->next = NULL;
		rc = dev->hard_start_xmit(nskb, dev);
		if (unlikely(rc)) {
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		if (unlikely((netif_queue_stopped(dev) ||
			     netif_subqueue_stopped(dev, skb->queue_mapping)) &&
			     skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

	skb->destructor = DEV_GSO_CB(skb)->destructor;

out_kfree_skb:
	kfree_skb(skb);
	return 0;
}

#define HARD_TX_LOCK(dev, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_lock(dev);			\
	}						\
}

#define HARD_TX_UNLOCK(dev) {				\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		netif_tx_unlock(dev);			\
	}						\
}

/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */

int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* GSO will handle the following emulations directly. */
	if (netif_needs_gso(dev, skb))
		goto gso;

	if (skb_shinfo(skb)->frag_list &&
	    !(dev->features & NETIF_F_FRAGLIST) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* Fragmented skb is linearized if device does not support SG,
	 * or if at least one of fragments is in highmem and device
	 * does not support DMA from it.
	 */
	if (skb_shinfo(skb)->nr_frags &&
	    (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
	    __skb_linearize(skb))
		goto out_kfree_skb;

	/* If packet is not checksummed and device does not support
	 * checksumming for this protocol, complete checksumming here.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		skb_set_transport_header(skb, skb->csum_start -
					      skb_headroom(skb));

		if (!(dev->features & NETIF_F_GEN_CSUM) &&
		    !((dev->features & NETIF_F_IP_CSUM) &&
		      skb->protocol == htons(ETH_P_IP)) &&
		    !((dev->features & NETIF_F_IPV6_CSUM) &&
		      skb->protocol == htons(ETH_P_IPV6)))
			if (skb_checksum_help(skb))
				goto out_kfree_skb;
	}

gso:
	spin_lock_prefetch(&dev->queue_lock);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	/* Updates of qdisc are serialized by queue_lock.
	 * The struct Qdisc which is pointed to by qdisc is now a
	 * rcu structure - it may be accessed without acquiring
	 * a lock (but the structure may be stale.)  The freeing of the
	 * qdisc will be deferred until it's known that there are no
	 * more references to it.
	 *
	 * If the qdisc has an enqueue function, we still need to
	 * hold the queue_lock before calling it, since queue_lock
	 * also serializes access to the device queue.
	 */

	q = rcu_dereference(dev->qdisc);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	if (q->enqueue) {
		/* Grab device queue */
		spin_lock(&dev->queue_lock);
		q = dev->qdisc;
		if (q->enqueue) {
			/* reset queue_mapping to zero */
			skb->queue_mapping = 0;
			rc = q->enqueue(skb, q);
			qdisc_run(dev);
			spin_unlock(&dev->queue_lock);

			rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
			goto out;
		}
		spin_unlock(&dev->queue_lock);
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone to deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (dev->xmit_lock_owner != cpu) {

			HARD_TX_LOCK(dev, cpu);

			if (!netif_queue_stopped(dev) &&
			    !netif_subqueue_stopped(dev, skb->queue_mapping)) {
				rc = 0;
				if (!dev_hard_start_xmit(skb, dev)) {
					HARD_TX_UNLOCK(dev);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev);
			if (net_ratelimit())
				printk(KERN_CRIT "Virtual device %s asks to "
				       "queue packet!\n", dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately */
			if (net_ratelimit())
				printk(KERN_CRIT "Dead loop on virtual device "
				       "%s, fix it urgently!\n", dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

out_kfree_skb:
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

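/*
 * Editor's sketch (hypothetical caller, not part of this file): the
 * contract described above is to set skb->dev and build the frame
 * before handing it off; the skb is consumed whether or not the send
 * succeeds.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);	// illustrative
 *	if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS)
 *		;	// frame was dropped; skb must not be touched again
 */
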

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;	/* old backlog weight */

DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };


/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_CN_LOW	(low congestion)
 *	NET_RX_CN_MOD	(moderate congestion)
 *	NET_RX_CN_HIGH	(high congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	struct softnet_data *queue;
	unsigned long flags;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (!skb->tstamp.tv64)
		net_timestamp(skb);

	/*
	 * The code is rearranged so that the path is the shortest
	 * when the CPU is congested, but is still operating.
	 */
	local_irq_save(flags);
	queue = &__get_cpu_var(softnet_data);

	__get_cpu_var(netdev_rx_stat).total++;
	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
		if (queue->input_pkt_queue.qlen) {
enqueue:
			dev_hold(skb->dev);
			__skb_queue_tail(&queue->input_pkt_queue, skb);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		napi_schedule(&queue->backlog);
		goto enqueue;
	}

	__get_cpu_var(netdev_rx_stat).dropped++;
	local_irq_restore(flags);

	kfree_skb(skb);
	return NET_RX_DROP;
}

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}

EXPORT_SYMBOL(netif_rx_ni);

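/*
 * Editor's sketch (hypothetical driver receive path, not part of this
 * file): a typical non-NAPI driver fills in the skb metadata and then
 * posts it with netif_rx(); netif_rx_ni() is the process-context
 * variant, which also flushes any softirqs it raised.
 *
 *	skb->dev = dev;
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);			// from an interrupt handler
 */
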
static inline struct net_device *skb_bond(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (dev->master) {
		if (skb_bond_should_drop(skb)) {
			kfree_skb(skb);
			return NULL;
		}
		skb->dev = dev->master;
	}

	return dev;
}


static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			BUG_TRAP(!atomic_read(&skb->users));
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct net_device *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		local_irq_enable();

		while (head) {
			struct net_device *dev = head;
			head = head->next_sched;

			smp_mb__before_clear_bit();
			clear_bit(__LINK_STATE_SCHED, &dev->state);

			if (spin_trylock(&dev->queue_lock)) {
				qdisc_run(dev);
				spin_unlock(&dev->queue_lock);
			} else {
				netif_schedule(dev);
			}
		}
	}
}

static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

1850#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
1851/* These hooks are defined here for ATM */
1852struct net_bridge;
1853struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
1854 unsigned char *addr);
1855void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
1856
1857/*
1858 * If bridge module is loaded call bridging hook.
1859 * returns NULL if packet was consumed.
1860 */
1861struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
1862 struct sk_buff *skb) __read_mostly;
1863static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
1864 struct packet_type **pt_prev, int *ret,
1865 struct net_device *orig_dev)
1866{
1867	struct net_bridge_port *port;
1868
1869	if (skb->pkt_type == PACKET_LOOPBACK ||
1870	    (port = rcu_dereference(skb->dev->br_port)) == NULL)
1871		return skb;
1872
1873	if (*pt_prev) {
1874		*ret = deliver_skb(skb, *pt_prev, orig_dev);
1875		*pt_prev = NULL;
1876 }
1877
1878	return br_handle_frame_hook(port, skb);
1879}
1880#else
1881#define handle_bridge(skb, pt_prev, ret, orig_dev)	(skb)
1882#endif
1883
1884#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
1885struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
1886EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
1887
1888static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
1889 struct packet_type **pt_prev,
1890 int *ret,
1891 struct net_device *orig_dev)
1892{
1893 if (skb->dev->macvlan_port == NULL)
1894 return skb;
1895
1896 if (*pt_prev) {
1897 *ret = deliver_skb(skb, *pt_prev, orig_dev);
1898 *pt_prev = NULL;
1899 }
1900 return macvlan_handle_frame_hook(skb);
1901}
1902#else
1903#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
1904#endif
1905
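/*
 * Editor's illustrative sketch, not part of the original file: how a module
 * would install one of the receive hooks declared above at init time and
 * tear it down safely. my_handle_frame() and the my_bridge_* names are
 * invented.
 */
#if 0 /* example only */
static struct sk_buff *my_handle_frame(struct net_bridge_port *p,
				       struct sk_buff *skb)
{
	/* return NULL if the frame was consumed, skb to keep processing */
	return skb;
}

static int __init my_bridge_init(void)
{
	br_handle_frame_hook = my_handle_frame;
	return 0;
}

static void __exit my_bridge_exit(void)
{
	br_handle_frame_hook = NULL;
	synchronize_net();	/* wait for receivers still using the hook */
}
#endif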
1906#ifdef CONFIG_NET_CLS_ACT
1907/* TODO: Maybe we should just force sch_ingress to be compiled in
1908 * when CONFIG_NET_CLS_ACT is? Otherwise we pay a compare and two
1909 * extra stores for nothing when it is off but CONFIG_NET_CLS_ACT
1910 * is enabled.
1911 * NOTE: This doesn't stop any functionality; if you don't have
1912 * the ingress scheduler, you just can't add policies on ingress.
1913 *
1914 */
1915static int ing_filter(struct sk_buff *skb)
1916{
1917 struct Qdisc *q;
1918 struct net_device *dev = skb->dev;
1919 int result = TC_ACT_OK;
1920
1921 if (dev->qdisc_ingress) {
1922 __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd);
1923 if (MAX_RED_LOOP < ttl++) {
1924			printk(KERN_WARNING "Redir loop detected Dropping packet (%d->%d)\n",
1925			       skb->iif, skb->dev->ifindex);
1926 return TC_ACT_SHOT;
1927 }
1928
1929 skb->tc_verd = SET_TC_RTTL(skb->tc_verd,ttl);
1930
1931 skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
1932
1933		spin_lock(&dev->ingress_lock);
1934		if ((q = dev->qdisc_ingress) != NULL)
1935			result = q->enqueue(skb, q);
1936		spin_unlock(&dev->ingress_lock);
1937
1938 }
1939
1940 return result;
1941}
1942#endif
1943
1944int netif_receive_skb(struct sk_buff *skb)
1945{
1946 struct packet_type *ptype, *pt_prev;
1947	struct net_device *orig_dev;
1948	int ret = NET_RX_DROP;
1949	__be16 type;
1950
1951 /* if we've gotten here through NAPI, check netpoll */
1952	if (netpoll_receive_skb(skb))
1953 return NET_RX_DROP;
1954
1955	if (!skb->tstamp.tv64)
1956		net_timestamp(skb);
1957
1958 if (!skb->iif)
1959 skb->iif = skb->dev->ifindex;
1960
1961	orig_dev = skb_bond(skb);
1962
1963	if (!orig_dev)
1964		return NET_RX_DROP;
1965
1966 __get_cpu_var(netdev_rx_stat).total++;
1967
1968	skb_reset_network_header(skb);
1969	skb_reset_transport_header(skb);
1970	skb->mac_len = skb->network_header - skb->mac_header;
1971
1972 pt_prev = NULL;
1973
1974 rcu_read_lock();
1975
1976#ifdef CONFIG_NET_CLS_ACT
1977 if (skb->tc_verd & TC_NCLS) {
1978 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
1979 goto ncls;
1980 }
1981#endif
1982
1983 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1984 if (!ptype->dev || ptype->dev == skb->dev) {
1985			if (pt_prev)
1986				ret = deliver_skb(skb, pt_prev, orig_dev);
1987 pt_prev = ptype;
1988 }
1989 }
1990
1991#ifdef CONFIG_NET_CLS_ACT
1992 if (pt_prev) {
1993		ret = deliver_skb(skb, pt_prev, orig_dev);
1994		pt_prev = NULL; /* no one else should process this after */
1995 } else {
1996 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
1997 }
1998
1999 ret = ing_filter(skb);
2000
2001 if (ret == TC_ACT_SHOT || (ret == TC_ACT_STOLEN)) {
2002 kfree_skb(skb);
2003 goto out;
2004 }
2005
2006 skb->tc_verd = 0;
2007ncls:
2008#endif
2009
2010	skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
2011	if (!skb)
2012		goto out;
2013	skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
2014	if (!skb)
2015 goto out;
2016
2017 type = skb->protocol;
2018 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type)&15], list) {
2019 if (ptype->type == type &&
2020 (!ptype->dev || ptype->dev == skb->dev)) {
2021			if (pt_prev)
2022				ret = deliver_skb(skb, pt_prev, orig_dev);
2023 pt_prev = ptype;
2024 }
2025 }
2026
2027 if (pt_prev) {
2028		ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2029	} else {
2030		kfree_skb(skb);
2031		/* Jamal, now you will not be able to escape explaining
2032		 * to me how you were going to use this. :-)
2033		 */
2034 ret = NET_RX_DROP;
2035 }
2036
2037out:
2038 rcu_read_unlock();
2039 return ret;
2040}
2041
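/*
 * Editor's illustrative sketch, not part of the original file: a
 * packet_type handler of the kind delivered to by the two
 * list_for_each_entry_rcu() loops in netif_receive_skb() above.
 * my_ip_rcv() and my_pt are invented names.
 */
#if 0 /* example only */
static int my_ip_rcv(struct sk_buff *skb, struct net_device *dev,
		     struct packet_type *pt, struct net_device *orig_dev)
{
	/* deliver_skb() took a reference for us; release it when done */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type my_pt = {
	.type	= __constant_htons(ETH_P_IP),
	.func	= my_ip_rcv,
	/* .dev == NULL means: match packets from any device */
};

/* dev_add_pack(&my_pt) installs it; dev_remove_pack(&my_pt) removes it */
#endif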
2042static int process_backlog(struct napi_struct *napi, int quota)
2043{
2044	int work = 0;
2045 struct softnet_data *queue = &__get_cpu_var(softnet_data);
2046 unsigned long start_time = jiffies;
2047
2048	napi->weight = weight_p;
2049	do {
2050 struct sk_buff *skb;
2051 struct net_device *dev;
2052
2053 local_irq_disable();
2054 skb = __skb_dequeue(&queue->input_pkt_queue);
2055 if (!skb) {
2056 __napi_complete(napi);
2057 local_irq_enable();
2058 break;
2059 }
2060
2061 local_irq_enable();
2062
2063 dev = skb->dev;
2064
2065 netif_receive_skb(skb);
2066
2067 dev_put(dev);
2068	} while (++work < quota && jiffies == start_time);
2069
2070	return work;
2071}
2072
2073/**
2074 * __napi_schedule - schedule for receive
2075 * @napi: entry to schedule
2076 *
2077 * The entry's receive function will be scheduled to run
2078 */
2079void fastcall __napi_schedule(struct napi_struct *n)
2080{
2081 unsigned long flags;
2082
2083 local_irq_save(flags);
2084 list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
2085 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2086 local_irq_restore(flags);
2087}
2088EXPORT_SYMBOL(__napi_schedule);
2089
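/*
 * Editor's illustrative sketch, not part of the original file: the usual
 * driver pattern that ends up in __napi_schedule() above. The mydrv_*
 * names and the RX-interrupt mask helper are invented.
 */
#if 0 /* example only */
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		mydrv_disable_rx_irq(priv);	/* hypothetical helper */
		__napi_schedule(&priv->napi);	/* queue our ->poll() */
	}
	return IRQ_HANDLED;
}
#endif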
2090
2091static void net_rx_action(struct softirq_action *h)
2092{
2093	struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
2094	unsigned long start_time = jiffies;
2095	int budget = netdev_budget;
2096 void *have;
2097
2098	local_irq_disable();
2099
2100 while (!list_empty(list)) {
2101 struct napi_struct *n;
2102 int work, weight;
2103
2104		/* If the softirq window is exhausted then punt.
2105 *
2106 * Note that this is a slight policy change from the
2107 * previous NAPI code, which would allow up to 2
2108 * jiffies to pass before breaking out. The test
2109 * used to be "jiffies - start_time > 1".
2110 */
2111 if (unlikely(budget <= 0 || jiffies != start_time))
2112 goto softnet_break;
2113
2114 local_irq_enable();
2115
2116 /* Even though interrupts have been re-enabled, this
2117 * access is safe because interrupts can only add new
2118 * entries to the tail of this list, and only ->poll()
2119 * calls can remove this head entry from the list.
2120 */
2121 n = list_entry(list->next, struct napi_struct, poll_list);
2122
2123 have = netpoll_poll_lock(n);
2124
2125 weight = n->weight;
2126
2127 work = n->poll(n, weight);
2128
2129 WARN_ON_ONCE(work > weight);
2130
2131 budget -= work;
2132
2133 local_irq_disable();
2134
2135 /* Drivers must not modify the NAPI state if they
2136 * consume the entire weight. In such cases this code
2137 * still "owns" the NAPI instance and therefore can
2138 * move the instance around on the list at-will.
2139 */
2140 if (unlikely(work == weight))
2141 list_move_tail(&n->poll_list, list);
2142
2143 netpoll_poll_unlock(have);
2144	}
2145out:
2146	local_irq_enable();
2147
2148#ifdef CONFIG_NET_DMA
2149 /*
2150 * There may not be any more sk_buffs coming right now, so push
2151 * any pending DMA copies to hardware
2152 */
2153 if (!cpus_empty(net_dma.channel_mask)) {
2154 int chan_idx;
2155 for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
2156 struct dma_chan *chan = net_dma.channels[chan_idx];
2157 if (chan)
2158 dma_async_memcpy_issue_pending(chan);
2159 }
2160	}
2161#endif
2162
2163 return;
2164
2165softnet_break:
2166 __get_cpu_var(netdev_rx_stat).time_squeeze++;
2167 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2168 goto out;
2169}
2170
2171static gifconf_func_t * gifconf_list [NPROTO];
2172
2173/**
2174 * register_gifconf - register a SIOCGIF handler
2175 * @family: Address family
2176 * @gifconf: Function handler
2177 *
2178 * Register protocol dependent address dumping routines. The handler
2179 * that is passed must not be freed or reused until it has been replaced
2180 * by another handler.
2181 */
2182int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
2183{
2184 if (family >= NPROTO)
2185 return -EINVAL;
2186 gifconf_list[family] = gifconf;
2187 return 0;
2188}
2189
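/*
 * Editor's illustrative sketch, not part of the original file:
 * registering a per-family SIOCGIFCONF helper. This mirrors how IPv4
 * wires up inet_gifconf() from net/ipv4/devinet.c.
 */
#if 0 /* example only */
static int __init my_af_init(void)
{
	return register_gifconf(PF_INET, inet_gifconf);
}
#endif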
2190
2191/*
2192 * Map an interface index to its name (SIOCGIFNAME)
2193 */
2194
2195/*
2196 * We need this ioctl for efficient implementation of the
2197 * if_indextoname() function required by the IPv6 API. Without
2198 * it, we would have to search all the interfaces to find a
2199 * match. --pb
2200 */
2201
2202static int dev_ifname(struct net *net, struct ifreq __user *arg)
2203{
2204 struct net_device *dev;
2205 struct ifreq ifr;
2206
2207 /*
2208 * Fetch the caller's info block.
2209 */
2210
2211 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
2212 return -EFAULT;
2213
2214 read_lock(&dev_base_lock);
2215	dev = __dev_get_by_index(net, ifr.ifr_ifindex);
2216 if (!dev) {
2217 read_unlock(&dev_base_lock);
2218 return -ENODEV;
2219 }
2220
2221 strcpy(ifr.ifr_name, dev->name);
2222 read_unlock(&dev_base_lock);
2223
2224 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
2225 return -EFAULT;
2226 return 0;
2227}
2228
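/*
 * Editor's illustrative sketch, not part of the original file: the
 * user-space side of the SIOCGIFNAME ioctl served by dev_ifname()
 * above -- essentially what if_indextoname() does internally.
 */
#if 0 /* example only, user-space code */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int index_to_name(int sock, int ifindex, char *name)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_ifindex = ifindex;
	if (ioctl(sock, SIOCGIFNAME, &ifr) < 0)
		return -1;
	strncpy(name, ifr.ifr_name, IFNAMSIZ);
	return 0;
}
#endif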
2229/*
2230 * Perform a SIOCGIFCONF call. This structure will change
2231 * size eventually, and there is nothing I can do about it.
2232 * Thus we will need a 'compatibility mode'.
2233 */
2234
2235static int dev_ifconf(struct net *net, char __user *arg)
2236{
2237 struct ifconf ifc;
2238 struct net_device *dev;
2239 char __user *pos;
2240 int len;
2241 int total;
2242 int i;
2243
2244 /*
2245 * Fetch the caller's info block.
2246 */
2247
2248 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
2249 return -EFAULT;
2250
2251 pos = ifc.ifc_buf;
2252 len = ifc.ifc_len;
2253
2254 /*
2255 * Loop over the interfaces, and write an info block for each.
2256 */
2257
2258 total = 0;
2259	for_each_netdev(net, dev) {
2260 for (i = 0; i < NPROTO; i++) {
2261 if (gifconf_list[i]) {
2262 int done;
2263 if (!pos)
2264 done = gifconf_list[i](dev, NULL, 0);
2265 else
2266 done = gifconf_list[i](dev, pos + total,
2267 len - total);
2268 if (done < 0)
2269 return -EFAULT;
2270 total += done;
2271 }
2272 }
2273	}
2274
2275 /*
2276 * All done. Write the updated control block back to the caller.
2277 */
2278 ifc.ifc_len = total;
2279
2280 /*
2281 * Both BSD and Solaris return 0 here, so we do too.
2282 */
2283 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
2284}
2285
2286#ifdef CONFIG_PROC_FS
2287/*
2288 * This is invoked by the /proc filesystem handler to display a device
2289 * in detail.
2290 */
2291void *dev_seq_start(struct seq_file *seq, loff_t *pos)
2292{
2293	struct net *net = seq->private;
2294	loff_t off;
2295	struct net_device *dev;
2296
2297 read_lock(&dev_base_lock);
2298 if (!*pos)
2299 return SEQ_START_TOKEN;
2300
2301	off = 1;
2302	for_each_netdev(net, dev)
2303		if (off++ == *pos)
2304			return dev;
2305
2306	return NULL;
2307}
2308
2309void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2310{
2311	struct net *net = seq->private;
2312	++*pos;
2313	return v == SEQ_START_TOKEN ?
2314		first_net_device(net) : next_net_device((struct net_device *)v);
2315}
2316
2317void dev_seq_stop(struct seq_file *seq, void *v)
2318{
2319 read_unlock(&dev_base_lock);
2320}
2321
2322static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
2323{
2324	struct net_device_stats *stats = dev->get_stats(dev);
2325
2326 seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
2327 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
2328 dev->name, stats->rx_bytes, stats->rx_packets,
2329 stats->rx_errors,
2330 stats->rx_dropped + stats->rx_missed_errors,
2331 stats->rx_fifo_errors,
2332 stats->rx_length_errors + stats->rx_over_errors +
2333 stats->rx_crc_errors + stats->rx_frame_errors,
2334 stats->rx_compressed, stats->multicast,
2335 stats->tx_bytes, stats->tx_packets,
2336 stats->tx_errors, stats->tx_dropped,
2337 stats->tx_fifo_errors, stats->collisions,
2338 stats->tx_carrier_errors +
2339 stats->tx_aborted_errors +
2340 stats->tx_window_errors +
2341 stats->tx_heartbeat_errors,
2342 stats->tx_compressed);
2343}
2344
2345/*
2346 * Called from the PROCfs module. This now uses the new arbitrary sized
2347 * /proc/net interface to create /proc/net/dev
2348 */
2349static int dev_seq_show(struct seq_file *seq, void *v)
2350{
2351 if (v == SEQ_START_TOKEN)
2352 seq_puts(seq, "Inter-| Receive "
2353 " | Transmit\n"
2354 " face |bytes packets errs drop fifo frame "
2355 "compressed multicast|bytes packets errs "
2356 "drop fifo colls carrier compressed\n");
2357 else
2358 dev_seq_printf_stats(seq, v);
2359 return 0;
2360}
2361
2362static struct netif_rx_stats *softnet_get_online(loff_t *pos)
2363{
2364 struct netif_rx_stats *rc = NULL;
2365
2366 while (*pos < NR_CPUS)
2367		if (cpu_online(*pos)) {
2368 rc = &per_cpu(netdev_rx_stat, *pos);
2369 break;
2370 } else
2371 ++*pos;
2372 return rc;
2373}
2374
2375static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
2376{
2377 return softnet_get_online(pos);
2378}
2379
2380static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2381{
2382 ++*pos;
2383 return softnet_get_online(pos);
2384}
2385
2386static void softnet_seq_stop(struct seq_file *seq, void *v)
2387{
2388}
2389
2390static int softnet_seq_show(struct seq_file *seq, void *v)
2391{
2392 struct netif_rx_stats *s = v;
2393
2394 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
2395		   s->total, s->dropped, s->time_squeeze, 0,
2396		   0, 0, 0, 0, /* was fastroute */
2397		   s->cpu_collision);
2398 return 0;
2399}
2400
2401static const struct seq_operations dev_seq_ops = {
2402 .start = dev_seq_start,
2403 .next = dev_seq_next,
2404 .stop = dev_seq_stop,
2405 .show = dev_seq_show,
2406};
2407
2408static int dev_seq_open(struct inode *inode, struct file *file)
2409{
2410 struct seq_file *seq;
2411 int res;
2412 res = seq_open(file, &dev_seq_ops);
2413 if (!res) {
2414 seq = file->private_data;
2415 seq->private = get_net(PROC_NET(inode));
2416 }
2417 return res;
2418}
2419
2420static int dev_seq_release(struct inode *inode, struct file *file)
2421{
2422 struct seq_file *seq = file->private_data;
2423 struct net *net = seq->private;
2424 put_net(net);
2425 return seq_release(inode, file);
2426}
2427
2428static const struct file_operations dev_seq_fops = {
2429 .owner = THIS_MODULE,
2430 .open = dev_seq_open,
2431 .read = seq_read,
2432 .llseek = seq_lseek,
2433	.release = dev_seq_release,
2434};
2435
2436static const struct seq_operations softnet_seq_ops = {
2437 .start = softnet_seq_start,
2438 .next = softnet_seq_next,
2439 .stop = softnet_seq_stop,
2440 .show = softnet_seq_show,
2441};
2442
2443static int softnet_seq_open(struct inode *inode, struct file *file)
2444{
2445 return seq_open(file, &softnet_seq_ops);
2446}
2447
2448static const struct file_operations softnet_seq_fops = {
2449 .owner = THIS_MODULE,
2450 .open = softnet_seq_open,
2451 .read = seq_read,
2452 .llseek = seq_lseek,
2453 .release = seq_release,
2454};
2455
2456static void *ptype_get_idx(loff_t pos)
2457{
2458 struct packet_type *pt = NULL;
2459 loff_t i = 0;
2460 int t;
2461
2462 list_for_each_entry_rcu(pt, &ptype_all, list) {
2463 if (i == pos)
2464 return pt;
2465 ++i;
2466 }
2467
2468 for (t = 0; t < 16; t++) {
2469 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
2470 if (i == pos)
2471 return pt;
2472 ++i;
2473 }
2474 }
2475 return NULL;
2476}
2477
2478static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
2479{
2480 rcu_read_lock();
2481 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
2482}
2483
2484static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2485{
2486 struct packet_type *pt;
2487 struct list_head *nxt;
2488 int hash;
2489
2490 ++*pos;
2491 if (v == SEQ_START_TOKEN)
2492 return ptype_get_idx(0);
2493
2494 pt = v;
2495 nxt = pt->list.next;
2496 if (pt->type == htons(ETH_P_ALL)) {
2497 if (nxt != &ptype_all)
2498 goto found;
2499 hash = 0;
2500 nxt = ptype_base[0].next;
2501 } else
2502 hash = ntohs(pt->type) & 15;
2503
2504 while (nxt == &ptype_base[hash]) {
2505 if (++hash >= 16)
2506 return NULL;
2507 nxt = ptype_base[hash].next;
2508 }
2509found:
2510 return list_entry(nxt, struct packet_type, list);
2511}
2512
2513static void ptype_seq_stop(struct seq_file *seq, void *v)
2514{
2515 rcu_read_unlock();
2516}
2517
2518static void ptype_seq_decode(struct seq_file *seq, void *sym)
2519{
2520#ifdef CONFIG_KALLSYMS
2521 unsigned long offset = 0, symsize;
2522 const char *symname;
2523 char *modname;
2524 char namebuf[128];
2525
2526 symname = kallsyms_lookup((unsigned long)sym, &symsize, &offset,
2527 &modname, namebuf);
2528
2529 if (symname) {
2530 char *delim = ":";
2531
2532 if (!modname)
2533 modname = delim = "";
2534 seq_printf(seq, "%s%s%s%s+0x%lx", delim, modname, delim,
2535 symname, offset);
2536 return;
2537 }
2538#endif
2539
2540 seq_printf(seq, "[%p]", sym);
2541}
2542
2543static int ptype_seq_show(struct seq_file *seq, void *v)
2544{
2545 struct packet_type *pt = v;
2546
2547 if (v == SEQ_START_TOKEN)
2548 seq_puts(seq, "Type Device Function\n");
2549 else {
2550 if (pt->type == htons(ETH_P_ALL))
2551 seq_puts(seq, "ALL ");
2552 else
2553 seq_printf(seq, "%04x", ntohs(pt->type));
2554
2555 seq_printf(seq, " %-8s ",
2556 pt->dev ? pt->dev->name : "");
2557 ptype_seq_decode(seq, pt->func);
2558 seq_putc(seq, '\n');
2559 }
2560
2561 return 0;
2562}
2563
2564static const struct seq_operations ptype_seq_ops = {
2565 .start = ptype_seq_start,
2566 .next = ptype_seq_next,
2567 .stop = ptype_seq_stop,
2568 .show = ptype_seq_show,
2569};
2570
2571static int ptype_seq_open(struct inode *inode, struct file *file)
2572{
2573 return seq_open(file, &ptype_seq_ops);
2574}
2575
2576static const struct file_operations ptype_seq_fops = {
2577 .owner = THIS_MODULE,
2578 .open = ptype_seq_open,
2579 .read = seq_read,
2580 .llseek = seq_lseek,
2581 .release = seq_release,
2582};
2583
2584
2585static int dev_proc_net_init(struct net *net)
2586{
2587 int rc = -ENOMEM;
2588
2589	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
2590		goto out;
2591	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
2592		goto out_dev;
2593	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
2594		goto out_softnet;
2595
2596	if (wext_proc_init(net))
2597		goto out_ptype;
2598 rc = 0;
2599out:
2600 return rc;
2601out_ptype:
2602	proc_net_remove(net, "ptype");
2603out_softnet:
2604	proc_net_remove(net, "softnet_stat");
2605out_dev:
2606	proc_net_remove(net, "dev");
2607	goto out;
2608}
2609
2610static void dev_proc_net_exit(struct net *net)
2611{
2612 wext_proc_exit(net);
2613
2614 proc_net_remove(net, "ptype");
2615 proc_net_remove(net, "softnet_stat");
2616 proc_net_remove(net, "dev");
2617}
2618
2619static struct pernet_operations dev_proc_ops = {
2620 .init = dev_proc_net_init,
2621 .exit = dev_proc_net_exit,
2622};
2623
2624static int __init dev_proc_init(void)
2625{
2626 return register_pernet_subsys(&dev_proc_ops);
2627}
2628#else
2629#define dev_proc_init() 0
2630#endif /* CONFIG_PROC_FS */
2631
2632
2633/**
2634 * netdev_set_master - set up master/slave pair
2635 * @slave: slave device
2636 * @master: new master device
2637 *
2638 * Changes the master device of the slave. Pass %NULL to break the
2639 * bonding. The caller must hold the RTNL semaphore. On a failure
2640 * a negative errno code is returned. On success the reference counts
2641 * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
2642 * function returns zero.
2643 */
2644int netdev_set_master(struct net_device *slave, struct net_device *master)
2645{
2646 struct net_device *old = slave->master;
2647
2648 ASSERT_RTNL();
2649
2650 if (master) {
2651 if (old)
2652 return -EBUSY;
2653 dev_hold(master);
2654 }
2655
2656 slave->master = master;
2657
2658 synchronize_net();
2659
2660 if (old)
2661 dev_put(old);
2662
2663 if (master)
2664 slave->flags |= IFF_SLAVE;
2665 else
2666 slave->flags &= ~IFF_SLAVE;
2667
2668 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
2669 return 0;
2670}
2671
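/*
 * Editor's illustrative sketch, not part of the original file: a
 * bonding-style enslave helper built on netdev_set_master(). The
 * rtnl_lock()/rtnl_unlock() pair satisfies the ASSERT_RTNL() above;
 * my_enslave() is an invented name.
 */
#if 0 /* example only */
static int my_enslave(struct net_device *bond_dev,
		      struct net_device *slave_dev)
{
	int err;

	rtnl_lock();
	err = netdev_set_master(slave_dev, bond_dev);
	rtnl_unlock();
	return err;
}
#endif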
2672static void __dev_set_promiscuity(struct net_device *dev, int inc)
2673{
2674 unsigned short old_flags = dev->flags;
2675
2676	ASSERT_RTNL();
2677
2678 if ((dev->promiscuity += inc) == 0)
2679 dev->flags &= ~IFF_PROMISC;
2680	else
2681		dev->flags |= IFF_PROMISC;
2682	if (dev->flags != old_flags) {
2683 printk(KERN_INFO "device %s %s promiscuous mode\n",
2684 dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
2685							       "left");
2686 audit_log(current->audit_context, GFP_ATOMIC,
2687 AUDIT_ANOM_PROMISCUOUS,
2688 "dev=%s prom=%d old_prom=%d auid=%u",
2689 dev->name, (dev->flags & IFF_PROMISC),
2690 (old_flags & IFF_PROMISC),
2691			audit_get_loginuid(current->audit_context));
2692
2693 if (dev->change_rx_flags)
2694 dev->change_rx_flags(dev, IFF_PROMISC);
2695	}
2696}
2697
2698/**
2699 * dev_set_promiscuity - update promiscuity count on a device
2700 * @dev: device
2701 * @inc: modifier
2702 *
2703 * Add or remove promiscuity from a device. While the count in the device
2704 * remains above zero the interface remains promiscuous. Once it hits zero
2705 * the device reverts back to normal filtering operation. A negative inc
2706 * value is used to drop promiscuity on the device.
2707 */
2708void dev_set_promiscuity(struct net_device *dev, int inc)
2709{
2710 unsigned short old_flags = dev->flags;
2711
2712 __dev_set_promiscuity(dev, inc);
2713 if (dev->flags != old_flags)
2714 dev_set_rx_mode(dev);
2715}
2716
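/*
 * Editor's illustrative sketch, not part of the original file: the
 * reference-counted usage pattern dev_set_promiscuity() is designed for,
 * as in a packet-capture path. The my_tap_* names are invented.
 */
#if 0 /* example only */
static void my_tap_open(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, 1);	/* one reference per listener */
	rtnl_unlock();
}

static void my_tap_close(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);	/* balanced decrement */
	rtnl_unlock();
}
#endif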
2717/**
2718 * dev_set_allmulti - update allmulti count on a device
2719 * @dev: device
2720 * @inc: modifier
2721 *
2722 * Add or remove reception of all multicast frames to a device. While the
2723 * count in the device remains above zero the interface remains listening
2724 * to all interfaces. Once it hits zero the device reverts back to normal
2725 * filtering operation. A negative @inc value is used to drop the counter
2726 * when releasing a resource needing all multicasts.
2727 */
2728
2729void dev_set_allmulti(struct net_device *dev, int inc)
2730{
2731 unsigned short old_flags = dev->flags;
2732
2733	ASSERT_RTNL();
2734
2735 dev->flags |= IFF_ALLMULTI;
2736 if ((dev->allmulti += inc) == 0)
2737 dev->flags &= ~IFF_ALLMULTI;
2738	if (dev->flags ^ old_flags) {
2739		if (dev->change_rx_flags)
2740			dev->change_rx_flags(dev, IFF_ALLMULTI);
2741		dev_set_rx_mode(dev);
2742	}
2743}
2744
2745/*
2746 * Upload unicast and multicast address lists to device and
2747 * configure RX filtering. When the device doesn't support unicast
2748 *	filtering it is put in promiscuous mode while unicast addresses
2749 * are present.
2750 */
2751void __dev_set_rx_mode(struct net_device *dev)
2752{
2753 /* dev_open will call this function so the list will stay sane. */
2754 if (!(dev->flags&IFF_UP))
2755 return;
2756
2757 if (!netif_device_present(dev))
2758		return;
2759
2760 if (dev->set_rx_mode)
2761 dev->set_rx_mode(dev);
2762 else {
2763 /* Unicast addresses changes may only happen under the rtnl,
2764 * therefore calling __dev_set_promiscuity here is safe.
2765 */
2766 if (dev->uc_count > 0 && !dev->uc_promisc) {
2767 __dev_set_promiscuity(dev, 1);
2768 dev->uc_promisc = 1;
2769 } else if (dev->uc_count == 0 && dev->uc_promisc) {
2770 __dev_set_promiscuity(dev, -1);
2771 dev->uc_promisc = 0;
2772 }
2773
2774 if (dev->set_multicast_list)
2775 dev->set_multicast_list(dev);
2776 }
2777}
2778
2779void dev_set_rx_mode(struct net_device *dev)
2780{
2781 netif_tx_lock_bh(dev);
2782 __dev_set_rx_mode(dev);
2783 netif_tx_unlock_bh(dev);
2784}
2785
2786int __dev_addr_delete(struct dev_addr_list **list, int *count,
2787		      void *addr, int alen, int glbl)
2788{
2789 struct dev_addr_list *da;
2790
2791 for (; (da = *list) != NULL; list = &da->next) {
2792 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2793 alen == da->da_addrlen) {
2794 if (glbl) {
2795 int old_glbl = da->da_gusers;
2796 da->da_gusers = 0;
2797 if (old_glbl == 0)
2798 break;
2799 }
2800 if (--da->da_users)
2801 return 0;
2802
2803 *list = da->next;
2804 kfree(da);
2805			(*count)--;
2806 return 0;
2807 }
2808 }
2809 return -ENOENT;
2810}
2811
2812int __dev_addr_add(struct dev_addr_list **list, int *count,
2813		   void *addr, int alen, int glbl)
2814{
2815 struct dev_addr_list *da;
2816
2817 for (da = *list; da != NULL; da = da->next) {
2818 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
2819 da->da_addrlen == alen) {
2820 if (glbl) {
2821 int old_glbl = da->da_gusers;
2822 da->da_gusers = 1;
2823 if (old_glbl)
2824 return 0;
2825 }
2826 da->da_users++;
2827 return 0;
2828 }
2829 }
2830
2831 da = kmalloc(sizeof(*da), GFP_ATOMIC);
2832 if (da == NULL)
2833 return -ENOMEM;
2834 memcpy(da->da_addr, addr, alen);
2835 da->da_addrlen = alen;
2836 da->da_users = 1;
2837 da->da_gusers = glbl ? 1 : 0;
2838 da->next = *list;
2839 *list = da;
2840	(*count)++;
2841	return 0;
2842}
2843
2844/**
2845 * dev_unicast_delete - Release secondary unicast address.
2846 * @dev: device
2847 *	@addr: address to delete
2848 *	@alen: length of @addr
2849 *
2850 *	Release reference to a secondary unicast address and remove it
2851 *	from the device if the reference count drops to zero.
2852 *
2853 * The caller must hold the rtnl_mutex.
2854 */
2855int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
2856{
2857 int err;
2858
2859 ASSERT_RTNL();
2860
2861 netif_tx_lock_bh(dev);
2862	err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2863	if (!err)
2864		__dev_set_rx_mode(dev);
2865 netif_tx_unlock_bh(dev);
2866 return err;
2867}
2868EXPORT_SYMBOL(dev_unicast_delete);
2869
2870/**
2871 * dev_unicast_add - add a secondary unicast address
2872 * @dev: device
2873 *	@addr: address to add
2874 *	@alen: length of @addr
2875 *
2876 * Add a secondary unicast address to the device or increase
2877 * the reference count if it already exists.
2878 *
2879 * The caller must hold the rtnl_mutex.
2880 */
2881int dev_unicast_add(struct net_device *dev, void *addr, int alen)
2882{
2883 int err;
2884
2885 ASSERT_RTNL();
2886
2887 netif_tx_lock_bh(dev);
2888	err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
2889	if (!err)
2890		__dev_set_rx_mode(dev);
2891 netif_tx_unlock_bh(dev);
2892 return err;
2893}
2894EXPORT_SYMBOL(dev_unicast_add);
2895
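/*
 * Editor's illustrative sketch, not part of the original file: a
 * macvlan-style virtual device asking the underlying device to accept an
 * extra unicast MAC via dev_unicast_add(). lowerdev and my_mac are
 * invented names.
 */
#if 0 /* example only */
static int my_listen_on_mac(struct net_device *lowerdev,
			    const unsigned char *my_mac)
{
	int err;

	rtnl_lock();
	err = dev_unicast_add(lowerdev, (void *)my_mac, ETH_ALEN);
	rtnl_unlock();
	return err;
}
#endif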
2896static void __dev_addr_discard(struct dev_addr_list **list)
2897{
2898 struct dev_addr_list *tmp;
2899
2900 while (*list != NULL) {
2901 tmp = *list;
2902 *list = tmp->next;
2903 if (tmp->da_users > tmp->da_gusers)
2904 printk("__dev_addr_discard: address leakage! "
2905 "da_users=%d\n", tmp->da_users);
2906 kfree(tmp);
2907 }
2908}
2909
2910static void dev_addr_discard(struct net_device *dev)
2911{
2912	netif_tx_lock_bh(dev);
2913
2914	__dev_addr_discard(&dev->uc_list);
2915	dev->uc_count = 0;
2916
2917	__dev_addr_discard(&dev->mc_list);
2918	dev->mc_count = 0;
2919
2920 netif_tx_unlock_bh(dev);
2921}
2922
2923unsigned dev_get_flags(const struct net_device *dev)
2924{
2925 unsigned flags;
2926
2927 flags = (dev->flags & ~(IFF_PROMISC |
2928 IFF_ALLMULTI |
2929				IFF_RUNNING |
2930				IFF_LOWER_UP |
2931				IFF_DORMANT)) |
2932 (dev->gflags & (IFF_PROMISC |
2933 IFF_ALLMULTI));
2934
2935 if (netif_running(dev)) {
2936 if (netif_oper_up(dev))
2937 flags |= IFF_RUNNING;
2938 if (netif_carrier_ok(dev))
2939 flags |= IFF_LOWER_UP;
2940 if (netif_dormant(dev))
2941 flags |= IFF_DORMANT;
2942 }
2943
2944 return flags;
2945}
2946
2947int dev_change_flags(struct net_device *dev, unsigned flags)
2948{
2949	int ret, changes;
2950	int old_flags = dev->flags;
2951
2952	ASSERT_RTNL();
2953
2954 /*
2955 * Set the flags on our device.
2956 */
2957
2958 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
2959 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
2960 IFF_AUTOMEDIA)) |
2961 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
2962 IFF_ALLMULTI));
2963
2964 /*
2965 * Load in the correct multicast list now the flags have changed.
2966 */
2967
2968	if (dev->change_rx_flags && (dev->flags ^ flags) & IFF_MULTICAST)
2969		dev->change_rx_flags(dev, IFF_MULTICAST);
2970
2971	dev_set_rx_mode(dev);
2972
2973 /*
2974	 *	Have we downed the interface? We handle IFF_UP ourselves
2975 * according to user attempts to set it, rather than blindly
2976 * setting it.
2977 */
2978
2979 ret = 0;
2980 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
2981 ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
2982
2983 if (!ret)
2984			dev_set_rx_mode(dev);
2985 }
2986
2987 if (dev->flags & IFF_UP &&
2988 ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
2989 IFF_VOLATILE)))
2990		raw_notifier_call_chain(&netdev_chain,
2991				NETDEV_CHANGE, dev);
2992
2993 if ((flags ^ dev->gflags) & IFF_PROMISC) {
2994 int inc = (flags & IFF_PROMISC) ? +1 : -1;
2995 dev->gflags ^= IFF_PROMISC;
2996 dev_set_promiscuity(dev, inc);
2997 }
2998
2999	/* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
3000	   is important. Some (broken) drivers set IFF_PROMISC when
3001	   IFF_ALLMULTI is requested, without asking us and without reporting it.
3002	 */
3003 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
3004 int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
3005 dev->gflags ^= IFF_ALLMULTI;
3006 dev_set_allmulti(dev, inc);
3007 }
3008
3009	/* Exclude state transition flags, already notified */
3010	changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
3011	if (changes)
3012		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
3013
3014 return ret;
3015}
3016
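/*
 * Editor's illustrative sketch, not part of the original file: bringing an
 * interface administratively up from kernel code by OR-ing IFF_UP into the
 * flags from dev_get_flags(), under rtnl as dev_change_flags() requires.
 */
#if 0 /* example only */
static int my_if_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif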
3017int dev_set_mtu(struct net_device *dev, int new_mtu)
3018{
3019 int err;
3020
3021 if (new_mtu == dev->mtu)
3022 return 0;
3023
3024 /* MTU must be positive. */
3025 if (new_mtu < 0)
3026 return -EINVAL;
3027
3028 if (!netif_device_present(dev))
3029 return -ENODEV;
3030
3031 err = 0;
3032 if (dev->change_mtu)
3033 err = dev->change_mtu(dev, new_mtu);
3034 else
3035 dev->mtu = new_mtu;
3036 if (!err && dev->flags & IFF_UP)
3037		raw_notifier_call_chain(&netdev_chain,
3038				NETDEV_CHANGEMTU, dev);
3039 return err;
3040}
3041
3042int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
3043{
3044 int err;
3045
3046 if (!dev->set_mac_address)
3047 return -EOPNOTSUPP;
3048 if (sa->sa_family != dev->type)
3049 return -EINVAL;
3050 if (!netif_device_present(dev))
3051 return -ENODEV;
3052 err = dev->set_mac_address(dev, sa);
3053 if (!err)
3054		raw_notifier_call_chain(&netdev_chain,
3055				NETDEV_CHANGEADDR, dev);
3056 return err;
3057}
3058
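/*
 * Editor's illustrative sketch, not part of the original file: changing a
 * device's hardware address via dev_set_mac_address(), which fires the
 * NETDEV_CHANGEADDR notifier seen above. new_mac is an invented name.
 */
#if 0 /* example only */
static int my_set_mac(struct net_device *dev, const unsigned char *new_mac)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, new_mac, dev->addr_len);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}
#endif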
3059/*
3060 * Perform the SIOCxIFxxx calls.
3061 */
3062static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
3063{
3064	int err;
3065	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
3066
3067 if (!dev)
3068 return -ENODEV;
3069
3070 switch (cmd) {
3071 case SIOCGIFFLAGS: /* Get interface flags */
3072 ifr->ifr_flags = dev_get_flags(dev);
3073 return 0;
3074
3075 case SIOCSIFFLAGS: /* Set interface flags */
3076 return dev_change_flags(dev, ifr->ifr_flags);
3077
3078 case SIOCGIFMETRIC: /* Get the metric on the interface
3079 (currently unused) */
3080 ifr->ifr_metric = 0;
3081 return 0;
3082
3083 case SIOCSIFMETRIC: /* Set the metric on the interface
3084 (currently unused) */
3085 return -EOPNOTSUPP;
3086
3087 case SIOCGIFMTU: /* Get the MTU of a device */
3088 ifr->ifr_mtu = dev->mtu;
3089 return 0;
3090
3091 case SIOCSIFMTU: /* Set the MTU of a device */
3092 return dev_set_mtu(dev, ifr->ifr_mtu);
3093
3094 case SIOCGIFHWADDR:
3095 if (!dev->addr_len)
3096 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
3097 else
3098 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
3099 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3100 ifr->ifr_hwaddr.sa_family = dev->type;
3101 return 0;
3102
3103 case SIOCSIFHWADDR:
3104 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
3105
3106 case SIOCSIFHWBROADCAST:
3107 if (ifr->ifr_hwaddr.sa_family != dev->type)
3108 return -EINVAL;
3109 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
3110 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
3111			raw_notifier_call_chain(&netdev_chain,
3112 NETDEV_CHANGEADDR, dev);
3113 return 0;
3114
3115 case SIOCGIFMAP:
3116 ifr->ifr_map.mem_start = dev->mem_start;
3117 ifr->ifr_map.mem_end = dev->mem_end;
3118 ifr->ifr_map.base_addr = dev->base_addr;
3119 ifr->ifr_map.irq = dev->irq;
3120 ifr->ifr_map.dma = dev->dma;
3121 ifr->ifr_map.port = dev->if_port;
3122 return 0;
3123
3124 case SIOCSIFMAP:
3125 if (dev->set_config) {
3126 if (!netif_device_present(dev))
3127 return -ENODEV;
3128 return dev->set_config(dev, &ifr->ifr_map);
3129 }
3130 return -EOPNOTSUPP;
3131
3132 case SIOCADDMULTI:
3133 if (!dev->set_multicast_list ||
3134 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3135 return -EINVAL;
3136 if (!netif_device_present(dev))
3137 return -ENODEV;
3138 return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
3139 dev->addr_len, 1);
3140
3141 case SIOCDELMULTI:
3142 if (!dev->set_multicast_list ||
3143 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
3144 return -EINVAL;
3145 if (!netif_device_present(dev))
3146 return -ENODEV;
3147 return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
3148 dev->addr_len, 1);
3149
3150 case SIOCGIFINDEX:
3151 ifr->ifr_ifindex = dev->ifindex;
3152 return 0;
3153
3154 case SIOCGIFTXQLEN:
3155 ifr->ifr_qlen = dev->tx_queue_len;
3156 return 0;
3157
3158 case SIOCSIFTXQLEN:
3159 if (ifr->ifr_qlen < 0)
3160 return -EINVAL;
3161 dev->tx_queue_len = ifr->ifr_qlen;
3162 return 0;
3163
3164 case SIOCSIFNAME:
3165 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
3166 return dev_change_name(dev, ifr->ifr_newname);
3167
3168 /*
3169 * Unknown or private ioctl
3170 */
3171
3172 default:
3173 if ((cmd >= SIOCDEVPRIVATE &&
3174 cmd <= SIOCDEVPRIVATE + 15) ||
3175 cmd == SIOCBONDENSLAVE ||
3176 cmd == SIOCBONDRELEASE ||
3177 cmd == SIOCBONDSETHWADDR ||
3178 cmd == SIOCBONDSLAVEINFOQUERY ||
3179 cmd == SIOCBONDINFOQUERY ||
3180 cmd == SIOCBONDCHANGEACTIVE ||
3181 cmd == SIOCGMIIPHY ||
3182 cmd == SIOCGMIIREG ||
3183 cmd == SIOCSMIIREG ||
3184 cmd == SIOCBRADDIF ||
3185 cmd == SIOCBRDELIF ||
3186 cmd == SIOCWANDEV) {
3187 err = -EOPNOTSUPP;
3188 if (dev->do_ioctl) {
3189 if (netif_device_present(dev))
3190 err = dev->do_ioctl(dev, ifr,
3191 cmd);
3192 else
3193 err = -ENODEV;
3194 }
3195 } else
3196 err = -EINVAL;
3197
3198 }
3199 return err;
3200}
3201
3202/*
3203 * This function handles all "interface"-type I/O control requests. The actual
3204 * 'doing' part of this is dev_ifsioc above.
3205 */
3206
3207/**
3208 * dev_ioctl - network device ioctl
3209 * @cmd: command to issue
3210 * @arg: pointer to a struct ifreq in user space
3211 *
3212 * Issue ioctl functions to devices. This is normally called by the
3213 * user space syscall interfaces but can sometimes be useful for
3214 * other purposes. The return value is the return from the syscall if
3215 * positive or a negative errno code on error.
3216 */
3217
3218int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3219{
3220 struct ifreq ifr;
3221 int ret;
3222 char *colon;
3223
3224 /* One special case: SIOCGIFCONF takes ifconf argument
3225 and requires shared lock, because it sleeps writing
3226 to user space.
3227 */
3228
3229 if (cmd == SIOCGIFCONF) {
3230		rtnl_lock();
3231		ret = dev_ifconf(net, (char __user *) arg);
3232		rtnl_unlock();
3233 return ret;
3234 }
3235 if (cmd == SIOCGIFNAME)
3236		return dev_ifname(net, (struct ifreq __user *)arg);
3237
3238 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3239 return -EFAULT;
3240
3241 ifr.ifr_name[IFNAMSIZ-1] = 0;
3242
3243 colon = strchr(ifr.ifr_name, ':');
3244 if (colon)
3245 *colon = 0;
3246
3247 /*
3248 * See which interface the caller is talking about.
3249 */
3250
3251 switch (cmd) {
3252 /*
3253 * These ioctl calls:
3254 * - can be done by all.
3255 * - atomic and do not require locking.
3256 * - return a value
3257 */
3258 case SIOCGIFFLAGS:
3259 case SIOCGIFMETRIC:
3260 case SIOCGIFMTU:
3261 case SIOCGIFHWADDR:
3262 case SIOCGIFSLAVE:
3263 case SIOCGIFMAP:
3264 case SIOCGIFINDEX:
3265 case SIOCGIFTXQLEN:
3266			dev_load(net, ifr.ifr_name);
3267			read_lock(&dev_base_lock);
3268			ret = dev_ifsioc(net, &ifr, cmd);
3269 read_unlock(&dev_base_lock);
3270 if (!ret) {
3271 if (colon)
3272 *colon = ':';
3273 if (copy_to_user(arg, &ifr,
3274 sizeof(struct ifreq)))
3275 ret = -EFAULT;
3276 }
3277 return ret;
3278
3279 case SIOCETHTOOL:
3280			dev_load(net, ifr.ifr_name);
3281			rtnl_lock();
3282			ret = dev_ethtool(net, &ifr);
3283 rtnl_unlock();
3284 if (!ret) {
3285 if (colon)
3286 *colon = ':';
3287 if (copy_to_user(arg, &ifr,
3288 sizeof(struct ifreq)))
3289 ret = -EFAULT;
3290 }
3291 return ret;
3292
3293 /*
3294 * These ioctl calls:
3295 * - require superuser power.
3296 * - require strict serialization.
3297 * - return a value
3298 */
3299 case SIOCGMIIPHY:
3300 case SIOCGMIIREG:
3301 case SIOCSIFNAME:
3302 if (!capable(CAP_NET_ADMIN))
3303 return -EPERM;
3304			dev_load(net, ifr.ifr_name);
3305			rtnl_lock();
3306			ret = dev_ifsioc(net, &ifr, cmd);
3307 rtnl_unlock();
3308 if (!ret) {
3309 if (colon)
3310 *colon = ':';
3311 if (copy_to_user(arg, &ifr,
3312 sizeof(struct ifreq)))
3313 ret = -EFAULT;
3314 }
3315 return ret;
3316
3317 /*
3318 * These ioctl calls:
3319 * - require superuser power.
3320 * - require strict serialization.
3321 * - do not return a value
3322 */
3323 case SIOCSIFFLAGS:
3324 case SIOCSIFMETRIC:
3325 case SIOCSIFMTU:
3326 case SIOCSIFMAP:
3327 case SIOCSIFHWADDR:
3328 case SIOCSIFSLAVE:
3329 case SIOCADDMULTI:
3330 case SIOCDELMULTI:
3331 case SIOCSIFHWBROADCAST:
3332 case SIOCSIFTXQLEN:
3333 case SIOCSMIIREG:
3334 case SIOCBONDENSLAVE:
3335 case SIOCBONDRELEASE:
3336 case SIOCBONDSETHWADDR:
3337 case SIOCBONDCHANGEACTIVE:
3338 case SIOCBRADDIF:
3339 case SIOCBRDELIF:
3340 if (!capable(CAP_NET_ADMIN))
3341 return -EPERM;
3342 /* fall through */
3343 case SIOCBONDSLAVEINFOQUERY:
3344 case SIOCBONDINFOQUERY:
3345			dev_load(net, ifr.ifr_name);
3346			rtnl_lock();
3347			ret = dev_ifsioc(net, &ifr, cmd);
3348 rtnl_unlock();
3349 return ret;
3350
3351 case SIOCGIFMEM:
3352 /* Get the per device memory space. We can add this but
3353 * currently do not support it */
3354 case SIOCSIFMEM:
3355 /* Set the per device memory buffer space.
3356 * Not applicable in our case */
3357 case SIOCSIFLINK:
3358 return -EINVAL;
3359
3360 /*
3361 * Unknown or private ioctl.
3362 */
3363 default:
3364 if (cmd == SIOCWANDEV ||
3365 (cmd >= SIOCDEVPRIVATE &&
3366 cmd <= SIOCDEVPRIVATE + 15)) {
3367				dev_load(net, ifr.ifr_name);
3368				rtnl_lock();
3369				ret = dev_ifsioc(net, &ifr, cmd);
3370 rtnl_unlock();
3371 if (!ret && copy_to_user(arg, &ifr,
3372 sizeof(struct ifreq)))
3373 ret = -EFAULT;
3374 return ret;
3375 }
3376			/* Take care of Wireless Extensions */
3377			if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
3378				return wext_handle_ioctl(net, &ifr, cmd, arg);
3379 return -EINVAL;
3380 }
3381}
3382
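/*
 * Editor's illustrative sketch, not part of the original file: a
 * user-space caller exercising the SIOCGIFFLAGS branch of dev_ioctl()
 * above.
 */
#if 0 /* example only, user-space code */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int if_is_up(int sock, const char *name)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(sock, SIOCGIFFLAGS, &ifr) < 0)
		return -1;
	return (ifr.ifr_flags & IFF_UP) != 0;
}
#endif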
3383
3384/**
3385 * dev_new_index - allocate an ifindex
3386 *
3387 * Returns a suitable unique value for a new device interface
3388 * number. The caller must hold the rtnl semaphore or the
3389 * dev_base_lock to be sure it remains unique.
3390 */
3391static int dev_new_index(struct net *net)
3392{
3393 static int ifindex;
3394 for (;;) {
3395 if (++ifindex <= 0)
3396 ifindex = 1;
3397		if (!__dev_get_by_index(net, ifindex))
3398 return ifindex;
3399 }
3400}
3401
3402/* Delayed registration/unregistration */
3403static DEFINE_SPINLOCK(net_todo_list_lock);
3404static struct list_head net_todo_list = LIST_HEAD_INIT(net_todo_list);
3405
3406static void net_set_todo(struct net_device *dev)
3407{
3408 spin_lock(&net_todo_list_lock);
3409 list_add_tail(&dev->todo_list, &net_todo_list);
3410 spin_unlock(&net_todo_list_lock);
3411}
3412
3413/**
3414 * register_netdevice - register a network device
3415 * @dev: device to register
3416 *
3417 * Take a completed network device structure and add it to the kernel
3418 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3419 * chain. 0 is returned on success. A negative errno code is returned
3420 * on a failure to set up the device, or if the name is a duplicate.
3421 *
3422 * Callers must hold the rtnl semaphore. You may want
3423 * register_netdev() instead of this.
3424 *
3425 * BUGS:
3426 * The locking appears insufficient to guarantee two parallel registers
3427 * will not get the same name.
3428 */
3429
3430int register_netdevice(struct net_device *dev)
3431{
3432 struct hlist_head *head;
3433 struct hlist_node *p;
3434 int ret;
3435	struct net *net;
3436
3437 BUG_ON(dev_boot_phase);
3438 ASSERT_RTNL();
3439
3440	might_sleep();
3441
3442	/* When net_devices are persistent, this will be fatal. */
3443	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
3444	BUG_ON(!dev->nd_net);
3445	net = dev->nd_net;
3446
3447	spin_lock_init(&dev->queue_lock);
3448	spin_lock_init(&dev->_xmit_lock);
3449	netdev_set_lockdep_class(&dev->_xmit_lock, dev->type);
3450	dev->xmit_lock_owner = -1;
3451	spin_lock_init(&dev->ingress_lock);
3452
3453 dev->iflink = -1;
3454
3455 /* Init, if this function is available */
3456 if (dev->init) {
3457 ret = dev->init(dev);
3458 if (ret) {
3459 if (ret > 0)
3460 ret = -EIO;
3461			goto out;
3462		}
3463	}
3464
3465 if (!dev_valid_name(dev->name)) {
3466 ret = -EINVAL;
3467		goto err_uninit;
3468	}
3469
3470	dev->ifindex = dev_new_index(net);
3471 if (dev->iflink == -1)
3472 dev->iflink = dev->ifindex;
3473
3474 /* Check for existence of name */
3475	head = dev_name_hash(net, dev->name);
3476 hlist_for_each(p, head) {
3477 struct net_device *d
3478 = hlist_entry(p, struct net_device, name_hlist);
3479 if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
3480 ret = -EEXIST;
3481			goto err_uninit;
3482		}
3483	}
3484
3485 /* Fix illegal checksum combinations */
3486 if ((dev->features & NETIF_F_HW_CSUM) &&
3487 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3488 printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
3489 dev->name);
3490 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
3491 }
3492
3493 if ((dev->features & NETIF_F_NO_CSUM) &&
3494 (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
3495 printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
3496 dev->name);
3497 dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
3498 }
3499
3500
3501 /* Fix illegal SG+CSUM combinations. */
3502 if ((dev->features & NETIF_F_SG) &&
3503	    !(dev->features & NETIF_F_ALL_CSUM)) {
3504		printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n",
3505 dev->name);
3506 dev->features &= ~NETIF_F_SG;
3507 }
3508
3509 /* TSO requires that SG is present as well. */
3510 if ((dev->features & NETIF_F_TSO) &&
3511 !(dev->features & NETIF_F_SG)) {
3512		printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n",
3513 dev->name);
3514 dev->features &= ~NETIF_F_TSO;
3515 }
3516 if (dev->features & NETIF_F_UFO) {
3517 if (!(dev->features & NETIF_F_HW_CSUM)) {
3518 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3519 "NETIF_F_HW_CSUM feature.\n",
3520 dev->name);
3521 dev->features &= ~NETIF_F_UFO;
3522 }
3523 if (!(dev->features & NETIF_F_SG)) {
3524 printk(KERN_ERR "%s: Dropping NETIF_F_UFO since no "
3525 "NETIF_F_SG feature.\n",
3526 dev->name);
3527 dev->features &= ~NETIF_F_UFO;
3528 }
3529 }
3530
3531	/*
3532	 *	nil rebuild_header routine,
3533	 *	which should never be called; it is used just as a bug trap.
3534 */
3535
3536 if (!dev->rebuild_header)
3537 dev->rebuild_header = default_rebuild_header;
3538
3539	ret = netdev_register_sysfs(dev);
3540	if (ret)
3541		goto err_uninit;
3542	dev->reg_state = NETREG_REGISTERED;
3543
3544 /*
3545 * Default initial state at registry is that the
3546 * device is present.
3547 */
3548
3549 set_bit(__LINK_STATE_PRESENT, &dev->state);
3550
3551	dev_init_scheduler(dev);
3552	write_lock_bh(&dev_base_lock);
3553	list_add_tail(&dev->dev_list, &net->dev_base_head);
3554	hlist_add_head(&dev->name_hlist, head);
3555	hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
3556	dev_hold(dev);
3557 write_unlock_bh(&dev_base_lock);
3558
3559	/* Notify protocols that a new device appeared. */
3560	ret = raw_notifier_call_chain(&netdev_chain, NETDEV_REGISTER, dev);
3561	ret = notifier_to_errno(ret);
3562	if (ret)
3563		unregister_netdevice(dev);
3564
3565out:
3566	return ret;
3567
3568err_uninit:
3569 if (dev->uninit)
3570 dev->uninit(dev);
3571 goto out;
3572}
3573
3574/**
3575 * register_netdev - register a network device
3576 * @dev: device to register
3577 *
3578 * Take a completed network device structure and add it to the kernel
3579 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
3580 * chain. 0 is returned on success. A negative errno code is returned
3581 * on a failure to set up the device, or if the name is a duplicate.
3582 *
3583 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
3584 * and expands the device name if you passed a format string to
3585 * alloc_netdev.
3586 */
3587int register_netdev(struct net_device *dev)
3588{
3589 int err;
3590
3591 rtnl_lock();
3592
3593 /*
3594 * If the name is a format string the caller wants us to do a
3595 * name allocation.
3596 */
3597 if (strchr(dev->name, '%')) {
3598 err = dev_alloc_name(dev, dev->name);
3599 if (err < 0)
3600 goto out;
3601 }
3602
3603 err = register_netdevice(dev);
3604out:
3605 rtnl_unlock();
3606 return err;
3607}
3608EXPORT_SYMBOL(register_netdev);
3609
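/*
 * Editor's illustrative sketch, not part of the original file: the typical
 * probe flow that lands in register_netdev() above. The "myeth%d" format
 * string makes register_netdev() pick the next free name via
 * dev_alloc_name(); mydrv_setup() is an invented setup callback.
 */
#if 0 /* example only */
static int __init mydrv_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(0, "myeth%d", mydrv_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);	/* takes rtnl, expands "%d" */
	if (err)
		free_netdev(dev);
	return err;
}
#endif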
3610/*
3611 * netdev_wait_allrefs - wait until all references are gone.
3612 *
3613 * This is called when unregistering network devices.
3614 *
3615 * Any protocol or device that holds a reference should register
3616 * for netdevice notification, and cleanup and put back the
3617 * reference if they receive an UNREGISTER event.
3618 * We can get stuck here if buggy protocols don't correctly
3619 * call dev_put.
3620 */
3621static void netdev_wait_allrefs(struct net_device *dev)
3622{
3623 unsigned long rebroadcast_time, warning_time;
3624
3625 rebroadcast_time = warning_time = jiffies;
3626 while (atomic_read(&dev->refcnt) != 0) {
3627 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
3628			rtnl_lock();
3629
3630 /* Rebroadcast unregister notification */
3631			raw_notifier_call_chain(&netdev_chain,
3632 NETDEV_UNREGISTER, dev);
3633
3634 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
3635 &dev->state)) {
3636 /* We must not have linkwatch events
3637 * pending on unregister. If this
3638 * happens, we simply run the queue
3639 * unscheduled, resulting in a noop
3640 * for this device.
3641 */
3642 linkwatch_run_queue();
3643 }
3644
3645			__rtnl_unlock();
3646
3647 rebroadcast_time = jiffies;
3648 }
3649
3650 msleep(250);
3651
3652 if (time_after(jiffies, warning_time + 10 * HZ)) {
3653 printk(KERN_EMERG "unregister_netdevice: "
3654 "waiting for %s to become free. Usage "
3655 "count = %d\n",
3656 dev->name, atomic_read(&dev->refcnt));
3657 warning_time = jiffies;
3658 }
3659 }
3660}
3661
3662/* The sequence is:
3663 *
3664 * rtnl_lock();
3665 * ...
3666 * register_netdevice(x1);
3667 * register_netdevice(x2);
3668 * ...
3669 * unregister_netdevice(y1);
3670 * unregister_netdevice(y2);
3671 * ...
3672 * rtnl_unlock();
3673 * free_netdev(y1);
3674 * free_netdev(y2);
3675 *
3676 * We are invoked by rtnl_unlock() after it drops the semaphore.
3677 * This allows us to deal with problems:
3678 * 1) We can delete sysfs objects which invoke hotplug
3679 * without deadlocking with linkwatch via keventd.
3680 * 2) Since we run with the RTNL semaphore not held, we can sleep
3681 * safely in order to wait for the netdev refcnt to drop to zero.
3682 */
3683static DEFINE_MUTEX(net_todo_run_mutex);
3684void netdev_run_todo(void)
3685{
3686	struct list_head list;
3687
3688 /* Need to guard against multiple cpu's getting out of order. */
3689	mutex_lock(&net_todo_run_mutex);
3690
3691 /* Not safe to do outside the semaphore. We must not return
3692 * until all unregister events invoked by the local processor
3693 * have been completed (either by this todo run, or one on
3694 * another cpu).
3695 */
3696 if (list_empty(&net_todo_list))
3697 goto out;
3698
3699 /* Snapshot list, allow later requests */
3700 spin_lock(&net_todo_list_lock);
3701	list_replace_init(&net_todo_list, &list);
3702	spin_unlock(&net_todo_list_lock);
3703
3704 while (!list_empty(&list)) {
3705 struct net_device *dev
3706 = list_entry(list.next, struct net_device, todo_list);
3707 list_del(&dev->todo_list);
3708
3709 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
3710 printk(KERN_ERR "network todo '%s' but state %d\n",
3711 dev->name, dev->reg_state);
3712 dump_stack();
3713 continue;
3714 }
3715
3716		dev->reg_state = NETREG_UNREGISTERED;
3717
3718		netdev_wait_allrefs(dev);
3719
3720 /* paranoia */
3721 BUG_ON(atomic_read(&dev->refcnt));
3722 BUG_TRAP(!dev->ip_ptr);
3723 BUG_TRAP(!dev->ip6_ptr);
3724 BUG_TRAP(!dev->dn_ptr);
3725
3726		if (dev->destructor)
3727			dev->destructor(dev);
3728
3729		/* Free network device */
3730		kobject_put(&dev->dev.kobj);
3731 }
3732
3733out:
3734	mutex_unlock(&net_todo_run_mutex);
3735}
3736
3737static struct net_device_stats *internal_stats(struct net_device *dev)
3738{
3739	return &dev->stats;
3740}
3741
3742/**
3743 *	alloc_netdev_mq - allocate network device
3744 * @sizeof_priv: size of private data to allocate space for
3745 * @name: device name format string
3746 * @setup: callback to initialize device
3747 *	@queue_count: the number of subqueues to allocate
3748 *
3749 *	Allocates a struct net_device with private data area for driver use
3750 *	and performs basic initialization. Also allocates subqueue structs
3751 * for each queue on the device at the end of the netdevice.
3752 */
3753struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
3754		void (*setup)(struct net_device *), unsigned int queue_count)
3755{
3756	void *p;
3757	struct net_device *dev;
3758	int alloc_size;
3759
3760 BUG_ON(strlen(name) >= sizeof(dev->name));
3761
3762	/* ensure 32-byte alignment of both the device and private area */
3763	alloc_size = (sizeof(*dev) + NETDEV_ALIGN_CONST +
3764		     (sizeof(struct net_device_subqueue) * (queue_count - 1))) &
3765		     ~NETDEV_ALIGN_CONST;
3766	alloc_size += sizeof_priv + NETDEV_ALIGN_CONST;
3767
3768	p = kzalloc(alloc_size, GFP_KERNEL);
3769	if (!p) {
3770		printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
3771 return NULL;
3772 }
3773
3774	dev = (struct net_device *)
3775		(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
3776	dev->padded = (char *)dev - (char *)p;
3777	dev->nd_net = &init_net;
3778
3779 if (sizeof_priv) {
3780 dev->priv = ((char *)dev +
3781 ((sizeof(struct net_device) +
3782 (sizeof(struct net_device_subqueue) *
3783			       (queue_count - 1)) + NETDEV_ALIGN_CONST)
3784			      & ~NETDEV_ALIGN_CONST));
3785	}
3786
3787	dev->egress_subqueue_count = queue_count;
3788
3789	dev->get_stats = internal_stats;
3790	netpoll_netdev_init(dev);
3791 setup(dev);
3792 strcpy(dev->name, name);
3793 return dev;
3794}
3795EXPORT_SYMBOL(alloc_netdev_mq);
3796
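/*
 * Editor's illustrative sketch, not part of the original file: allocating
 * a multiqueue-capable device with alloc_netdev_mq() and reaching the
 * private area laid out above via netdev_priv(). The mydrv_* names and
 * MYDRV_NUM_TXQ are invented.
 */
#if 0 /* example only */
#define MYDRV_NUM_TXQ 4

struct mydrv_priv {
	spinlock_t lock;
};

static struct net_device *mydrv_alloc(void)
{
	struct net_device *dev;
	struct mydrv_priv *priv;

	dev = alloc_netdev_mq(sizeof(*priv), "myeth%d",
			      mydrv_setup, MYDRV_NUM_TXQ);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);	/* points into the area sized above */
	spin_lock_init(&priv->lock);
	return dev;
}
#endif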
3797/**
3798 * free_netdev - free network device
3799 * @dev: device
3800 *
3801 *	This function does the last stage of destroying an allocated device
3802 *	interface. The reference to the device object is released.
3803 * If this is the last reference then it will be freed.
3804 */
3805void free_netdev(struct net_device *dev)
3806{
3807#ifdef CONFIG_SYSFS
3041a069 3808 /* Compatibility with error handling in drivers */
1da177e4
LT
3809 if (dev->reg_state == NETREG_UNINITIALIZED) {
3810 kfree((char *)dev - dev->padded);
3811 return;
3812 }
3813
3814 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
3815 dev->reg_state = NETREG_RELEASED;
3816
43cb76d9
GKH
3817 /* will free via device release */
3818 put_device(&dev->dev);
1da177e4
LT
3819#else
3820 kfree((char *)dev - dev->padded);
3821#endif
3822}
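
/*
 * Editor's sketch (not from the original file) of the error-handling
 * convention free_netdev() exists to support: a device that was allocated
 * but never successfully registered may still be handed to free_netdev(),
 * which is why the NETREG_UNINITIALIZED case above frees the memory
 * directly.  With hypothetical "foo" names:
 *
 *      dev = alloc_netdev(sizeof(struct foo_priv), "foo%d", foo_setup);
 *      if (!dev)
 *              return -ENOMEM;
 *      err = register_netdev(dev);
 *      if (err) {
 *              free_netdev(dev);   (safe: reg_state is still UNINITIALIZED)
 *              return err;
 *      }
 */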

/* Synchronize with packet receive processing. */
void synchronize_net(void)
{
        might_sleep();
        synchronize_rcu();
}
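
/*
 * Editor's note: the usual pattern is to unhook an object from the receive
 * path and then call synchronize_net() before freeing it, so that no CPU
 * can still be dereferencing it from softirq context.  dev_remove_pack()
 * in this file follows exactly this pattern: __dev_remove_pack() followed
 * by synchronize_net().  A hedged sketch:
 *
 *      __dev_remove_pack(&my_packet_type);
 *      synchronize_net();
 *      (now it is safe to free state reachable from my_packet_type)
 */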

/**
 * unregister_netdevice - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * Callers must hold the rtnl semaphore.  You may want
 * unregister_netdev() instead of this.
 */

void unregister_netdevice(struct net_device *dev)
{
        BUG_ON(dev_boot_phase);
        ASSERT_RTNL();

        /* Some devices call without registering for initialization unwind. */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
                                  "was registered\n", dev->name, dev);

                WARN_ON(1);
                return;
        }

        BUG_ON(dev->reg_state != NETREG_REGISTERED);

        /* If device is running, close it first. */
        if (dev->flags & IFF_UP)
                dev_close(dev);

        /* And unlink it from device chain. */
        write_lock_bh(&dev_base_lock);
        list_del(&dev->dev_list);
        hlist_del(&dev->name_hlist);
        hlist_del(&dev->index_hlist);
        write_unlock_bh(&dev_base_lock);

        dev->reg_state = NETREG_UNREGISTERING;

        synchronize_net();

        /* Shutdown queueing discipline. */
        dev_shutdown(dev);

        /*
         * Notify protocols that we are about to destroy this device.
         * They should clean all the things.
         */
        raw_notifier_call_chain(&netdev_chain, NETDEV_UNREGISTER, dev);

        /*
         * Flush the unicast and multicast chains
         */
        dev_addr_discard(dev);

        if (dev->uninit)
                dev->uninit(dev);

        /* Notifier chain MUST detach us from master device. */
        BUG_TRAP(!dev->master);

        /* Remove entries from sysfs */
        netdev_unregister_sysfs(dev);

        /* Finish processing unregister after unlock */
        net_set_todo(dev);

        synchronize_net();

        dev_put(dev);
}
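
/*
 * Editor's sketch, not from the original file: unregister_netdevice() is
 * for callers that already hold the rtnl semaphore, for example when
 * tearing down several devices in one critical section:
 *
 *      rtnl_lock();
 *      unregister_netdevice(dev_a);
 *      unregister_netdevice(dev_b);
 *      rtnl_unlock();
 *
 * Everyone else should use the unregister_netdev() wrapper below.
 */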

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore.  In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
        rtnl_lock();
        unregister_netdevice(dev);
        rtnl_unlock();
}

EXPORT_SYMBOL(unregister_netdev);
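
/*
 * Editor's sketch of the common module teardown pairing (the "foo" names
 * are hypothetical): unregister first, then release the memory.
 *
 *      static void __exit foo_exit(void)
 *      {
 *              unregister_netdev(foo_dev);
 *              free_netdev(foo_dev);
 *      }
 *      module_exit(foo_exit);
 */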

static int dev_cpu_callback(struct notifier_block *nfb,
                            unsigned long action,
                            void *ocpu)
{
        struct sk_buff **list_skb;
        struct net_device **list_net;
        struct sk_buff *skb;
        unsigned int cpu, oldcpu = (unsigned long)ocpu;
        struct softnet_data *sd, *oldsd;

        if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
                return NOTIFY_OK;

        local_irq_disable();
        cpu = smp_processor_id();
        sd = &per_cpu(softnet_data, cpu);
        oldsd = &per_cpu(softnet_data, oldcpu);

        /* Find end of our completion_queue. */
        list_skb = &sd->completion_queue;
        while (*list_skb)
                list_skb = &(*list_skb)->next;
        /* Append completion queue from offline CPU. */
        *list_skb = oldsd->completion_queue;
        oldsd->completion_queue = NULL;

        /* Find end of our output_queue. */
        list_net = &sd->output_queue;
        while (*list_net)
                list_net = &(*list_net)->next_sched;
        /* Append output queue from offline CPU. */
        *list_net = oldsd->output_queue;
        oldsd->output_queue = NULL;

        raise_softirq_irqoff(NET_TX_SOFTIRQ);
        local_irq_enable();

        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
                netif_rx(skb);

        return NOTIFY_OK;
}
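
/*
 * Editor's note on the idiom above: walking with a pointer-to-next-pointer
 * finds the tail of a singly linked list without a special empty-list case,
 * so the offline CPU's queue can be spliced on with a single store.  In
 * miniature:
 *
 *      struct sk_buff **pp = &head;
 *      while (*pp)
 *              pp = &(*pp)->next;
 *      *pp = other_head;        (appends the entire second list)
 */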

#ifdef CONFIG_NET_DMA
/**
 * net_dma_rebalance - try to maintain one DMA channel per CPU
 * @net_dma: DMA client and associated data (lock, channels, channel_mask)
 *
 * This is called when the number of channels allocated to the net_dma client
 * changes.  The net_dma client tries to have one DMA channel per CPU.
 */

static void net_dma_rebalance(struct net_dma *net_dma)
{
        unsigned int cpu, i, n, chan_idx;
        struct dma_chan *chan;

        if (cpus_empty(net_dma->channel_mask)) {
                for_each_online_cpu(cpu)
                        rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
                return;
        }

        i = 0;
        cpu = first_cpu(cpu_online_map);

        for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
                chan = net_dma->channels[chan_idx];

                n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
                   + (i < (num_online_cpus() %
                        cpus_weight(net_dma->channel_mask)) ? 1 : 0));

                while (n) {
                        per_cpu(softnet_data, cpu).net_dma = chan;
                        cpu = next_cpu(cpu, cpu_online_map);
                        n--;
                }
                i++;
        }
}
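
/*
 * Editor's worked example for the distribution above: with 8 online CPUs
 * and 3 channels, num_online_cpus() / cpus_weight() is 2 with remainder 2,
 * so the first two channels each serve 3 CPUs and the last serves 2
 * (3 + 3 + 2 = 8), keeping all channels within one CPU of each other.
 */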

/**
 * netdev_dma_event - event callback for the net_dma_client
 * @client: should always be net_dma_client
 * @chan: DMA channel for the event
 * @state: DMA state to be handled
 */
static enum dma_state_client
netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
        enum dma_state state)
{
        int i, found = 0, pos = -1;
        struct net_dma *net_dma =
                container_of(client, struct net_dma, client);
        enum dma_state_client ack = DMA_DUP; /* default: take no action */

        spin_lock(&net_dma->lock);
        switch (state) {
        case DMA_RESOURCE_AVAILABLE:
                for (i = 0; i < NR_CPUS; i++)
                        if (net_dma->channels[i] == chan) {
                                found = 1;
                                break;
                        } else if (net_dma->channels[i] == NULL && pos < 0)
                                pos = i;

                if (!found && pos >= 0) {
                        ack = DMA_ACK;
                        net_dma->channels[pos] = chan;
                        cpu_set(pos, net_dma->channel_mask);
                        net_dma_rebalance(net_dma);
                }
                break;
        case DMA_RESOURCE_REMOVED:
                for (i = 0; i < NR_CPUS; i++)
                        if (net_dma->channels[i] == chan) {
                                found = 1;
                                pos = i;
                                break;
                        }

                if (found) {
                        ack = DMA_ACK;
                        cpu_clear(pos, net_dma->channel_mask);
                        net_dma->channels[i] = NULL;
                        net_dma_rebalance(net_dma);
                }
                break;
        default:
                break;
        }
        spin_unlock(&net_dma->lock);

        return ack;
}

/**
 * netdev_dma_register - register the networking subsystem as a DMA client
 */
static int __init netdev_dma_register(void)
{
        spin_lock_init(&net_dma.lock);
        dma_cap_set(DMA_MEMCPY, net_dma.client.cap_mask);
        dma_async_client_register(&net_dma.client);
        dma_async_client_chan_request(&net_dma.client);
        return 0;
}

#else
static int __init netdev_dma_register(void) { return -ENODEV; }
#endif /* CONFIG_NET_DMA */

/**
 * netdev_compute_features - compute conjunction of two feature sets
 * @all: first feature set
 * @one: second feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all.  Returns
 * the new feature set.
 */
int netdev_compute_features(unsigned long all, unsigned long one)
{
        /* if device needs checksumming, downgrade to hw checksumming */
        if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
                all ^= NETIF_F_NO_CSUM | NETIF_F_HW_CSUM;

        /* if device can't do all checksum, downgrade to ipv4/ipv6 */
        if (all & NETIF_F_HW_CSUM && !(one & NETIF_F_HW_CSUM))
                all ^= NETIF_F_HW_CSUM
                       | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        if (one & NETIF_F_GSO)
                one |= NETIF_F_GSO_SOFTWARE;
        one |= NETIF_F_GSO;

        /* If even one device supports robust GSO, enable it for all. */
        if (one & NETIF_F_GSO_ROBUST)
                all |= NETIF_F_GSO_ROBUST;

        all &= one | NETIF_F_LLTX;

        if (!(all & NETIF_F_ALL_CSUM))
                all &= ~NETIF_F_SG;
        if (!(all & NETIF_F_SG))
                all &= ~NETIF_F_GSO_MASK;

        return all;
}
EXPORT_SYMBOL(netdev_compute_features);
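
/*
 * Editor's sketch, not part of the original file: a master driver such as
 * bonding can fold the features of its slaves like this (the names and the
 * starting feature set are assumptions made for illustration):
 *
 *      unsigned long features = NETIF_F_NO_CSUM | NETIF_F_SG |
 *                               NETIF_F_FRAGLIST | NETIF_F_GSO_MASK;
 *
 *      list_for_each_entry(slave, &bond->slave_list, list)
 *              features = netdev_compute_features(features,
 *                                                 slave->dev->features);
 *      bond_dev->features = features;
 *
 * Each call can only downgrade @all, so the result is a feature set every
 * slave can honor.
 */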

/* Initialize per network namespace state */
static int netdev_init(struct net *net)
{
        int i;
        INIT_LIST_HEAD(&net->dev_base_head);
        rwlock_init(&dev_base_lock);

        net->dev_name_head = kmalloc(
                sizeof(*net->dev_name_head)*NETDEV_HASHENTRIES, GFP_KERNEL);
        if (!net->dev_name_head)
                return -ENOMEM;

        net->dev_index_head = kmalloc(
                sizeof(*net->dev_index_head)*NETDEV_HASHENTRIES, GFP_KERNEL);
        if (!net->dev_index_head) {
                kfree(net->dev_name_head);
                return -ENOMEM;
        }

        for (i = 0; i < NETDEV_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&net->dev_name_head[i]);

        for (i = 0; i < NETDEV_HASHENTRIES; i++)
                INIT_HLIST_HEAD(&net->dev_index_head[i]);

        return 0;
}

static void netdev_exit(struct net *net)
{
        kfree(net->dev_name_head);
        kfree(net->dev_index_head);
}

static struct pernet_operations netdev_net_ops = {
        .init = netdev_init,
        .exit = netdev_exit,
};
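
/*
 * Editor's note: with the device list made per-namespace, name and ifindex
 * lookups hash into the tables allocated above, so each struct net has its
 * own view of the devices.  A minimal sketch of how such a lookup can be
 * keyed (full_name_hash() and NETDEV_HASHBITS are assumed from elsewhere
 * in the tree; this is illustrative, not a quote of the actual helper):
 *
 *      static inline struct hlist_head *name_hash(struct net *net,
 *                                                 const char *name)
 *      {
 *              unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
 *              return &net->dev_name_head[hash & (NETDEV_HASHENTRIES - 1)];
 *      }
 */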

/*
 * Initialize the DEV module.  At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
        int i, rc = -ENOMEM;

        BUG_ON(!dev_boot_phase);

        if (dev_proc_init())
                goto out;

        if (netdev_sysfs_init())
                goto out;

        INIT_LIST_HEAD(&ptype_all);
        for (i = 0; i < 16; i++)
                INIT_LIST_HEAD(&ptype_base[i]);

        if (register_pernet_subsys(&netdev_net_ops))
                goto out;

        /*
         * Initialise the packet receive queues.
         */

        for_each_possible_cpu(i) {
                struct softnet_data *queue;

                queue = &per_cpu(softnet_data, i);
                skb_queue_head_init(&queue->input_pkt_queue);
                queue->completion_queue = NULL;
                INIT_LIST_HEAD(&queue->poll_list);

                queue->backlog.poll = process_backlog;
                queue->backlog.weight = weight_p;
        }

        netdev_dma_register();

        dev_boot_phase = 0;

        open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
        open_softirq(NET_RX_SOFTIRQ, net_rx_action, NULL);

        hotcpu_notifier(dev_cpu_callback, 0);
        dst_init();
        dev_mcast_init();
        rc = 0;
out:
        return rc;
}

subsys_initcall(net_dev_init);

EXPORT_SYMBOL(__dev_get_by_index);
EXPORT_SYMBOL(__dev_get_by_name);
EXPORT_SYMBOL(__dev_remove_pack);
EXPORT_SYMBOL(dev_valid_name);
EXPORT_SYMBOL(dev_add_pack);
EXPORT_SYMBOL(dev_alloc_name);
EXPORT_SYMBOL(dev_close);
EXPORT_SYMBOL(dev_get_by_flags);
EXPORT_SYMBOL(dev_get_by_index);
EXPORT_SYMBOL(dev_get_by_name);
EXPORT_SYMBOL(dev_open);
EXPORT_SYMBOL(dev_queue_xmit);
EXPORT_SYMBOL(dev_remove_pack);
EXPORT_SYMBOL(dev_set_allmulti);
EXPORT_SYMBOL(dev_set_promiscuity);
EXPORT_SYMBOL(dev_change_flags);
EXPORT_SYMBOL(dev_set_mtu);
EXPORT_SYMBOL(dev_set_mac_address);
EXPORT_SYMBOL(free_netdev);
EXPORT_SYMBOL(netdev_boot_setup_check);
EXPORT_SYMBOL(netdev_set_master);
EXPORT_SYMBOL(netdev_state_change);
EXPORT_SYMBOL(netif_receive_skb);
EXPORT_SYMBOL(netif_rx);
EXPORT_SYMBOL(register_gifconf);
EXPORT_SYMBOL(register_netdevice);
EXPORT_SYMBOL(register_netdevice_notifier);
EXPORT_SYMBOL(skb_checksum_help);
EXPORT_SYMBOL(synchronize_net);
EXPORT_SYMBOL(unregister_netdevice);
EXPORT_SYMBOL(unregister_netdevice_notifier);
EXPORT_SYMBOL(net_enable_timestamp);
EXPORT_SYMBOL(net_disable_timestamp);
EXPORT_SYMBOL(dev_get_flags);

#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
EXPORT_SYMBOL(br_handle_frame_hook);
EXPORT_SYMBOL(br_fdb_get_hook);
EXPORT_SYMBOL(br_fdb_put_hook);
#endif

#ifdef CONFIG_KMOD
EXPORT_SYMBOL(dev_load);
#endif

EXPORT_PER_CPU_SYMBOL(softnet_data);