net-next-2.6.git: net/core/link_watch.c
/*
 * Linux network device link state notification
 *
 * Author:
 *	Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/types.h>


enum lw_bits {
	LW_RUNNING = 0,
	LW_SE_USED
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(void *dummy);
static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);

struct lw_event {
	struct list_head list;
	struct net_device *dev;
};

/* Avoid kmalloc() for most systems */
static struct lw_event singleevent;

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	LIST_HEAD(head);
	struct list_head *n, *next;

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &head);
	spin_unlock_irq(&lweventlist_lock);

	list_for_each_safe(n, next, &head) {
		struct lw_event *event = list_entry(n, struct lw_event, list);
		struct net_device *dev = event->dev;

		if (event == &singleevent) {
			clear_bit(LW_SE_USED, &linkwatch_flags);
		} else {
			kfree(event);
		}

		/* We are about to handle this device,
		 * so new events can be accepted
		 */
		clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

		if (dev->flags & IFF_UP) {
			netdev_state_change(dev);
		}

		dev_put(dev);
	}
}
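
/*
 * Illustrative sketch (an assumption for clarity, not text from link_watch.c
 * itself): the function above uses the splice-then-process idiom so that
 * lweventlist_lock is held only long enough to steal the whole pending list;
 * the per-device work, including the netlink notification sent by
 * netdev_state_change(), then runs without the spinlock.  The same idiom in
 * isolation, with hypothetical names (pending_list, pending_lock, struct
 * item, process_pending), looks like:
 *
 *	static LIST_HEAD(pending_list);
 *	static DEFINE_SPINLOCK(pending_lock);
 *
 *	struct item {
 *		struct list_head list;
 *	};
 *
 *	static void process_pending(void)
 *	{
 *		LIST_HEAD(local);
 *		struct list_head *n, *next;
 *
 *		spin_lock_irq(&pending_lock);
 *		list_splice_init(&pending_list, &local);
 *		spin_unlock_irq(&pending_lock);
 *
 *		list_for_each_safe(n, next, &local) {
 *			struct item *it = list_entry(n, struct item, list);
 *			kfree(it);
 *		}
 *	}
 */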

static void linkwatch_event(void *dummy)
{
	/* Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket
	 */
	linkwatch_nextevent = jiffies + HZ;
	clear_bit(LW_RUNNING, &linkwatch_flags);

	rtnl_shlock();
	linkwatch_run_queue();
	rtnl_shunlock();
}
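
/*
 * Worked example of the throttle above (assuming HZ == 1000, i.e. one jiffy
 * per millisecond): if this work function runs at jiffies == 5000, it sets
 * linkwatch_nextevent = 6000.  Should a driver fire another event at
 * jiffies == 5300, linkwatch_fire_event() below sees thisevent <
 * linkwatch_nextevent and calls schedule_delayed_work(&linkwatch_work,
 * 6000 - 5300), deferring the next run by roughly 700 ms; an event arriving
 * after jiffies 6000 is dispatched immediately via schedule_work().
 */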

void linkwatch_fire_event(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		unsigned long flags;
		struct lw_event *event;

		if (test_and_set_bit(LW_SE_USED, &linkwatch_flags)) {
			event = kmalloc(sizeof(struct lw_event), GFP_ATOMIC);

			if (unlikely(event == NULL)) {
				clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
				return;
			}
		} else {
			event = &singleevent;
		}

		dev_hold(dev);
		event->dev = dev;

		spin_lock_irqsave(&lweventlist_lock, flags);
		list_add_tail(&event->list, &lweventlist);
		spin_unlock_irqrestore(&lweventlist_lock, flags);

		if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {
			unsigned long thisevent = jiffies;

			if (thisevent >= linkwatch_nextevent) {
				schedule_work(&linkwatch_work);
			} else {
				schedule_delayed_work(&linkwatch_work, linkwatch_nextevent - thisevent);
			}
		}
	}
}

EXPORT_SYMBOL(linkwatch_fire_event);
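
/*
 * Drivers do not normally call linkwatch_fire_event() directly; they report
 * link changes through netif_carrier_on()/netif_carrier_off() from
 * <linux/netdevice.h>, which call linkwatch_fire_event() only when the
 * __LINK_STATE_NOCARRIER bit actually toggles, so repeated reports of an
 * unchanged link stay cheap.  A rough sketch of a driver-side link poll;
 * my_check_link() and my_phy_link_up() are hypothetical names, not a real
 * driver API:
 *
 *	static void my_check_link(struct net_device *dev)
 *	{
 *		int up = my_phy_link_up(dev);
 *
 *		if (up && !netif_carrier_ok(dev))
 *			netif_carrier_on(dev);
 *		else if (!up && netif_carrier_ok(dev))
 *			netif_carrier_off(dev);
 *	}
 *
 * Each carrier transition then arrives here as a linkwatch event, and the
 * one-second throttle in linkwatch_event() keeps a flapping link from
 * flooding the rtnetlink socket with state-change messages.
 */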