/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Main qdisc structure lock.

   However, modifications to data participating in scheduling
   must additionally be protected with the dev->queue_lock spinlock.

   The idea is the following:
   - enqueue and dequeue are serialized via the top level device
     spinlock dev->queue_lock.
   - tree walking is protected by read_lock_bh(qdisc_tree_lock)
     and this lock is used only in process context.
   - updates to the tree are made under the rtnl semaphore or
     from softirq context (__qdisc_destroy rcu-callback),
     hence this lock needs local bh disabling.

   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
 */
DEFINE_RWLOCK(qdisc_tree_lock);

void qdisc_lock_tree(struct net_device *dev)
{
        write_lock_bh(&qdisc_tree_lock);
        spin_lock_bh(&dev->queue_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
        spin_unlock_bh(&dev->queue_lock);
        write_unlock_bh(&qdisc_tree_lock);
}
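
/*
 * Illustrative sketch (not part of the original file): a read-side walk
 * over dev->qdisc_list under the locking rules documented above.  The
 * helper name is hypothetical; only the lock and list APIs used here come
 * from this file and the kernel of this era.
 */
#if 0
static void example_dump_qdiscs(struct net_device *dev)
{
        struct Qdisc *q;

        /* process context only; BH disabled by the _bh lock variant */
        read_lock_bh(&qdisc_tree_lock);
        list_for_each_entry(q, &dev->qdisc_list, list)
                printk(KERN_DEBUG "%s: qdisc %s handle %x\n",
                       dev->name, q->ops->id, q->handle);
        read_unlock_bh(&qdisc_tree_lock);
}
#endif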

/*
   dev->queue_lock serializes queue accesses for this device
   AND the dev->qdisc pointer itself.

   dev->xmit_lock serializes accesses to the device driver.

   dev->queue_lock and dev->xmit_lock are mutually exclusive:
   if one is held, the other must be free.
 */

/* Kick device.
   Note that this procedure can be called by a watchdog timer, so
   we do not check the dev->tbusy flag here.

   Returns:  0  - queue is empty.
            >0  - queue is not empty, but throttled.
            <0  - queue is not empty.  Device is throttled, if dev->tbusy != 0.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/

int qdisc_restart(struct net_device *dev)
{
        struct Qdisc *q = dev->qdisc;
        struct sk_buff *skb;

        /* Dequeue packet */
        if ((skb = q->dequeue(q)) != NULL) {
                unsigned nolock = (dev->features & NETIF_F_LLTX);
                /*
                 * When the driver has LLTX set it does its own locking
                 * in start_xmit.  No need to add additional overhead by
                 * locking again.  These checks are worth it because
                 * even uncongested locks can be quite expensive.
                 * The driver can do a trylock like here too, in case
                 * of lock congestion it should return NETDEV_TX_LOCKED (-1)
                 * and the packet will be requeued.
                 */
                if (!nolock) {
                        if (!spin_trylock(&dev->xmit_lock)) {
                        collision:
                                /* So, someone grabbed the driver. */

                                /* It may be a transient configuration error,
                                   when hard_start_xmit() recurses.  We detect
                                   it by checking the xmit owner and drop the
                                   packet when a deadloop is detected.
                                 */
                                if (dev->xmit_lock_owner == smp_processor_id()) {
                                        kfree_skb(skb);
                                        if (net_ratelimit())
                                                printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
                                        return -1;
                                }
                                __get_cpu_var(netdev_rx_stat).cpu_collision++;
                                goto requeue;
                        }
                        /* Remember that the driver is grabbed by us. */
                        dev->xmit_lock_owner = smp_processor_id();
                }

                {
                        /* And release queue */
                        spin_unlock(&dev->queue_lock);

                        if (!netif_queue_stopped(dev)) {
                                int ret;
                                if (netdev_nit)
                                        dev_queue_xmit_nit(skb, dev);

                                ret = dev->hard_start_xmit(skb, dev);
                                if (ret == NETDEV_TX_OK) {
                                        if (!nolock) {
                                                dev->xmit_lock_owner = -1;
                                                spin_unlock(&dev->xmit_lock);
                                        }
                                        spin_lock(&dev->queue_lock);
                                        return -1;
                                }
                                if (ret == NETDEV_TX_LOCKED && nolock) {
                                        spin_lock(&dev->queue_lock);
                                        goto collision;
                                }
                        }

                        /* NETDEV_TX_BUSY - we need to requeue */
                        /* Release the driver */
                        if (!nolock) {
                                dev->xmit_lock_owner = -1;
                                spin_unlock(&dev->xmit_lock);
                        }
                        spin_lock(&dev->queue_lock);
                        q = dev->qdisc;
                }

                /* Device kicked us out :(
                   This is possible in the following cases:

                   0. driver is locked
                   1. fastroute is enabled
                   2. device cannot determine busy state
                      before start of transmission (e.g. dialout)
                   3. device is buggy (ppp)
                 */

requeue:
                q->ops->requeue(skb, q);
                netif_schedule(dev);
                return 1;
        }
        BUG_ON((int) q->q.qlen < 0);
        return q->q.qlen;
}
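
/*
 * For reference (not part of this file): the return convention above is
 * consumed by the qdisc_run() helper in include/net/pkt_sched.h, which in
 * kernels of this era loops along these lines -- shown here as a sketch.
 */
#if 0
static inline void qdisc_run(struct net_device *dev)
{
        while (!netif_queue_stopped(dev) &&
               qdisc_restart(dev) < 0)
                /* NOTHING */;
}
#endif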

static void dev_watchdog(unsigned long arg)
{
        struct net_device *dev = (struct net_device *)arg;

        spin_lock(&dev->xmit_lock);
        if (dev->qdisc != &noop_qdisc) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
                    netif_carrier_ok(dev)) {
                        if (netif_queue_stopped(dev) &&
                            time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {

                                printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
                                       dev->name);
                                dev->tx_timeout(dev);
                        }
                        if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
                                dev_hold(dev);
                }
        }
        spin_unlock(&dev->xmit_lock);

        dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
        init_timer(&dev->watchdog_timer);
        dev->watchdog_timer.data = (unsigned long)dev;
        dev->watchdog_timer.function = dev_watchdog;
}

void __netdev_watchdog_up(struct net_device *dev)
{
        if (dev->tx_timeout) {
                if (dev->watchdog_timeo <= 0)
                        dev->watchdog_timeo = 5*HZ;
                if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
                        dev_hold(dev);
        }
}
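
/*
 * Illustrative sketch (not from this file): a driver of this era arms the
 * watchdog simply by filling in the two net_device fields tested above
 * before registering the device.  The handler name is hypothetical.
 */
#if 0
dev->tx_timeout     = mydrv_tx_timeout;  /* called by dev_watchdog() above */
dev->watchdog_timeo = 2 * HZ;            /* <= 0 falls back to 5*HZ above  */
register_netdev(dev);
#endif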

static void dev_watchdog_up(struct net_device *dev)
{
        spin_lock_bh(&dev->xmit_lock);
        __netdev_watchdog_up(dev);
        spin_unlock_bh(&dev->xmit_lock);
}

static void dev_watchdog_down(struct net_device *dev)
{
        spin_lock_bh(&dev->xmit_lock);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
        spin_unlock_bh(&dev->xmit_lock);
}

void netif_carrier_on(struct net_device *dev)
{
        if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
                linkwatch_fire_event(dev);
        if (netif_running(dev))
                __netdev_watchdog_up(dev);
}

void netif_carrier_off(struct net_device *dev)
{
        if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
                linkwatch_fire_event(dev);
}

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances.  It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        kfree_skb(skb);
        return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        if (net_ratelimit())
                printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
                       skb->dev->name);
        kfree_skb(skb);
        return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
        .id             = "noop",
        .priv_size      = 0,
        .enqueue        = noop_enqueue,
        .dequeue        = noop_dequeue,
        .requeue        = noop_requeue,
        .owner          = THIS_MODULE,
};

struct Qdisc noop_qdisc = {
        .enqueue        = noop_enqueue,
        .dequeue        = noop_dequeue,
        .flags          = TCQ_F_BUILTIN,
        .ops            = &noop_qdisc_ops,
        .list           = LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
        .id             = "noqueue",
        .priv_size      = 0,
        .enqueue        = noop_enqueue,
        .dequeue        = noop_dequeue,
        .requeue        = noop_requeue,
        .owner          = THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
        .enqueue        = NULL,
        .dequeue        = noop_dequeue,
        .flags          = TCQ_F_BUILTIN,
        .ops            = &noqueue_qdisc_ops,
        .list           = LIST_HEAD_INIT(noqueue_qdisc.list),
};


static const u8 prio2band[TC_PRIO_MAX+1] =
        { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */
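
/*
 * Illustrative mapping (not part of the original file), assuming the
 * TC_PRIO_* values from <linux/pkt_sched.h>: band 0 is dequeued first,
 * band 2 last.
 */
#if 0
prio2band[TC_PRIO_INTERACTIVE];  /* == 0: interactive traffic, served first */
prio2band[TC_PRIO_BESTEFFORT];   /* == 1: default/best-effort traffic       */
prio2band[TC_PRIO_BULK];         /* == 2: bulk traffic, served last         */
#endif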

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
                                             struct Qdisc *qdisc)
{
        struct sk_buff_head *list = qdisc_priv(qdisc);
        return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        struct sk_buff_head *list = prio2list(skb, qdisc);

        if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
                qdisc->q.qlen++;
                return __qdisc_enqueue_tail(skb, qdisc, list);
        }

        return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
                if (!skb_queue_empty(list + prio)) {
                        qdisc->q.qlen--;
                        return __qdisc_dequeue_head(qdisc, list + prio);
                }
        }

        return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        qdisc->q.qlen++;
        return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                __qdisc_reset_queue(qdisc, list + prio);

        qdisc->qstats.backlog = 0;
        qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
        struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

        memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
        RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
        return skb->len;

rtattr_failure:
        return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
        int prio;
        struct sk_buff_head *list = qdisc_priv(qdisc);

        for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
                skb_queue_head_init(list + prio);

        return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
        .id             = "pfifo_fast",
        .priv_size      = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
        .enqueue        = pfifo_fast_enqueue,
        .dequeue        = pfifo_fast_dequeue,
        .requeue        = pfifo_fast_requeue,
        .init           = pfifo_fast_init,
        .reset          = pfifo_fast_reset,
        .dump           = pfifo_fast_dump,
        .owner          = THIS_MODULE,
};

struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
        void *p;
        struct Qdisc *sch;
        unsigned int size;
        int err = -ENOBUFS;

        /* ensure that the Qdisc and the private data are 32-byte aligned */
        size = QDISC_ALIGN(sizeof(*sch));
        size += ops->priv_size + (QDISC_ALIGNTO - 1);

        p = kmalloc(size, GFP_KERNEL);
        if (!p)
                goto errout;
        memset(p, 0, size);
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
        sch->padded = (char *) sch - (char *) p;

        INIT_LIST_HEAD(&sch->list);
        skb_queue_head_init(&sch->q);
        sch->ops = ops;
        sch->enqueue = ops->enqueue;
        sch->dequeue = ops->dequeue;
        sch->dev = dev;
        dev_hold(dev);
        sch->stats_lock = &dev->queue_lock;
        atomic_set(&sch->refcnt, 1);

        return sch;
errout:
        return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
        struct Qdisc *sch;

        sch = qdisc_alloc(dev, ops);
        if (IS_ERR(sch))
                goto errout;

        if (!ops->init || ops->init(sch, NULL) == 0)
                return sch;

        qdisc_destroy(sch);
errout:
        return NULL;
}
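
/*
 * Illustrative sketch (not from this file): roughly how a classful qdisc
 * attaches a default child queue with qdisc_create_dflt().  The surrounding
 * function and the use of pfifo_qdisc_ops are assumptions for the example;
 * note that qdisc_create_dflt() returns NULL (not an ERR_PTR) on failure.
 */
#if 0
static int example_attach_default_child(struct Qdisc *sch)
{
        struct Qdisc *child;

        child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
        if (child == NULL)
                return -ENOBUFS;

        /* the caller would now install 'child' under one of its classes */
        return 0;
}
#endif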

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
        struct Qdisc_ops *ops = qdisc->ops;

        if (ops->reset)
                ops->reset(qdisc);
}

/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
        struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
        struct Qdisc_ops *ops = qdisc->ops;

#ifdef CONFIG_NET_ESTIMATOR
        gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
#endif
        write_lock(&qdisc_tree_lock);
        if (ops->reset)
                ops->reset(qdisc);
        if (ops->destroy)
                ops->destroy(qdisc);
        write_unlock(&qdisc_tree_lock);
        module_put(ops->owner);

        dev_put(qdisc->dev);
        kfree((char *) qdisc - qdisc->padded);
}

/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
        struct list_head cql = LIST_HEAD_INIT(cql);
        struct Qdisc *cq, *q, *n;

        if (qdisc->flags & TCQ_F_BUILTIN ||
            !atomic_dec_and_test(&qdisc->refcnt))
                return;

        if (!list_empty(&qdisc->list)) {
                if (qdisc->ops->cl_ops == NULL)
                        list_del(&qdisc->list);
                else
                        list_move(&qdisc->list, &cql);
        }

        /* unlink inner qdiscs from dev->qdisc_list immediately */
        list_for_each_entry(cq, &cql, list)
                list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
                        if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
                                if (q->ops->cl_ops == NULL)
                                        list_del_init(&q->list);
                                else
                                        list_move_tail(&q->list, &cql);
                        }
        list_for_each_entry_safe(cq, n, &cql, list)
                list_del_init(&cq->list);

        call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}

void dev_activate(struct net_device *dev)
{
        /* If no queueing discipline is attached to the device, create a
           default one: pfifo_fast for devices which need queueing, and
           noqueue_qdisc for virtual interfaces.
         */

        if (dev->qdisc_sleeping == &noop_qdisc) {
                struct Qdisc *qdisc;
                if (dev->tx_queue_len) {
                        qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
                        if (qdisc == NULL) {
                                printk(KERN_INFO "%s: activation failed\n", dev->name);
                                return;
                        }
                        write_lock_bh(&qdisc_tree_lock);
                        list_add_tail(&qdisc->list, &dev->qdisc_list);
                        write_unlock_bh(&qdisc_tree_lock);
                } else {
                        qdisc = &noqueue_qdisc;
                }
                write_lock_bh(&qdisc_tree_lock);
                dev->qdisc_sleeping = qdisc;
                write_unlock_bh(&qdisc_tree_lock);
        }

        if (!netif_carrier_ok(dev))
                /* Delay activation until next carrier-on event */
                return;

        spin_lock_bh(&dev->queue_lock);
        rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
        if (dev->qdisc != &noqueue_qdisc) {
                dev->trans_start = jiffies;
                dev_watchdog_up(dev);
        }
        spin_unlock_bh(&dev->queue_lock);
}

void dev_deactivate(struct net_device *dev)
{
        struct Qdisc *qdisc;

        spin_lock_bh(&dev->queue_lock);
        qdisc = dev->qdisc;
        dev->qdisc = &noop_qdisc;

        qdisc_reset(qdisc);

        spin_unlock_bh(&dev->queue_lock);

        dev_watchdog_down(dev);

        while (test_bit(__LINK_STATE_SCHED, &dev->state))
                yield();

        spin_unlock_wait(&dev->xmit_lock);
}

void dev_init_scheduler(struct net_device *dev)
{
        qdisc_lock_tree(dev);
        dev->qdisc = &noop_qdisc;
        dev->qdisc_sleeping = &noop_qdisc;
        INIT_LIST_HEAD(&dev->qdisc_list);
        qdisc_unlock_tree(dev);

        dev_watchdog_init(dev);
}

void dev_shutdown(struct net_device *dev)
{
        struct Qdisc *qdisc;

        qdisc_lock_tree(dev);
        qdisc = dev->qdisc_sleeping;
        dev->qdisc = &noop_qdisc;
        dev->qdisc_sleeping = &noop_qdisc;
        qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
        if ((qdisc = dev->qdisc_ingress) != NULL) {
                dev->qdisc_ingress = NULL;
                qdisc_destroy(qdisc);
        }
#endif
        BUG_TRAP(!timer_pending(&dev->watchdog_timer));
        qdisc_unlock_tree(dev);
}
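
/*
 * For orientation (not part of this file): the core networking code of this
 * era drives the four helpers above roughly in this order over a device's
 * lifetime.
 */
#if 0
dev_init_scheduler(dev);        /* register_netdevice(): attach noop_qdisc    */
dev_activate(dev);              /* dev_open(): install pfifo_fast or noqueue  */
dev_deactivate(dev);            /* dev_close(): swap noop_qdisc back in       */
dev_shutdown(dev);              /* unregister_netdevice(): destroy the qdiscs */
#endif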

EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(netif_carrier_on);
EXPORT_SYMBOL(netif_carrier_off);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(noop_qdisc_ops);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_alloc);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_restart);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);