]> bbs.cooldavid.org Git - net-next-2.6.git/blame - net/sched/sch_red.c
stmmac: update the driver version
[net-next-2.6.git] / net / sched / sch_red.c
CommitLineData
1da177e4
LT
1/*
2 * net/sched/sch_red.c Random Early Detection queue.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 * Changes:
dba051f3 12 * J Hadi Salim 980914: computation fixes
1da177e4 13 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
dba051f3 14 * J Hadi Salim 980816: ECN support
1da177e4
LT
15 */
16
1da177e4 17#include <linux/module.h>
1da177e4
LT
18#include <linux/types.h>
19#include <linux/kernel.h>
1da177e4 20#include <linux/skbuff.h>
1da177e4
LT
21#include <net/pkt_sched.h>
22#include <net/inet_ecn.h>
6b31b28a 23#include <net/red.h>
1da177e4
LT
24
25
6b31b28a 26/* Parameters, settable by user:
1da177e4
LT
27 -----------------------------
28
29 limit - bytes (must be > qth_max + burst)
30
31 Hard limit on queue length, should be chosen >qth_max
32 to allow packet bursts. This parameter does not
33 affect the algorithm's behaviour and can be chosen
34 arbitrarily high (well, less than ram size)
35 Really, this limit will never be reached
36 if RED works correctly.
1da177e4
LT
37 */
38
/* Per-qdisc private state for one RED instance. */
struct red_sched_data
{
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* user flags: TC_RED_ECN, TC_RED_HARDDROP */
	struct red_parms	parms;		/* RED algorithm state/parameters (net/red.h) */
	struct red_stats	stats;		/* mark/drop counters, exported via dump_stats */
	struct Qdisc		*qdisc;		/* child qdisc that actually holds the packets */
};
47
/* Nonzero if the user asked for ECN marking instead of dropping. */
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}
52
/* Nonzero if the user asked for unconditional drop (no ECN) above qth_max. */
static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
57
/*
 * Enqueue entry point: update the average queue estimate, let the RED
 * algorithm decide to pass / ECN-mark / drop, then hand the packet to
 * the child qdisc and account the outcome.
 *
 * Returns the child's NET_XMIT_* code on enqueue, or NET_XMIT_CN when
 * RED itself dropped the packet (congestion drop).
 */
static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	/* EWMA average is computed over the child's byte backlog. */
	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		/* Between qth_min and qth_max: probabilistic mark/drop. */
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		/* Above qth_max: mark if ECN is usable, otherwise drop.
		 * TC_RED_HARDDROP forces a drop even with ECN enabled.
		 */
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		/* Child dropped it (e.g. over its own limit). */
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
110
dba051f3 111static struct sk_buff * red_dequeue(struct Qdisc* sch)
1da177e4
LT
112{
113 struct sk_buff *skb;
114 struct red_sched_data *q = qdisc_priv(sch);
f38c39d6 115 struct Qdisc *child = q->qdisc;
1da177e4 116
f38c39d6
PM
117 skb = child->dequeue(child);
118 if (skb)
119 sch->q.qlen--;
120 else if (!red_is_idling(&q->parms))
9e178ff2
TG
121 red_start_of_idle_period(&q->parms);
122
123 return skb;
1da177e4
LT
124}
125
8e3af978
JP
126static struct sk_buff * red_peek(struct Qdisc* sch)
127{
128 struct red_sched_data *q = qdisc_priv(sch);
129 struct Qdisc *child = q->qdisc;
130
131 return child->ops->peek(child);
132}
133
1da177e4
LT
134static unsigned int red_drop(struct Qdisc* sch)
135{
1da177e4 136 struct red_sched_data *q = qdisc_priv(sch);
f38c39d6
PM
137 struct Qdisc *child = q->qdisc;
138 unsigned int len;
1da177e4 139
f38c39d6 140 if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
6b31b28a 141 q->stats.other++;
f38c39d6
PM
142 sch->qstats.drops++;
143 sch->q.qlen--;
1da177e4
LT
144 return len;
145 }
6b31b28a 146
6a1b63d4
TG
147 if (!red_is_idling(&q->parms))
148 red_start_of_idle_period(&q->parms);
149
1da177e4
LT
150 return 0;
151}
152
/* Flush the child queue and restart the RED estimator from scratch. */
static void red_reset(struct Qdisc* sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}
161
/* Teardown: release the child qdisc (noop_qdisc destroy is a no-op). */
static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	qdisc_destroy(q->qdisc);
}
167
/* Netlink attribute policy: PARMS carries struct tc_red_qopt, STAB a
 * fixed-size (RED_STAB_SIZE) idle-decay lookup table for red_set_parms().
 */
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
};
172
1e90474c 173static int red_change(struct Qdisc *sch, struct nlattr *opt)
1da177e4
LT
174{
175 struct red_sched_data *q = qdisc_priv(sch);
1e90474c 176 struct nlattr *tb[TCA_RED_MAX + 1];
1da177e4 177 struct tc_red_qopt *ctl;
f38c39d6 178 struct Qdisc *child = NULL;
cee63723 179 int err;
1da177e4 180
cee63723 181 if (opt == NULL)
dba051f3
TG
182 return -EINVAL;
183
27a3421e 184 err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
cee63723
PM
185 if (err < 0)
186 return err;
187
1e90474c 188 if (tb[TCA_RED_PARMS] == NULL ||
27a3421e 189 tb[TCA_RED_STAB] == NULL)
1da177e4
LT
190 return -EINVAL;
191
1e90474c 192 ctl = nla_data(tb[TCA_RED_PARMS]);
1da177e4 193
f38c39d6 194 if (ctl->limit > 0) {
fb0305ce
PM
195 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
196 if (IS_ERR(child))
197 return PTR_ERR(child);
f38c39d6
PM
198 }
199
1da177e4
LT
200 sch_tree_lock(sch);
201 q->flags = ctl->flags;
1da177e4 202 q->limit = ctl->limit;
5e50da01
PM
203 if (child) {
204 qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
b94c8afc
PM
205 qdisc_destroy(q->qdisc);
206 q->qdisc = child;
5e50da01 207 }
1da177e4 208
6b31b28a
TG
209 red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
210 ctl->Plog, ctl->Scell_log,
1e90474c 211 nla_data(tb[TCA_RED_STAB]));
6b31b28a 212
b03efcfb 213 if (skb_queue_empty(&sch->q))
6b31b28a 214 red_end_of_idle_period(&q->parms);
dba051f3 215
1da177e4
LT
216 sch_tree_unlock(sch);
217 return 0;
218}
219
/* Qdisc init: install the noop child first so the qdisc is always in a
 * consistent state, then apply the user's configuration via red_change().
 */
static int red_init(struct Qdisc* sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}
227
/* Dump the current configuration back to userspace as TCA_RED_PARMS
 * inside a TCA_OPTIONS nest.  Returns skb length on success, -EMSGSIZE
 * if the message did not fit.
 */
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		/* thresholds are stored pre-scaled by Wlog; undo for userspace */
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
252
253static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
254{
255 struct red_sched_data *q = qdisc_priv(sch);
6b31b28a
TG
256 struct tc_red_xstats st = {
257 .early = q->stats.prob_drop + q->stats.forced_drop,
258 .pdrop = q->stats.pdrop,
259 .other = q->stats.other,
260 .marked = q->stats.prob_mark + q->stats.forced_mark,
261 };
262
263 return gnet_stats_copy_app(d, &st, sizeof(st));
1da177e4
LT
264}
265
/* Describe the single pseudo-class (minor 1) wrapping the child qdisc. */
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}
275
/* Replace the child qdisc, returning the old one via *old.  NULL means
 * "detach": install noop_qdisc.  The swap, qlen accounting and reset
 * all happen under the qdisc tree lock.
 */
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}
292
/* Return the child qdisc for the (only) class. */
static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}
298
/* Class lookup: RED has exactly one class, identified by cookie 1. */
static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}
303
/* Nothing to release: red_get() takes no reference. */
static void red_put(struct Qdisc *sch, unsigned long arg)
{
}
307
f38c39d6
PM
308static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
309{
310 if (!walker->stop) {
311 if (walker->count >= walker->skip)
312 if (walker->fn(sch, 1, walker) < 0) {
313 walker->stop = 1;
314 return;
315 }
316 walker->count++;
317 }
318}
319
/* Class operations for the single-class RED hierarchy. */
static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};
328
/* Qdisc operations table registered under the "red" identifier. */
static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};
345
/* Module entry point: register the "red" qdisc with the tc core. */
static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}
/* Module exit: unregister; existing instances are gone by this point. */
static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}
/* Standard module boilerplate. */
module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");