/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */
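/*
 * For illustration only, a configuration from userspace might look like
 * this (values made up for the example, not defaults of this module):
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn bandwidth 10mbit probability 0.02
 */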
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN, TC_RED_HARDDROP */
	struct red_parms	parms;		/* RED algorithm state */
	struct red_stats	stats;		/* mark/drop counters */
	struct Qdisc		*qdisc;		/* attached child (bfifo) qdisc */
};
static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
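/*
 * Enqueue: recompute the EWMA queue average from the child's backlog,
 * then let red_action() choose between passing the packet through,
 * probabilistic marking and forced marking. A "mark" becomes an ECN CE
 * mark when the flags and the packet allow it, otherwise a drop.
 */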
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}
		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	ret = child->enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		sch->q.qlen++;
	} else {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
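/*
 * Requeue restores a packet to the head of the child queue; the queue
 * is clearly non-empty at that point, so any idle period ends here.
 */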
static int red_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	ret = child->ops->requeue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.requeues++;
		sch->q.qlen++;
	}
	return ret;
}
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}
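/*
 * red_drop: shed one packet from the child on behalf of an overlimit
 * ancestor, accounting it as "other"; if the child has nothing to
 * drop, the queue is empty and an idle period begins.
 */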
static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}
static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}
static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_destroy(q->qdisc);
}
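/*
 * Create the default bfifo child that enforces the byte limit. The
 * fifo's ->change() hook expects a netlink attribute, so a single
 * tc_fifo_qopt attribute is built by hand and handed to it.
 */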
static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit)
{
	struct Qdisc *q;
	struct nlattr *nla;
	int ret;

	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
			      TC_H_MAKE(sch->handle, 1));
	if (q) {
		nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)),
			      GFP_KERNEL);
		if (nla) {
			nla->nla_type = RTM_NEWQDISC;
			nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

			ret = q->ops->change(q, nla);
			kfree(nla);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}
	return NULL;
}
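/*
 * red_change: validate TCA_RED_PARMS and the TCA_RED_STAB lookup
 * table, then swap in a freshly sized bfifo child and reload the RED
 * parameters under the tree lock.
 */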
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    nla_len(tb[TCA_RED_PARMS]) < sizeof(*ctl) ||
	    tb[TCA_RED_STAB] == NULL ||
	    nla_len(tb[TCA_RED_STAB]) < RED_STAB_SIZE)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = red_create_dflt(sch, ctl->limit);
		if (child == NULL)
			return -ENOMEM;
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}
static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}
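/*
 * Dump parameters back in the units userspace supplied them in:
 * red_set_parms() stores the thresholds scaled up by Wlog, so they
 * are shifted back down here.
 */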
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	return nla_nest_cancel(skb, opts);
}
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
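/*
 * RED is classful only so that the child qdisc can be inspected and
 * replaced; it exposes a single pseudo-class, minor 1, bound to the
 * attached child. The remaining class operations are stubs.
 */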
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}
static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int red_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}
static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.change		=	red_change_class,
	.delete		=	red_delete,
	.walk		=	red_walk,
	.tcf_chain	=	red_find_tcf,
	.dump		=	red_dump_class,
};
static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.requeue	=	red_requeue,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");