/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 * 991129: - Bug fix with grio mode
 *	   - a better single AvgQ mode with grio (WRED)
 *	   - A finer grained VQ dequeue based on a suggestion
 *	     from Ren Liu
 *	   - More error checks
 *
 * For all the glorious comments look at Alexey's sch_red.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>
#if 1 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif
#define GRED_DEF_PRIO (MAX_DPs / 2)

struct gred_sched_data;
struct gred_sched;
struct gred_sched_data
{
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters (VQ index) */
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_stats stats;
};
enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched
{
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		DPs;
	u32		def;
	u8		initd;
};
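/*
 * A GRED qdisc runs in one of two modes, tracked in ->flags:
 *
 *  - RIO mode (grio): every virtual queue (VQ) carries a priority and,
 *    on enqueue, sees the sum of its own average queue length and the
 *    averages of all VQs with a stricter (numerically lower) priority.
 *  - WRED mode: all VQs share a single average queue length, which is
 *    kept in the default VQ and copied in and out on every enqueue.
 *
 * WRED mode is switched on automatically whenever two VQs end up
 * configured with the same priority; see gred_wred_mode_check() below.
 */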
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i, n;

	/* Really ugly O(n^2), but it shouldn't be called too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}
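/*
 * Enqueue: the lower four bits of skb->tc_index select the virtual
 * queue. Packets with an out-of-range or unconfigured index fall back
 * to the default VQ, and tc_index is rewritten so that a later requeue
 * or dequeue finds the same VQ again.
 */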
static int
gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	int i;

	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
		D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
		goto do_enqueue;
	}

	if (((skb->tc_index & 0xf) > (t->DPs - 1)) ||
	    !(q = t->tab[skb->tc_index & 0xf])) {
		printk("GRED: setting to default (%d)\n", t->def);
		if (!(q = t->tab[t->def])) {
			DPRINTK("GRED: setting to default FAILED! dropping! "
				"(%d)\n", t->def);
			goto drop;
		}
		/* Fixing tc_index could be controversial, but it is needed
		 * so that requeue and dequeue find the same virtual queue. */
		skb->tc_index = (skb->tc_index & 0xfffffff0) | t->def;
	}

	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
		 "general backlog %d\n", skb->tc_index & 0xf, sch->handle,
		 q->backlog, sch->qstats.backlog);

	/* Sum up the qavgs of all VQs with a prio stricter than ours
	 * to get the new qavg. */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		for (i = 0; i < t->DPs; i++) {
			if (!t->tab[i] || i == q->DP)
				continue;

			if (t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->parms))
				qavg += t->tab[i]->parms.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t)) {
		qavg = 0;
		q->parms.qavg = t->tab[t->def]->parms.qavg;
		q->parms.qidlestart = t->tab[t->def]->parms.qidlestart;
	}

	q->parms.qavg = red_calc_qavg(&q->parms, gred_backlog(t, q, sch));

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	if (gred_wred_mode(t))
		t->tab[t->def]->parms.qavg = q->parms.qavg;

	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		q->stats.prob_drop++;
		goto congestion_drop;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		q->stats.forced_drop++;
		goto congestion_drop;
	}

	if (q->backlog + skb->len <= q->limit) {
		q->backlog += skb->len;
do_enqueue:
		__skb_queue_tail(&sch->q, skb);
		sch->qstats.backlog += skb->len;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
drop:
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_DROP;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
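/*
 * Requeue puts the skb back at the head of the shared queue; the VQ is
 * known to be non-empty again, so a pending RED idle period is ended.
 */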
static int
gred_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	q = t->tab[skb->tc_index & 0xf];
	/* error checking here -- probably unnecessary */

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	q->backlog += skb->len;
	return 0;
}
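/*
 * Dequeue serves the single shared queue; per-VQ byte accounting is
 * driven by the tc_index stored in the skb. A VQ whose backlog drains
 * to zero starts its RED idle period (in WRED mode the idle period is
 * tracked on the default VQ once the whole queue is empty).
 */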
static struct sk_buff *
gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	skb = __skb_dequeue(&sch->q);

	if (skb) {
		sch->qstats.backlog -= skb->len;
		q = t->tab[skb->tc_index & 0xf];
		if (q) {
			q->backlog -= skb->len;
			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		} else {
			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",
				 skb->tc_index & 0xf);
		}

		return skb;
	}

	if (gred_wred_mode(t)) {
		q = t->tab[t->def];
		if (!q)
			D2PRINTK("no default VQ set: Results will be "
				 "screwed up\n");
		else
			red_start_of_idle_period(&q->parms);
	}

	return NULL;
}
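/*
 * ->drop removes from the tail of the shared queue. The owning VQ
 * books the loss as an "other" drop, i.e. one not caused by RED.
 */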
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	skb = __skb_dequeue_tail(&sch->q);
	if (skb) {
		unsigned int len = skb->len;
		sch->qstats.backlog -= len;
		sch->qstats.drops++;
		q = t->tab[skb->tc_index & 0xf];
		if (q) {
			q->backlog -= len;
			q->stats.other++;
			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		} else {
			D2PRINTK("gred_drop: skb has bad tcindex %x\n",
				 skb->tc_index & 0xf);
		}

		kfree_skb(skb);
		return len;
	}

	q = t->tab[t->def];
	if (!q) {
		D2PRINTK("no default VQ set: Results might be screwed up\n");
		return 0;
	}

	red_start_of_idle_period(&q->parms);
	return 0;
}
static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	__skb_queue_purge(&sch->q);

	sch->qstats.backlog = 0;

	for (i = 0; i < t->DPs; i++) {
		q = t->tab[i];
		if (!q)
			continue;
		red_restart(&q->parms);
		q->backlog = 0;
	}
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
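/*
 * Illustration only (not part of this file): the table-defaults message
 * handled above carries a struct tc_gred_sopt in TCA_GRED_DPS, e.g.
 *
 *	struct tc_gred_sopt sopt = {
 *		.DPs    = 4,	(number of virtual queues)
 *		.def_DP = 0,	(VQ for unclassified traffic)
 *		.grio   = 1,	(enable priority/RIO mode)
 *	};
 *
 * iproute2 builds this from a command along the lines of
 * "tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio"
 * (exact syntax depends on the iproute2 version).
 */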
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		/* Called under sch_tree_lock(), so the allocation
		 * must not sleep. */
		table->tab[dp] = kmalloc(sizeof(*q), GFP_ATOMIC);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
		memset(table->tab[dp], 0, sizeof(*q));
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->parms);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab);

	return 0;
}
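/*
 * The parameters passed to red_set_parms() follow the conventions of
 * sch_red.c: qth_min/qth_max are thresholds on the average backlog,
 * Wlog is the log2 of the EWMA weight, so each enqueue roughly does
 *
 *	qavg += (backlog - qavg) >> Wlog
 *
 * (the stored value is fixed-point scaled). Plog scales the marking
 * probability between the two thresholds, and stab is the 256 byte
 * lookup table used to decay qavg over idle periods, with the cell
 * size given by Scell_log.
 */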
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (table->tab[table->def] == NULL) {
		if (gred_rio_mode(table))
			prio = table->tab[ctl->DP]->prio;

		err = gred_change_vq(sch, table->def, ctl, prio, stab);
		if (err < 0)
			goto errout_locked;
	}

	table->initd = 1;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}
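/*
 * Illustration only: once the VQ table exists, each DP is configured
 * with a TCA_GRED_PARMS + TCA_GRED_STAB pair, which iproute2 derives
 * from a command along the lines of
 *
 *	tc qdisc change dev eth0 root gred limit 60KB min 15KB max 45KB \
 *		burst 20 avpkt 1000 bandwidth 10Mbit DP 0 \
 *		probability 0.02 prio 2
 *
 * (option names and defaults vary between iproute2 versions).
 */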
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message.
			   This is how we indicate to tc that there is no VQ
			   at this DP. */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table)) {
			q->parms.qidlestart =
				table->tab[table->def]->parms.qidlestart;
			q->parms.qavg = table->tab[table->def]->parms.qavg;
		}

		opt.qave = red_calc_qavg(&q->parms, q->parms.qavg);

append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}
static struct Qdisc_ops gred_qdisc_ops = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.requeue	=	gred_requeue,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};
static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");