/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/*	Simple Token Bucket Filter.
	=======================================

	Description.
	------------

	A data flow obeys TBF with rate R and depth B if, for any
	time interval t_i...t_f, the number of transmitted bits
	does not exceed B + R*(t_f - t_i).

	Packetized version of this definition:
	the sequence of packets of sizes s_i served at moments t_i
	obeys TBF if, for any i <= k:

	s_i + ... + s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially, and let N(t) grow continuously
	with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at a time t_* when S/R <= N(t_*),
	and in that case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
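
	For illustration (figures are made up, not taken from this code):
	with R = 125000 bytes/sec and B = 12500 bytes, N starts at
	B/R = 0.1 sec of credit.  A 1500-byte packet costs
	S/R = 0.012 sec of credit, so a long back-to-back burst first
	drains N and then settles at one packet per 0.012 sec, i.e.
	exactly rate R.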

	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (the peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M.  If P is infinite, this
	double TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max((L-B)/R, (L-M)/P)

	where L is the maximal backlog (the "limit" parameter below).
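
	For example (illustrative figures): with L = 10000 bytes,
	B = 2000 bytes, R = 125000 bytes/sec and no peak-rate bucket,
	lat = (10000 - 2000)/125000 = 0.064 sec.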

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it
	up when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by an EOI for some previous
	packet, TBF can stop its activity for 1/HZ.

	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is
	~10Kbytes.

	Note that the peak-rate TBF is much tougher: with MTU 1500 and
	HZ=100, P_crit = 150Kbytes/sec.  So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
*/
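
/*
 * Illustrative userspace usage via iproute2's tc (the figures are an
 * example only, not mandated by this file):
 *
 *	tc qdisc add dev eth0 root tbf rate 256kbit buffer 1600 limit 3000
 *
 * "rate" supplies R_tab, "buffer" the bucket depth, and "limit" the
 * backlog bound passed to the default inner bfifo; adding "peakrate"
 * and "mtu" would also populate P_tab below.
 */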

struct tbf_sched_data
{
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;		/* Peak-rate bucket depth */
	u32		max_size;	/* Largest permitted packet size */
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long	tokens;			/* Current number of B tokens */
	long	ptokens;		/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog	watchdog;	/* Watchdog timer */
};

#define L2T(q,L)   qdisc_l2t((q)->R_tab,L)
#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
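
/*
 * qdisc_l2t() maps a packet length to its transmission time (in psched
 * ticks) through the rate table supplied by userspace, so L2T()/L2T_P()
 * give the token cost of sending L bytes at rate R / peak rate P.
 */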

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size)
		return qdisc_reshape_fail(skb, sch);

	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != 0) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	return 0;
}

static int tbf_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);
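
		/* To send now, both buckets must be non-negative; OR-ing the
		 * two signed longs and testing the sign checks both at once
		 * (a negative value has its sign bit set). */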
		if ((toks | ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}
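
		/* Not enough tokens: a negative count is a deficit expressed
		 * in psched ticks, so now + max(-toks, -ptoks) is the
		 * earliest instant at which both buckets are replenished. */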
		qdisc_watchdog_schedule(&q->watchdog,
					now + max_t(long, -toks, -ptoks));

		/* Maybe there is a shorter packet in the queue that could
		   be sent now.  That sounds tempting, but it is wrong in
		   principle: we MUST NOT reorder packets under these
		   circumstances.

		   If we split the flow into independent subflows, sending
		   the short packet would be a very good solution.  This is
		   the main idea of all FQ algorithms (cf. CSZ, HPFQ, HFSC).
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = psched_get_time();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;
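
	/* The scans above bound max_size: slot n of a rate table holds the
	 * transmission time for packet lengths around n << cell_log, so the
	 * first slot whose time exceeds the bucket depth caps the largest
	 * packet that could ever accumulate enough tokens.  Bigger packets
	 * are rejected up front in tbf_enqueue(). */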

	if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;

done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = psched_get_time();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.change		=	tbf_change_class,
	.delete		=	tbf_delete,
	.walk		=	tbf_walk,
	.tcf_chain	=	tbf_find_tcf,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.requeue	=	tbf_requeue,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");