2 * net/sched/police.c Input police filter.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 * J Hadi Salim (action changes)
13 #include <asm/uaccess.h>
14 #include <asm/system.h>
15 #include <linux/bitops.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
21 #include <linux/socket.h>
22 #include <linux/sockios.h>
24 #include <linux/errno.h>
25 #include <linux/interrupt.h>
26 #include <linux/netdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/rtnetlink.h>
30 #include <linux/init.h>
32 #include <net/act_api.h>
33 #include <net/netlink.h>
/*
 * Token-bucket cost lookup helpers: map a packet length L to its token
 * cost via the policer's precomputed rate table (R_tab = committed
 * rate, P_tab = peak rate), indexed by L >> cell_log.
 * NOTE(review): both macros evaluate p and L more than once — only pass
 * side-effect-free expressions (all visible call sites do).
 */
35 #define L2T(p,L) ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
36 #define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])
/* Hash-table mask: the policer table has POL_TAB_MASK + 1 (16) buckets. */
38 #define POL_TAB_MASK 15
/* Table of every instantiated policer, chained via tcfc_next. */
39 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
/* Last auto-assigned policer index (see tcf_police_new_index / tcf_hash_new_index). */
40 static u32 police_idx_gen;
/* Protects insertion, removal and walking of tcf_police_ht. */
41 static DEFINE_RWLOCK(police_lock);
/*
 * Table descriptor handed to the generic tcf_hash_* helpers.
 * NOTE(review): original line 46 is missing from this copy; the non-ACT
 * lookup path dereferences hinfo->lock, so it presumably initialized
 * .lock = &police_lock — confirm against upstream.
 */
43 static struct tcf_hashinfo police_hash_info = {
44 .htab = tcf_police_ht,
45 .hmask = POL_TAB_MASK,
49 /* old policer structure from before tc actions */
/*
 * Binary-compat layout accepted alongside struct tc_police so that
 * pre-action tc userspace still works: the locate functions accept a
 * TCA_POLICE_TBF payload of either size.
 * NOTE(review): original lines 51-56 (the leading members) are missing
 * from this copy; only the trailing rate specs are visible.
 */
50 struct tc_police_compat
57 struct tc_ratespec rate;
58 struct tc_ratespec peakrate;
61 /* Each policer is serialized by its individual spinlock */
63 #ifdef CONFIG_NET_CLS_ACT
/*
 * Action-API walker: iterate every policer in the hash table under the
 * read lock and, for each one selected, emit a nested rtattr (numbered
 * by a->order) containing either a deletion reference (RTM_DELACTION)
 * or a full dump, then patch the nest length.
 * NOTE(review): this copy is missing several original lines (66-67,
 * 69-70, 72-74, 77, 79-83, 88, 90-94, 96-99) — skip/limit bookkeeping
 * for index/s_i/n_i, the rtattr_failure unwind path, and the function's
 * return; restore them before compiling.
 */
64 static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
65 int type, struct tc_action *a)
68 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
71 read_lock(&police_lock);
75 for (i = 0; i < (POL_TAB_MASK + 1); i++) {
76 p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];
78 for (; p; p = p->tcfc_next) {
/* Open a nested attribute for this action instance. */
84 r = (struct rtattr *)skb_tail_pointer(skb);
85 RTA_PUT(skb, a->order, 0, NULL);
/* For flush requests dump only a reference (last arg = 1). */
86 if (type == RTM_DELACTION)
87 err = tcf_action_dump_1(skb, a, 0, 1);
89 err = tcf_action_dump_1(skb, a, 0, 0);
/* Close the nest: fix up its length now that the payload is in. */
95 r->rta_len = skb_tail_pointer(skb) - (u8 *)r;
100 read_unlock(&police_lock);
/*
 * Unlink a policer from its hash bucket and release its resources:
 * the rate estimator and the committed/peak rate tables.
 * NOTE(review): interior lines are missing from this copy — notably
 * original line 119 (the actual unlink, presumably *p1p = p->tcf_next),
 * lines 122-123 (gen_kill_estimator's second argument), and the kfree /
 * BUG path after the loop.  The visible locking shows write_lock_bh
 * held only around the list manipulation.
 */
111 void tcf_police_destroy(struct tcf_police *p)
113 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
114 struct tcf_common **p1p;
/* Walk the bucket by pointer-to-pointer so unlinking needs no prev node. */
116 for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
117 if (*p1p == &p->common) {
118 write_lock_bh(&police_lock);
120 write_unlock_bh(&police_lock);
121 gen_kill_estimator(&p->tcf_bstats,
/* Rate tables are refcounted; qdisc_put_rtab drops our reference. */
124 qdisc_put_rtab(p->tcfp_R_tab);
126 qdisc_put_rtab(p->tcfp_P_tab);
134 #ifdef CONFIG_NET_CLS_ACT
/*
 * Find-or-create entry point for the police action (tc action API).
 * Parses the nested TCA_POLICE_* attributes, binds to an existing
 * policer when parm->index matches one, otherwise allocates and
 * configures a new one and links it into the hash table.
 * NOTE(review): many original lines are elided in this copy (error
 * returns, the failure: unwind, ret handling, closing braces); restore
 * them before compiling.
 */
135 static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
136 struct tc_action *a, int ovr, int bind)
140 struct rtattr *tb[TCA_POLICE_MAX];
141 struct tc_police *parm;
142 struct tcf_police *police;
143 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
146 if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
149 if (tb[TCA_POLICE_TBF-1] == NULL)
/* Accept both the current and the pre-action compat TBF layouts. */
151 size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
152 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
154 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
156 if (tb[TCA_POLICE_RESULT-1] != NULL &&
157 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
/*
 * NOTE(review): original lines 159-160 repeat the TCA_POLICE_RESULT
 * size check verbatim; the second check was presumably meant to
 * validate TCA_POLICE_AVRATE, whose payload is dereferenced without a
 * size check at original lines 227-229 below — confirm against
 * upstream and fix.
 */
159 if (tb[TCA_POLICE_RESULT-1] != NULL &&
160 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
164 struct tcf_common *pc;
/* Reuse an existing policer with the requested index if present. */
166 pc = tcf_hash_lookup(parm->index, &police_hash_info);
169 police = to_police(pc);
171 police->tcf_bindcnt += 1;
172 police->tcf_refcnt += 1;
/* Not found (or index 0): allocate a fresh, zeroed policer. */
180 police = kzalloc(sizeof(*police), GFP_KERNEL);
184 police->tcf_refcnt = 1;
185 spin_lock_init(&police->tcf_lock);
187 police->tcf_bindcnt = 1;
/* Acquire rate tables up front so configuration below cannot fail. */
189 if (parm->rate.rate) {
191 R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
194 if (parm->peakrate.rate) {
195 P_tab = qdisc_get_rtab(&parm->peakrate,
196 tb[TCA_POLICE_PEAKRATE-1]);
/* Peak-rate table lookup failed: drop the committed-rate table too. */
198 qdisc_put_rtab(R_tab);
203 /* No failure allowed after this point */
204 spin_lock_bh(&police->tcf_lock);
/* Swap in the new tables, releasing any tables from a prior config. */
206 qdisc_put_rtab(police->tcfp_R_tab);
207 police->tcfp_R_tab = R_tab;
210 qdisc_put_rtab(police->tcfp_P_tab);
211 police->tcfp_P_tab = P_tab;
214 if (tb[TCA_POLICE_RESULT-1])
215 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
/* Start with a full committed bucket (burst worth of tokens). */
216 police->tcfp_toks = police->tcfp_burst = parm->burst;
217 police->tcfp_mtu = parm->mtu;
/* mtu 0 means "no limit"; with a rate table, derive the largest
 * length the 256-slot table can represent. */
218 if (police->tcfp_mtu == 0) {
219 police->tcfp_mtu = ~0;
220 if (police->tcfp_R_tab)
221 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
/* Prime the peak bucket with the cost of one MTU-sized packet. */
223 if (police->tcfp_P_tab)
224 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
225 police->tcf_action = parm->action;
227 if (tb[TCA_POLICE_AVRATE-1])
228 police->tcfp_ewma_rate =
229 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
/* Install/replace the rate estimator used by the EWMA limit. */
231 gen_replace_estimator(&police->tcf_bstats,
232 &police->tcf_rate_est,
233 &police->tcf_lock, est);
235 spin_unlock_bh(&police->tcf_lock);
/* Existing policer reconfigured: nothing more to do. */
236 if (ret != ACT_P_CREATED)
/* Newly created: stamp the bucket clock, pick an index, link in. */
239 police->tcfp_t_c = psched_get_time();
240 police->tcf_index = parm->index ? parm->index :
241 tcf_hash_new_index(&police_idx_gen, &police_hash_info);
242 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
243 write_lock_bh(&police_lock);
244 police->tcf_next = tcf_police_ht[h];
245 tcf_police_ht[h] = &police->common;
246 write_unlock_bh(&police_lock);
252 if (ret == ACT_P_CREATED)
/*
 * Action-API cleanup hook: drop one reference (and one bind reference
 * when bind is set) on the policer attached to this action.
 * NOTE(review): braces and any intermediate lines (orig 258, 260-261,
 * 263) are missing from this copy.
 */
257 static int tcf_act_police_cleanup(struct tc_action *a, int bind)
259 struct tcf_police *p = a->priv;
262 return tcf_police_release(p, bind);
/*
 * Packet-path policer (tc action API build): a classic token bucket
 * with an optional peak-rate bucket and an optional EWMA average-rate
 * limit.  Returns tcfp_result for conforming packets and tcf_action
 * (the configured exceed action) otherwise.
 * NOTE(review): several original lines are elided in this copy (the
 * opening brace, local declarations for now/toks/ptoks, the second
 * argument of psched_tdiff_bounded, and closing braces).
 */
266 static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
267 struct tcf_result *res)
269 struct tcf_police *police = a->priv;
/* All policer state (stats and bucket) is serialized by tcf_lock. */
274 spin_lock(&police->tcf_lock);
276 police->tcf_bstats.bytes += skb->len;
277 police->tcf_bstats.packets++;
/* Average-rate policing: once the estimator reports bps at or above
 * the configured EWMA rate, count an overlimit and take the exceed
 * action without touching the token buckets. */
279 if (police->tcfp_ewma_rate &&
280 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
281 police->tcf_qstats.overlimits++;
282 spin_unlock(&police->tcf_lock);
283 return police->tcf_action;
/* Packets larger than the policer MTU always exceed. */
286 if (skb->len <= police->tcfp_mtu) {
/* No committed-rate table: pure MTU policer, packet conforms. */
287 if (police->tcfp_R_tab == NULL) {
288 spin_unlock(&police->tcf_lock);
289 return police->tcfp_result;
/* Refill tokens for the time elapsed since the last conforming
 * packet, bounded so long idle periods cannot overflow. */
292 now = psched_get_time();
293 toks = psched_tdiff_bounded(now, police->tcfp_t_c,
/* Peak-rate bucket: capped at the cost of one MTU-sized packet. */
295 if (police->tcfp_P_tab) {
296 ptoks = toks + police->tcfp_ptoks;
297 if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
298 ptoks = (long)L2T_P(police, police->tcfp_mtu);
299 ptoks -= L2T_P(police, skb->len);
/* Committed-rate bucket: capped at the configured burst. */
301 toks += police->tcfp_toks;
302 if (toks > (long)police->tcfp_burst)
303 toks = police->tcfp_burst;
304 toks -= L2T(police, skb->len);
/* Conforms only if neither bucket went negative ((toks|ptoks) >= 0
 * iff both sign bits are clear); commit the new bucket state. */
305 if ((toks|ptoks) >= 0) {
306 police->tcfp_t_c = now;
307 police->tcfp_toks = toks;
308 police->tcfp_ptoks = ptoks;
309 spin_unlock(&police->tcf_lock);
310 return police->tcfp_result;
/* Out of tokens or over-MTU: count overlimit, take exceed action. */
314 police->tcf_qstats.overlimits++;
315 spin_unlock(&police->tcf_lock);
316 return police->tcf_action;
/*
 * Serialize the policer's configuration into netlink attributes:
 * TCA_POLICE_TBF (struct tc_police), plus TCA_POLICE_RESULT and
 * TCA_POLICE_AVRATE when configured.  RTA_PUT jumps to an
 * rtattr_failure label on skb overflow; that label (and the return
 * statements, orig lines after 345) is missing from this copy, as is
 * the function's return type on line 319.
 */
320 tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
/* Remember the tail so the failure path could trim back to it. */
322 unsigned char *b = skb_tail_pointer(skb);
323 struct tcf_police *police = a->priv;
324 struct tc_police opt;
326 opt.index = police->tcf_index;
327 opt.action = police->tcf_action;
328 opt.mtu = police->tcfp_mtu;
329 opt.burst = police->tcfp_burst;
/* Report counts net of the caller's own reference/bind. */
330 opt.refcnt = police->tcf_refcnt - ref;
331 opt.bindcnt = police->tcf_bindcnt - bind;
/* Absent rate tables are dumped as zeroed rate specs. */
332 if (police->tcfp_R_tab)
333 opt.rate = police->tcfp_R_tab->rate;
335 memset(&opt.rate, 0, sizeof(opt.rate));
336 if (police->tcfp_P_tab)
337 opt.peakrate = police->tcfp_P_tab->rate;
339 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
340 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
341 if (police->tcfp_result)
342 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
343 &police->tcfp_result);
344 if (police->tcfp_ewma_rate)
345 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
353 MODULE_AUTHOR("Alexey Kuznetsov");
354 MODULE_DESCRIPTION("Policing actions");
355 MODULE_LICENSE("GPL");
/*
 * Registration table wiring the policer into the generic tc action
 * framework (packet hook, netlink dump/init, hash lookup, walker).
 * NOTE(review): original line 358 is missing from this copy — it
 * presumably set .kind = "police" — and the closing "};" is also
 * elided; confirm against upstream.
 */
357 static struct tc_action_ops act_police_ops = {
359 .hinfo = &police_hash_info,
360 .type = TCA_ID_POLICE,
361 .capab = TCA_CAP_NONE,
362 .owner = THIS_MODULE,
363 .act = tcf_act_police,
364 .dump = tcf_act_police_dump,
365 .cleanup = tcf_act_police_cleanup,
366 .lookup = tcf_hash_search,
367 .init = tcf_act_police_locate,
368 .walk = tcf_act_police_walker
/*
 * Module init/exit: register and unregister act_police_ops with the
 * tc action framework.  NOTE(review): the __init/__exit qualified
 * return-type lines (orig 371, 377) and braces are missing here.
 */
372 police_init_module(void)
374 return tcf_register_action(&act_police_ops);
378 police_cleanup_module(void)
380 tcf_unregister_action(&act_police_ops);
383 module_init(police_init_module);
384 module_exit(police_cleanup_module);
386 #else /* CONFIG_NET_CLS_ACT */
/*
 * Non-NET_CLS_ACT build: look up a policer by index in the shared hash
 * table under the hash info's read lock.  NOTE(review): the loop
 * increment (orig 395), the found-path break/return (orig 397-398) and
 * the final return (orig 400-401) are missing from this copy.
 */
388 static struct tcf_common *tcf_police_lookup(u32 index)
390 struct tcf_hashinfo *hinfo = &police_hash_info;
391 struct tcf_common *p;
393 read_lock(hinfo->lock);
394 for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
396 if (p->tcfc_index == index)
399 read_unlock(hinfo->lock);
/*
 * Allocate an unused policer index: advance police_idx_gen until the
 * candidate value is not already present in the hash table, then
 * commit and return it.  NOTE(review): the loop body generating val
 * (orig lines 407-411) is missing from this copy.
 */
404 static u32 tcf_police_new_index(void)
406 u32 *idx_gen = &police_idx_gen;
412 } while (tcf_police_lookup(val));
414 return (*idx_gen = val);
/*
 * Non-NET_CLS_ACT find-or-create: parse TCA_POLICE_* attributes and
 * return an existing policer (with an extra reference) or a newly
 * configured one linked into the hash table.  Mirrors
 * tcf_act_police_locate but without bind counting, and validates the
 * RESULT and AVRATE payload sizes inline.
 * NOTE(review): error-return lines, the failure unwind after orig 501,
 * and closing braces are missing from this copy.
 */
417 struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
420 struct tcf_police *police;
421 struct rtattr *tb[TCA_POLICE_MAX];
422 struct tc_police *parm;
425 if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
428 if (tb[TCA_POLICE_TBF-1] == NULL)
/* Accept both the current and the pre-action compat TBF layouts. */
430 size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
431 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
434 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
437 struct tcf_common *pc;
/* Reuse an existing policer with this index if present. */
439 pc = tcf_police_lookup(parm->index);
441 police = to_police(pc);
442 police->tcf_refcnt++;
446 police = kzalloc(sizeof(*police), GFP_KERNEL);
447 if (unlikely(!police))
450 police->tcf_refcnt = 1;
451 spin_lock_init(&police->tcf_lock);
/* Acquire the committed and (optional) peak rate tables. */
452 if (parm->rate.rate) {
454 qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
455 if (police->tcfp_R_tab == NULL)
457 if (parm->peakrate.rate) {
459 qdisc_get_rtab(&parm->peakrate,
460 tb[TCA_POLICE_PEAKRATE-1]);
461 if (police->tcfp_P_tab == NULL)
/* Optional conform result; payload size is checked here, unlike in
 * the ACT-build locate (see note there). */
465 if (tb[TCA_POLICE_RESULT-1]) {
466 if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
468 police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
470 if (tb[TCA_POLICE_AVRATE-1]) {
471 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
473 police->tcfp_ewma_rate =
474 *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
/* Start with a full committed bucket (burst worth of tokens). */
476 police->tcfp_toks = police->tcfp_burst = parm->burst;
477 police->tcfp_mtu = parm->mtu;
/* mtu 0 means "no limit"; with a rate table, derive the largest
 * length the 256-slot table can represent. */
478 if (police->tcfp_mtu == 0) {
479 police->tcfp_mtu = ~0;
480 if (police->tcfp_R_tab)
481 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
/* Prime the peak bucket with the cost of one MTU-sized packet. */
483 if (police->tcfp_P_tab)
484 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
485 police->tcfp_t_c = psched_get_time();
486 police->tcf_index = parm->index ? parm->index :
487 tcf_police_new_index();
488 police->tcf_action = parm->action;
/* Optional rate estimator for the EWMA average-rate limit. */
490 gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
491 &police->tcf_lock, est);
/* Link the new policer into its hash bucket under the write lock. */
492 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
493 write_lock_bh(&police_lock);
494 police->tcf_next = tcf_police_ht[h];
495 tcf_police_ht[h] = &police->common;
496 write_unlock_bh(&police_lock);
/* Failure unwind: drop the committed-rate table if acquired. */
500 if (police->tcfp_R_tab)
501 qdisc_put_rtab(police->tcfp_R_tab);
/*
 * Non-NET_CLS_ACT packet-path policer — same token-bucket algorithm as
 * tcf_act_police: optional EWMA average-rate limit, MTU check, then
 * committed-rate bucket plus optional peak-rate bucket.  Returns
 * tcfp_result for conforming packets, tcf_action otherwise.
 * NOTE(review): the opening brace, local declarations for now/toks/
 * ptoks, psched_tdiff_bounded's second argument, and closing braces
 * are missing from this copy.
 */
506 int tcf_police(struct sk_buff *skb, struct tcf_police *police)
/* All policer state (stats and bucket) is serialized by tcf_lock. */
512 spin_lock(&police->tcf_lock);
514 police->tcf_bstats.bytes += skb->len;
515 police->tcf_bstats.packets++;
/* Average-rate policing via the installed rate estimator. */
517 if (police->tcfp_ewma_rate &&
518 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
519 police->tcf_qstats.overlimits++;
520 spin_unlock(&police->tcf_lock);
521 return police->tcf_action;
/* Packets larger than the policer MTU always exceed. */
523 if (skb->len <= police->tcfp_mtu) {
/* No committed-rate table: pure MTU policer, packet conforms. */
524 if (police->tcfp_R_tab == NULL) {
525 spin_unlock(&police->tcf_lock);
526 return police->tcfp_result;
/* Refill tokens for elapsed time, bounded against overflow. */
529 now = psched_get_time();
530 toks = psched_tdiff_bounded(now, police->tcfp_t_c,
/* Peak-rate bucket: capped at the cost of one MTU-sized packet. */
532 if (police->tcfp_P_tab) {
533 ptoks = toks + police->tcfp_ptoks;
534 if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
535 ptoks = (long)L2T_P(police, police->tcfp_mtu);
536 ptoks -= L2T_P(police, skb->len);
/* Committed-rate bucket: capped at the configured burst. */
538 toks += police->tcfp_toks;
539 if (toks > (long)police->tcfp_burst)
540 toks = police->tcfp_burst;
541 toks -= L2T(police, skb->len);
/* Conforms only if neither bucket went negative; commit state. */
542 if ((toks|ptoks) >= 0) {
543 police->tcfp_t_c = now;
544 police->tcfp_toks = toks;
545 police->tcfp_ptoks = ptoks;
546 spin_unlock(&police->tcf_lock);
547 return police->tcfp_result;
/* Out of tokens or over-MTU: count overlimit, take exceed action. */
551 police->tcf_qstats.overlimits++;
552 spin_unlock(&police->tcf_lock);
553 return police->tcf_action;
555 EXPORT_SYMBOL(tcf_police);
/*
 * Non-NET_CLS_ACT dump: serialize the policer's configuration into
 * TCA_POLICE_TBF plus optional TCA_POLICE_RESULT / TCA_POLICE_AVRATE
 * attributes.  Unlike the ACT-build dump, refcnt/bindcnt fields are
 * not filled in here (no ref/bind parameters).  The rtattr_failure
 * label and return statements (orig lines after 579) are missing from
 * this copy.
 */
557 int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
/* Remember the tail so the failure path could trim back to it. */
559 unsigned char *b = skb_tail_pointer(skb);
560 struct tc_police opt;
562 opt.index = police->tcf_index;
563 opt.action = police->tcf_action;
564 opt.mtu = police->tcfp_mtu;
565 opt.burst = police->tcfp_burst;
/* Absent rate tables are dumped as zeroed rate specs. */
566 if (police->tcfp_R_tab)
567 opt.rate = police->tcfp_R_tab->rate;
569 memset(&opt.rate, 0, sizeof(opt.rate));
570 if (police->tcfp_P_tab)
571 opt.peakrate = police->tcfp_P_tab->rate;
573 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
574 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
575 if (police->tcfp_result)
576 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
577 &police->tcfp_result);
578 if (police->tcfp_ewma_rate)
579 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
/*
 * Copy the policer's basic, rate-estimator and queue statistics into a
 * netlink stats block using the compat layout (TCA_STATS2 with
 * TCA_STATS/TCA_XSTATS fallback), serialized by tcf_lock.
 * NOTE(review): the gnet_dump local `d`, the error-path returns and
 * the final return (orig 588-590, 593-595, 599-600, 602-604) are
 * missing from this copy.
 */
587 int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
591 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
592 TCA_XSTATS, &police->tcf_lock,
596 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
597 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
598 gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
601 if (gnet_stats_finish_copy(&d) < 0)
610 #endif /* CONFIG_NET_CLS_ACT */