/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
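/* fib_default_rule_add() installs one of a family's initial rules while
 * the ops are still private to the caller; e.g. IPv4 is expected to add
 * its local table rule via fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0).
 */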
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called. */
	list_add_tail(&r->list, &ops->rules_list);

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
static void notify_rule_change(struct net *net, int event,
			       struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
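/* Find the ops registered for @family under RCU and take a module
 * reference so the ops cannot be unloaded while in use; released
 * again via rules_ops_put(). */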
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	/* flush_cache is optional; families without a routing
	 * cache leave it NULL. */
	if (ops->flush_cache)
		ops->flush_cache();
}
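/* Publish a family's fib_rules_ops in a namespace. Fails with -EEXIST
 * if ops for the same family are already registered. */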
int fib_rules_register(struct net *net, struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}
EXPORT_SYMBOL_GPL(fib_rules_cleanup_ops);
void fib_rules_unregister(struct net *net, struct fib_rules_ops *ops)
{
	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	/* Wait for outstanding RCU readers before the caller may
	 * free the ops. */
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
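/* Match one rule against a flow: incoming interface and fwmark first,
 * then the protocol-specific ops->match(), with the final result
 * negated for FIB_RULE_INVERT rules. */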
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->ifindex && (rule->ifindex != fl->iif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
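/* Walk the rule list under RCU in priority order. GOTO rules jump to
 * their resolved target, NOP rules are skipped, and the first action
 * not returning -EAGAIN ends the lookup with a reference held on the
 * matching rule in arg->rule. */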
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			fib_rule_get(rule);
			arg->rule = rule;
			goto out;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
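/* FRA_SRC/FRA_DST sanity checks: the attribute must be present whenever
 * a prefix length is given, the prefix length must fit the address
 * width, and the payload must be exactly ops->addr_size bytes. */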
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
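/* RTM_NEWRULE handler: parse and validate the request, allocate and
 * configure the rule, resolve or record pending GOTO targets, and
 * insert the rule in priority order. */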
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IFNAME]) {
		struct net_device *dev;

		rule->ifindex = -1;
		nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->ifname);
		if (dev)
			rule->ifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!rule->pref && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				rule->ctarget = r;
				break;
			}
		}

		if (rule->ctarget == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, nlh, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list; check if
		 * any of them point to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(r->ctarget != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	notify_rule_change(net, RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
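/* RTM_DELRULE handler: delete the first rule matching all given
 * selectors and invalidate any GOTO rules pointing at it. */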
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IFNAME] &&
		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this rule is a target of any goto rules. If so,
		 * disable them. As this operation is eventually very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (tmp->ctarget == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		synchronize_rcu();
		notify_rule_change(net, RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
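/* Worst-case notification size for a rule; protocol-specific
 * attributes are accounted for via ops->nlmsg_payload(). */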
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
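/* Fill a netlink message with the rule header and attributes. The
 * NLA_PUT_*() macros jump to nla_put_failure when the skb runs out
 * of tailroom. */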
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->ifname[0]) {
		NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

		if (rule->ifindex == -1)
			frh->flags |= FIB_RULE_DEV_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, nlh, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
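/* Dump the rules of a single ops, resuming at cb->args[1] across
 * multiple dump callbacks. */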
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
static void notify_rule_change(struct net *net, int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}
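/* Rules may be bound to an interface by name before it exists. These
 * helpers resolve the ifindex when the device registers and reset it
 * to -1 (detached) when the device goes away. */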
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->ifindex == -1 &&
		    strcmp(dev->name, rule->ifname) == 0)
			rule->ifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list)
		if (rule->ifindex == dev->ifindex)
			rule->ifindex = -1;
}
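/* Netdevice notifier keeping interface-bound rules of every family in
 * this namespace in sync with device (un)registration. */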
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev->nd_net;
	struct fib_rules_ops *ops;

	ASSERT_RTNL();
	rcu_read_lock();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	rcu_read_unlock();

	return NOTIFY_DONE;
}
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};
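/* Hook up the rtnetlink handlers, the netdevice notifier and the
 * per-namespace initialization. */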
static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail;

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_netdevice_notifier(&fib_rules_notifier);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);