/*
 * net/core/fib_rules.c	Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

int fib_default_rule_add(struct fib_rules_ops *ops,
                         u32 pref, u32 table, u32 flags)
{
        struct fib_rule *r;

        r = kzalloc(ops->rule_size, GFP_KERNEL);
        if (r == NULL)
                return -ENOMEM;

        atomic_set(&r->refcnt, 1);
        r->action = FR_ACT_TO_TBL;
        r->pref = pref;
        r->table = table;
        r->flags = flags;
        r->uid_start = INVALID_UID;
        r->uid_end = INVALID_UID;
        r->fr_net = hold_net(ops->fro_net);

        /* The lock is not required here, the list is unreachable
         * at the moment this function is called */
        list_add_tail(&r->list, &ops->rules_list);
        return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

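/* Suggest a preference for a new rule: one below the preference of the
 * second rule in the list (the head is normally the pref-0 default
 * rule), or 0 if no such slot can be derived. */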
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
        struct list_head *pos;
        struct fib_rule *rule;

        if (!list_empty(&ops->rules_list)) {
                pos = ops->rules_list.next;
                if (pos->next != &ops->rules_list) {
                        rule = list_entry(pos->next, struct fib_rule, list);
                        if (rule->pref)
                                return rule->pref - 1;
                }
        }

        return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid);

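/* Look up the ops for a family under RCU and take a module reference;
 * the caller releases it via rules_ops_put(). */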
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
        struct fib_rules_ops *ops;

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (ops->family == family) {
                        if (!try_module_get(ops->owner))
                                ops = NULL;
                        rcu_read_unlock();
                        return ops;
                }
        }
        rcu_read_unlock();

        return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
        if (ops)
                module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
        if (ops->flush_cache)
                ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
        int err = -EEXIST;
        struct fib_rules_ops *o;
        struct net *net;

        net = ops->fro_net;

        if (ops->rule_size < sizeof(struct fib_rule))
                return -EINVAL;

        if (ops->match == NULL || ops->configure == NULL ||
            ops->compare == NULL || ops->fill == NULL ||
            ops->action == NULL)
                return -EINVAL;

        spin_lock(&net->rules_mod_lock);
        list_for_each_entry(o, &net->rules_ops, list)
                if (ops->family == o->family)
                        goto errout;

        hold_net(net);
        list_add_tail_rcu(&ops->list, &net->rules_ops);
        err = 0;
errout:
        spin_unlock(&net->rules_mod_lock);

        return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
        struct fib_rules_ops *ops;
        int err;

        ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
        if (ops == NULL)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ops->rules_list);
        ops->fro_net = net;

        err = __fib_rules_register(ops);
        if (err) {
                kfree(ops);
                ops = ERR_PTR(err);
        }

        return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
        struct fib_rule *rule, *tmp;

        list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
                list_del_rcu(&rule->list);
                if (ops->delete)
                        ops->delete(rule);
                fib_rule_put(rule);
        }
}

static void fib_rules_put_rcu(struct rcu_head *head)
{
        struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
        struct net *net = ops->fro_net;

        release_net(net);
        kfree(ops);
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
        struct net *net = ops->fro_net;

        spin_lock(&net->rules_mod_lock);
        list_del_rcu(&ops->list);
        fib_rules_cleanup_ops(ops);
        spin_unlock(&net->rules_mod_lock);

        call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static inline kuid_t fib_nl_uid(struct nlattr *nla)
{
        return make_kuid(current_user_ns(), nla_get_u32(nla));
}

static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
{
        return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
}

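/* A rule with both UID bounds invalid matches every flow; otherwise
 * the flow's UID must fall inside [uid_start, uid_end] inclusive. */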
static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
{
        return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
               (uid_gte(fl->flowi_uid, rule->uid_start) &&
                uid_lte(fl->flowi_uid, rule->uid_end));
}

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
                          struct flowi *fl, int flags)
{
        int ret = 0;

        if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
                goto out;

        if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
                goto out;

        if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
                goto out;

        if (!fib_uid_range_match(fl, rule))
                goto out;

        ret = ops->match(rule, fl, flags);
out:
        return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

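/* Walk the rule list under RCU, following resolved FR_ACT_GOTO jumps,
 * until a matching rule's ->action callback returns something other
 * than -EAGAIN. */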
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
                     int flags, struct fib_lookup_arg *arg)
{
        struct fib_rule *rule;
        int err;

        rcu_read_lock();

        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
                if (!fib_rule_match(rule, ops, fl, flags))
                        continue;

                if (rule->action == FR_ACT_GOTO) {
                        struct fib_rule *target;

                        target = rcu_dereference(rule->ctarget);
                        if (target == NULL) {
                                continue;
                        } else {
                                rule = target;
                                goto jumped;
                        }
                } else if (rule->action == FR_ACT_NOP)
                        continue;
                else
                        err = ops->action(rule, fl, flags, arg);

                if (err != -EAGAIN) {
                        if ((arg->flags & FIB_LOOKUP_NOREF) ||
                            likely(atomic_inc_not_zero(&rule->refcnt))) {
                                arg->rule = rule;
                                goto out;
                        }
                        break;
                }
        }

        err = -ESRCH;
out:
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
                            struct fib_rules_ops *ops)
{
        int err = -EINVAL;

        if (frh->src_len)
                if (tb[FRA_SRC] == NULL ||
                    frh->src_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_SRC]) != ops->addr_size)
                        goto errout;

        if (frh->dst_len)
                if (tb[FRA_DST] == NULL ||
                    frh->dst_len > (ops->addr_size * 8) ||
                    nla_len(tb[FRA_DST]) != ops->addr_size)
                        goto errout;

        err = 0;
errout:
        return err;
}

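/* RTM_NEWRULE handler: validate the request, build the rule, insert it
 * into the list ordered by preference and resolve any goto rules that
 * were waiting for this preference to appear. */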
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *r, *last = NULL;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL, unresolved = 0;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
        if (err < 0)
                goto errout;
        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        rule = kzalloc(ops->rule_size, GFP_KERNEL);
        if (rule == NULL) {
                err = -ENOMEM;
                goto errout;
        }
        rule->fr_net = hold_net(net);

        if (tb[FRA_PRIORITY])
                rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

        if (tb[FRA_IIFNAME]) {
                struct net_device *dev;

                rule->iifindex = -1;
                nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->iifname);
                if (dev)
                        rule->iifindex = dev->ifindex;
        }

        if (tb[FRA_OIFNAME]) {
                struct net_device *dev;

                rule->oifindex = -1;
                nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
                dev = __dev_get_by_name(net, rule->oifname);
                if (dev)
                        rule->oifindex = dev->ifindex;
        }

        if (tb[FRA_FWMARK]) {
                rule->mark = nla_get_u32(tb[FRA_FWMARK]);
                if (rule->mark)
                        /* compatibility: if the mark value is non-zero all bits
                         * are compared unless a mask is explicitly specified.
                         */
                        rule->mark_mask = 0xFFFFFFFF;
        }

        if (tb[FRA_FWMASK])
                rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

        rule->action = frh->action;
        rule->flags = frh->flags;
        rule->table = frh_get_table(frh, tb);

        if (!tb[FRA_PRIORITY] && ops->default_pref)
                rule->pref = ops->default_pref(ops);

        err = -EINVAL;
        if (tb[FRA_GOTO]) {
                if (rule->action != FR_ACT_GOTO)
                        goto errout_free;

                rule->target = nla_get_u32(tb[FRA_GOTO]);
                /* Backward jumps are prohibited to avoid endless loops */
                if (rule->target <= rule->pref)
                        goto errout_free;

                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->pref == rule->target) {
                                RCU_INIT_POINTER(rule->ctarget, r);
                                break;
                        }
                }
                if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
                        unresolved = 1;
        } else if (rule->action == FR_ACT_GOTO)
                goto errout_free;

        /* UID start and end must either both be valid or both unspecified. */
        rule->uid_start = rule->uid_end = INVALID_UID;
        if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
                if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
                        rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
                        rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
                }
                if (!uid_valid(rule->uid_start) ||
                    !uid_valid(rule->uid_end) ||
                    !uid_lte(rule->uid_start, rule->uid_end))
                        goto errout_free;
        }

        err = ops->configure(rule, skb, frh, tb);
        if (err < 0)
                goto errout_free;

        list_for_each_entry(r, &ops->rules_list, list) {
                if (r->pref > rule->pref)
                        break;
                last = r;
        }
        fib_rule_get(rule);

        if (last)
                list_add_rcu(&rule->list, &last->list);
        else
                list_add_rcu(&rule->list, &ops->rules_list);

        if (ops->unresolved_rules) {
                /*
                 * There are unresolved goto rules in the list, check if
                 * any of them are pointing to this new rule.
                 */
                list_for_each_entry(r, &ops->rules_list, list) {
                        if (r->action == FR_ACT_GOTO &&
                            r->target == rule->pref &&
                            rtnl_dereference(r->ctarget) == NULL) {
                                rcu_assign_pointer(r->ctarget, rule);
                                if (--ops->unresolved_rules == 0)
                                        break;
                        }
                }
        }

        if (rule->action == FR_ACT_GOTO)
                ops->nr_goto_rules++;
        if (unresolved)
                ops->unresolved_rules++;

        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
        flush_route_cache(ops);
        rules_ops_put(ops);
        return 0;

errout_free:
        release_net(rule->fr_net);
        kfree(rule);
errout:
        rules_ops_put(ops);
        return err;
}

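/* RTM_DELRULE handler: find the first rule matching every attribute
 * present in the request, unlink it and invalidate any goto rules
 * pointing at it. */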
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rule_hdr *frh = nlmsg_data(nlh);
        struct fib_rules_ops *ops = NULL;
        struct fib_rule *rule, *tmp;
        struct nlattr *tb[FRA_MAX+1];
        int err = -EINVAL;

        if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
                goto errout;

        ops = lookup_rules_ops(net, frh->family);
        if (ops == NULL) {
                err = -EAFNOSUPPORT;
                goto errout;
        }

        err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
        if (err < 0)
                goto errout;
        err = validate_rulemsg(frh, tb, ops);
        if (err < 0)
                goto errout;

        list_for_each_entry(rule, &ops->rules_list, list) {
                if (frh->action && (frh->action != rule->action))
                        continue;
                if (frh_get_table(frh, tb) &&
                    (frh_get_table(frh, tb) != rule->table))
                        continue;
                if (tb[FRA_PRIORITY] &&
                    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
                        continue;
                if (tb[FRA_IIFNAME] &&
                    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
                        continue;
                if (tb[FRA_OIFNAME] &&
                    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
                        continue;
                if (tb[FRA_FWMARK] &&
                    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
                        continue;
                if (tb[FRA_FWMASK] &&
                    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
                        continue;
                if (tb[FRA_UID_START] &&
                    !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
                        continue;
                if (tb[FRA_UID_END] &&
                    !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
                        continue;
                if (!ops->compare(rule, frh, tb))
                        continue;

                if (rule->flags & FIB_RULE_PERMANENT) {
                        err = -EPERM;
                        goto errout;
                }

                list_del_rcu(&rule->list);
                if (rule->action == FR_ACT_GOTO) {
                        ops->nr_goto_rules--;
                        if (rtnl_dereference(rule->ctarget) == NULL)
                                ops->unresolved_rules--;
                }

                /*
                 * Check if this rule is a target to any of them. If so,
                 * disable them. As this operation is eventually very
                 * expensive, it is only performed if goto rules have
                 * actually been added.
                 */
                if (ops->nr_goto_rules > 0) {
                        list_for_each_entry(tmp, &ops->rules_list, list) {
                                if (rtnl_dereference(tmp->ctarget) == rule) {
                                        RCU_INIT_POINTER(tmp->ctarget, NULL);
                                        ops->unresolved_rules++;
                                }
                        }
                }

                notify_rule_change(RTM_DELRULE, rule, ops, nlh,
                                   NETLINK_CB(skb).portid);
                if (ops->delete)
                        ops->delete(rule);
                fib_rule_put(rule);
                flush_route_cache(ops);
                rules_ops_put(ops);
                return 0;
        }

        err = -ENOENT;
errout:
        rules_ops_put(ops);
        return err;
}

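/* Upper bound for the notification payload; families may add their own
 * attributes via the ->nlmsg_payload callback. */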
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                                         struct fib_rule *rule)
{
        size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
                         + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
                         + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
                         + nla_total_size(4) /* FRA_PRIORITY */
                         + nla_total_size(4) /* FRA_TABLE */
                         + nla_total_size(4) /* FRA_FWMARK */
                         + nla_total_size(4) /* FRA_FWMASK */
                         + nla_total_size(4) /* FRA_UID_START */
                         + nla_total_size(4); /* FRA_UID_END */

        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);

        return payload;
}

static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
                            u32 pid, u32 seq, int type, int flags,
                            struct fib_rules_ops *ops)
{
        struct nlmsghdr *nlh;
        struct fib_rule_hdr *frh;

        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        frh = nlmsg_data(nlh);
        frh->family = ops->family;
        frh->table = rule->table;
        if (nla_put_u32(skb, FRA_TABLE, rule->table))
                goto nla_put_failure;
        frh->res1 = 0;
        frh->res2 = 0;
        frh->action = rule->action;
        frh->flags = rule->flags;

        if (rule->action == FR_ACT_GOTO &&
            rcu_access_pointer(rule->ctarget) == NULL)
                frh->flags |= FIB_RULE_UNRESOLVED;

        if (rule->iifname[0]) {
                if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
                        goto nla_put_failure;
                if (rule->iifindex == -1)
                        frh->flags |= FIB_RULE_IIF_DETACHED;
        }

        if (rule->oifname[0]) {
                if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
                        goto nla_put_failure;
                if (rule->oifindex == -1)
                        frh->flags |= FIB_RULE_OIF_DETACHED;
        }

        if ((rule->pref &&
             nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
            (rule->mark &&
             nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
            ((rule->mark_mask || rule->mark) &&
             nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
            (rule->target &&
             nla_put_u32(skb, FRA_GOTO, rule->target)) ||
            (uid_valid(rule->uid_start) &&
             nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
            (uid_valid(rule->uid_end) &&
             nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
                goto nla_put_failure;
        if (ops->fill(rule, skb, frh) < 0)
                goto nla_put_failure;

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

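/* Dump all rules of one family, resuming at the index saved in
 * cb->args[1]. */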
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
                      struct fib_rules_ops *ops)
{
        int idx = 0;
        struct fib_rule *rule;
        int err = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(rule, &ops->rules_list, list) {
                if (idx < cb->args[1])
                        goto skip;

                err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
                                       cb->nlh->nlmsg_seq, RTM_NEWRULE,
                                       NLM_F_MULTI, ops);
                if (err)
                        break;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[1] = idx;
        rules_ops_put(ops);

        return err;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct fib_rules_ops *ops;
        int idx = 0, family;

        family = rtnl_msg_family(cb->nlh);
        if (family != AF_UNSPEC) {
                /* Protocol specific dump request */
                ops = lookup_rules_ops(net, family);
                if (ops == NULL)
                        return -EAFNOSUPPORT;

                dump_rules(skb, cb, ops);
                return skb->len;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(ops, &net->rules_ops, list) {
                if (idx < cb->args[0] || !try_module_get(ops->owner))
                        goto skip;

                if (dump_rules(skb, cb, ops) < 0)
                        break;

                cb->args[1] = 0;
skip:
                idx++;
        }
        rcu_read_unlock();
        cb->args[0] = idx;

        return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
                               u32 pid)
{
        struct net *net;
        struct sk_buff *skb;
        int err = -ENOBUFS;

        net = ops->fro_net;
        skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(net, ops->nlgroup, err);
}

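/* (Re)bind rules that reference a device by name when that device
 * appears; detach_rules() below is the inverse for unregistration. */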
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == -1 &&
                    strcmp(dev->name, rule->iifname) == 0)
                        rule->iifindex = dev->ifindex;
                if (rule->oifindex == -1 &&
                    strcmp(dev->name, rule->oifname) == 0)
                        rule->oifindex = dev->ifindex;
        }
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
        struct fib_rule *rule;

        list_for_each_entry(rule, rules, list) {
                if (rule->iifindex == dev->ifindex)
                        rule->iifindex = -1;
                if (rule->oifindex == dev->ifindex)
                        rule->oifindex = -1;
        }
}

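/* Netdevice notifier: keep the iif/oif bindings of all rules in sync
 * with device registration, rename and unregistration events. */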
static int fib_rules_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct net_device *dev = ptr;
        struct net *net = dev_net(dev);
        struct fib_rules_ops *ops;

        ASSERT_RTNL();

        switch (event) {
        case NETDEV_REGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        attach_rules(&ops->rules_list, dev);
                break;

        case NETDEV_CHANGENAME:
                list_for_each_entry(ops, &net->rules_ops, list) {
                        detach_rules(&ops->rules_list, dev);
                        attach_rules(&ops->rules_list, dev);
                }
                break;

        case NETDEV_UNREGISTER:
                list_for_each_entry(ops, &net->rules_ops, list)
                        detach_rules(&ops->rules_list, dev);
                break;
        }

        return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
        .notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
        INIT_LIST_HEAD(&net->rules_ops);
        spin_lock_init(&net->rules_mod_lock);
        return 0;
}

static struct pernet_operations fib_rules_net_ops = {
        .init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
        int err;
        rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

        err = register_pernet_subsys(&fib_rules_net_ops);
        if (err < 0)
                goto fail;

        err = register_netdevice_notifier(&fib_rules_notifier);
        if (err < 0)
                goto fail_unregister;

        return 0;

fail_unregister:
        unregister_pernet_subsys(&fib_rules_net_ops);
fail:
        rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
        rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
        rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
        return err;
}

subsys_initcall(fib_rules_init);