/* net/core/fib_rules.c — from firefly-linux-kernel-4.4.55.git
 * (gitweb extraction header; the "video: rockchip: hdmi" commit title
 * above it in the scrape is unrelated residue, not part of this file).
 */
1 /*
2  * net/core/fib_rules.c         Generic Routing Rules
3  *
4  *      This program is free software; you can redistribute it and/or
5  *      modify it under the terms of the GNU General Public License as
6  *      published by the Free Software Foundation, version 2.
7  *
8  * Authors:     Thomas Graf <tgraf@suug.ch>
9  */
10
11 #include <linux/types.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/list.h>
15 #include <linux/module.h>
16 #include <net/net_namespace.h>
17 #include <net/sock.h>
18 #include <net/fib_rules.h>
19 #include <net/ip_tunnels.h>
20
21 int fib_default_rule_add(struct fib_rules_ops *ops,
22                          u32 pref, u32 table, u32 flags)
23 {
24         struct fib_rule *r;
25
26         r = kzalloc(ops->rule_size, GFP_KERNEL);
27         if (r == NULL)
28                 return -ENOMEM;
29
30         atomic_set(&r->refcnt, 1);
31         r->action = FR_ACT_TO_TBL;
32         r->pref = pref;
33         r->table = table;
34         r->flags = flags;
35         r->fr_net = ops->fro_net;
36         r->uid_start = INVALID_UID;
37         r->uid_end = INVALID_UID;
38
39         r->suppress_prefixlen = -1;
40         r->suppress_ifgroup = -1;
41
42         /* The lock is not required here, the list in unreacheable
43          * at the moment this function is called */
44         list_add_tail(&r->list, &ops->rules_list);
45         return 0;
46 }
47 EXPORT_SYMBOL(fib_default_rule_add);
48
/* Choose a default preference for a new rule when userspace did not
 * supply FRA_PRIORITY: one less than the pref of the *second* rule in
 * the list (the first entry is presumably the pref-0 built-in rule —
 * see fib_default_rule_add()), so the new rule lands just in front of
 * the existing user rules.  Falls back to 0 when no usable anchor
 * exists.
 */
static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
65
66 static void notify_rule_change(int event, struct fib_rule *rule,
67                                struct fib_rules_ops *ops, struct nlmsghdr *nlh,
68                                u32 pid);
69
70 static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
71 {
72         struct fib_rules_ops *ops;
73
74         rcu_read_lock();
75         list_for_each_entry_rcu(ops, &net->rules_ops, list) {
76                 if (ops->family == family) {
77                         if (!try_module_get(ops->owner))
78                                 ops = NULL;
79                         rcu_read_unlock();
80                         return ops;
81                 }
82         }
83         rcu_read_unlock();
84
85         return NULL;
86 }
87
88 static void rules_ops_put(struct fib_rules_ops *ops)
89 {
90         if (ops)
91                 module_put(ops->owner);
92 }
93
94 static void flush_route_cache(struct fib_rules_ops *ops)
95 {
96         if (ops->flush_cache)
97                 ops->flush_cache(ops);
98 }
99
/* Link @ops into its namespace's list of per-family rule operations.
 * Returns -EINVAL when the template is malformed (rule_size smaller
 * than struct fib_rule, or a mandatory callback missing) and -EEXIST
 * when the address family is already registered in this netns.
 */
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	/* Every per-family rule must at least embed a struct fib_rule. */
	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	/* These hooks are called unconditionally, so they are mandatory;
	 * optional hooks (delete, suppress, flush_cache, nlmsg_payload)
	 * are NULL-checked at each call site instead.
	 */
	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	/* rules_mod_lock serializes registration against concurrent
	 * (un)registration; readers walk the list under RCU.
	 */
	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}
128
129 struct fib_rules_ops *
130 fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
131 {
132         struct fib_rules_ops *ops;
133         int err;
134
135         ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
136         if (ops == NULL)
137                 return ERR_PTR(-ENOMEM);
138
139         INIT_LIST_HEAD(&ops->rules_list);
140         ops->fro_net = net;
141
142         err = __fib_rules_register(ops);
143         if (err) {
144                 kfree(ops);
145                 ops = ERR_PTR(err);
146         }
147
148         return ops;
149 }
150 EXPORT_SYMBOL_GPL(fib_rules_register);
151
/* Empty @ops->rules_list: unlink every rule with list_del_rcu() (so
 * concurrent RCU readers remain safe), give the optional per-family
 * delete hook a chance to run, and drop the list's reference on each
 * rule.
 */
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}
163
/* Tear down an ops registered with fib_rules_register(): unlink it
 * from the namespace list, release all of its rules, and free the ops
 * itself only after an RCU grace period, since lookups may still be
 * traversing the rules_ops list.
 */
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	spin_unlock(&net->rules_mod_lock);

	fib_rules_cleanup_ops(ops);
	kfree_rcu(ops, rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
176
177 static inline kuid_t fib_nl_uid(struct nlattr *nla)
178 {
179         return make_kuid(current_user_ns(), nla_get_u32(nla));
180 }
181
182 static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
183 {
184         return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
185 }
186
187 static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
188 {
189         return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
190                (uid_gte(fl->flowi_uid, rule->uid_start) &&
191                 uid_lte(fl->flowi_uid, rule->uid_end));
192 }
193
/* Generic part of rule matching: input/output interface, fwmark,
 * tunnel id and UID range, followed by the per-family ops->match()
 * hook.  The combined result is inverted as a whole when the rule
 * carries FIB_RULE_INVERT ("not" rules), which is why all failures
 * funnel through the single "out" label.
 */
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
                          struct flowi *fl, int flags)
{
	int ret = 0;

	/* An ifindex of 0 means "any interface". */
	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	/* Only the bits selected by mark_mask participate. */
	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
		goto out;

	if (!fib_uid_range_match(fl, rule))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
218
/* Walk the rule list under RCU and run the action of the first rule
 * that matches @fl and produces a final verdict.  An action returning
 * -EAGAIN means "no decision here, continue with the next rule".  On
 * success the selected rule is stored in arg->rule, with a reference
 * taken unless the caller requested FIB_LOOKUP_NOREF.  Returns -ESRCH
 * when no rule produced a result.
 */
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				/* Unresolved goto: skip this rule. */
				continue;
			} else {
				/* Resume matching at the jump target. */
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		/* The optional suppressor may veto an otherwise
		 * successful result; continue with the next rule then.
		 */
		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			/* A refcount that already dropped to zero means
			 * the rule is being deleted; abandon the walk.
			 */
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
267
268 static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
269                             struct fib_rules_ops *ops)
270 {
271         int err = -EINVAL;
272
273         if (frh->src_len)
274                 if (tb[FRA_SRC] == NULL ||
275                     frh->src_len > (ops->addr_size * 8) ||
276                     nla_len(tb[FRA_SRC]) != ops->addr_size)
277                         goto errout;
278
279         if (frh->dst_len)
280                 if (tb[FRA_DST] == NULL ||
281                     frh->dst_len > (ops->addr_size * 8) ||
282                     nla_len(tb[FRA_DST]) != ops->addr_size)
283                         goto errout;
284
285         err = 0;
286 errout:
287         return err;
288 }
289
/* RTM_NEWRULE handler: validate the netlink request, build a new rule
 * from its attributes, insert it into the family's list (kept sorted
 * by preference) and notify listeners.  Invoked from rtnetlink
 * dispatch.
 */
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	/* Takes a module reference; dropped on every exit path below. */
	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = net;

	rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
				      : fib_default_rule_pref(ops);

	/* Rules may reference a device by name before it exists; an
	 * ifindex of -1 marks the rule detached until the device shows
	 * up (see attach_rules()).
	 */
	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	if (tb[FRA_TUN_ID])
		rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	/* -1 disables suppression by prefix length / interface group. */
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		/* Resolve the jump target now if it already exists;
		 * otherwise ctarget stays NULL and the rule is counted
		 * as unresolved.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	/* UID start and end must either both be valid or both unspecified. */
	rule->uid_start = rule->uid_end = INVALID_UID;
	if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
		if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
			rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
			rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
		}
		if (!uid_valid(rule->uid_start) ||
		    !uid_valid(rule->uid_end) ||
		    !uid_lte(rule->uid_start, rule->uid_end))
		goto errout_free;
	}

	/* Per-family attribute parsing (src/dst, table checks, ...). */
	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	/* Insert sorted by preference, after existing rules of equal
	 * preference.
	 */
	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (rule->tun_id)
		ip_tunnel_need_metadata();

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
462
/* RTM_DELRULE handler: delete the first rule that matches every
 * attribute present in the request (attributes that are absent act as
 * wildcards), fix up goto bookkeeping and notify listeners.  Invoked
 * from rtnetlink dispatch.
 */
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (tb[FRA_TUN_ID] &&
		    (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
			continue;

		if (tb[FRA_UID_START] &&
		    !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
			continue;

		if (tb[FRA_UID_END] &&
		    !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
			continue;

		/* Per-family attribute comparison (src/dst etc.). */
		if (!ops->compare(rule, frh, tb))
			continue;

		/* Rules flagged permanent refuse deletion. */
		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		if (ops->delete) {
			err = ops->delete(rule);
			if (err)
				goto errout;
		}

		if (rule->tun_id)
			ip_tunnel_unneed_metadata();

		list_del_rcu(&rule->list);

		/* Keep the goto accounting consistent with newrule. */
		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is a target to any of them. If so,
		 * disable them. As this operation is eventually very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
582
583 static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
584                                          struct fib_rule *rule)
585 {
586         size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
587                          + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
588                          + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
589                          + nla_total_size(4) /* FRA_PRIORITY */
590                          + nla_total_size(4) /* FRA_TABLE */
591                          + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
592                          + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
593                          + nla_total_size(4) /* FRA_FWMARK */
594                          + nla_total_size(4) /* FRA_FWMASK */
595                          + nla_total_size(8) /* FRA_TUN_ID */
596                          + nla_total_size(4) /* FRA_UID_START */
597                          + nla_total_size(4); /* FRA_UID_END */
598
599         if (ops->nlmsg_payload)
600                 payload += ops->nlmsg_payload(rule);
601
602         return payload;
603 }
604
/* Serialize @rule into @skb as an RTM_NEWRULE/RTM_DELRULE message.
 * Returns 0 on success; on overflow the partially built message is
 * cancelled and -EMSGSIZE returned.
 */
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	/* The table id is also exported as a full-width attribute. */
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	/* Flag gotos whose target rule does not (currently) exist. */
	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		/* ifindex -1 means the named device is currently gone. */
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	/* Optional attributes are emitted only when set/meaningful. */
	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (rule->tun_id &&
	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)) ||
	    (uid_valid(rule->uid_start) &&
	     nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
	    (uid_valid(rule->uid_end) &&
	     nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	/* Per-family attributes (src/dst etc.). */
	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
677
/* Dump the rules of one family into @skb, skipping entries already
 * sent in a previous pass (cb->args[1]).  Always drops the module
 * reference held on @ops via rules_ops_put() before returning, so the
 * caller must not put it again.
 */
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	/* Record how far we got so the next call can resume here. */
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}
704
/* RTM_GETRULE dump handler.  For a specific family, only that family's
 * rules are dumped; for AF_UNSPEC every registered family is walked,
 * with cb->args[0] (family position) and cb->args[1] (rule position)
 * tracking progress across multiple skbs.
 */
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		/* dump_rules() drops the module ref taken above. */
		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		/* Skip families already dumped completely, or whose
		 * module is going away (try_module_get failure).
		 */
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		/* Next family restarts at its first rule. */
		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
740
/* Broadcast an RTM_NEWRULE/RTM_DELRULE event for @rule to the ops'
 * netlink multicast group.  On allocation or fill failure the error is
 * recorded on the rtnetlink socket so listeners can detect the lost
 * event.
 */
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}
768
769 static void attach_rules(struct list_head *rules, struct net_device *dev)
770 {
771         struct fib_rule *rule;
772
773         list_for_each_entry(rule, rules, list) {
774                 if (rule->iifindex == -1 &&
775                     strcmp(dev->name, rule->iifname) == 0)
776                         rule->iifindex = dev->ifindex;
777                 if (rule->oifindex == -1 &&
778                     strcmp(dev->name, rule->oifname) == 0)
779                         rule->oifindex = dev->ifindex;
780         }
781 }
782
783 static void detach_rules(struct list_head *rules, struct net_device *dev)
784 {
785         struct fib_rule *rule;
786
787         list_for_each_entry(rule, rules, list) {
788                 if (rule->iifindex == dev->ifindex)
789                         rule->iifindex = -1;
790                 if (rule->oifindex == dev->ifindex)
791                         rule->oifindex = -1;
792         }
793 }
794
795
/* Netdevice notifier: keep the by-name device bindings of all rule
 * families in the device's namespace in sync as devices register,
 * rename and unregister.  Runs under RTNL.
 */
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		/* A device matching detached rules' names may have appeared. */
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		/* Re-evaluate all name bindings from scratch. */
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}
826
/* Tracks device add/rename/remove to fix up rules' iif/oif bindings. */
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
830
831 static int __net_init fib_rules_net_init(struct net *net)
832 {
833         INIT_LIST_HEAD(&net->rules_ops);
834         spin_lock_init(&net->rules_mod_lock);
835         return 0;
836 }
837
/* No .exit: families unregister their ops themselves on netns teardown. */
static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};
841
/* Module init: register the rtnetlink message handlers, the
 * per-namespace state and the netdevice notifier.  On failure,
 * everything registered so far is torn down again in reverse order.
 */
static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

/* Core networking infrastructure: bring up before protocol initcalls. */
subsys_initcall(fib_rules_init);