2 * NET3 IP device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Alan Cox, <gw4pts@gw4pts.ampr.org>
16 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
19 * Alexey Kuznetsov: pa_* fields are replaced with ifaddr
21 * Cyrus Durgin: updated for kmod
22 * Matthias Andree: in devinet_ioctl, compare label and
23 * address (4.4BSD alias style support),
24 * fall back to comparing just the label
29 #include <asm/uaccess.h>
30 #include <linux/bitops.h>
31 #include <linux/capability.h>
32 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/kernel.h>
35 #include <linux/string.h>
37 #include <linux/socket.h>
38 #include <linux/sockios.h>
40 #include <linux/errno.h>
41 #include <linux/interrupt.h>
42 #include <linux/if_addr.h>
43 #include <linux/if_ether.h>
44 #include <linux/inet.h>
45 #include <linux/netdevice.h>
46 #include <linux/etherdevice.h>
47 #include <linux/skbuff.h>
48 #include <linux/init.h>
49 #include <linux/notifier.h>
50 #include <linux/inetdevice.h>
51 #include <linux/igmp.h>
52 #include <linux/slab.h>
53 #include <linux/hash.h>
55 #include <linux/sysctl.h>
57 #include <linux/kmod.h>
58 #include <linux/netconf.h>
63 #include <net/route.h>
64 #include <net/ip_fib.h>
65 #include <net/rtnetlink.h>
66 #include <net/net_namespace.h>
67 #include <net/addrconf.h>
69 #include "fib_lookup.h"
/* Template devconf for the per-netns "all" settings.  Indices are the
 * 1-based IPV4_DEVCONF_* enum values shifted down by one.
 * NOTE(review): elided extract — initializer wrapper and closing brace
 * are not visible in this view.
 */
71 static struct ipv4_devconf ipv4_devconf = {
73 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
74 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
75 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
76 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
77 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
78 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Template devconf copied into each newly created in_device (see
 * inetdev_init).  Differs from ipv4_devconf above by additionally
 * enabling ACCEPT_SOURCE_ROUTE.
 * NOTE(review): elided extract — closing brace not visible here.
 */
82 static struct ipv4_devconf ipv4_devconf_dflt = {
84 [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
85 [IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
86 [IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
87 [IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
88 [IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
89 [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
90 [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
/* Accessor for a field of the per-netns default devconf table. */
94 #define IPV4_DEVCONF_DFLT(net, attr) \
95 IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
/* Netlink attribute policy used when parsing RTM_NEWADDR/RTM_DELADDR
 * IFA_* attributes (see nlmsg_parse calls below).
 * NOTE(review): elided extract — closing brace not visible here.
 */
97 static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
98 [IFA_LOCAL] = { .type = NLA_U32 },
99 [IFA_ADDRESS] = { .type = NLA_U32 },
100 [IFA_BROADCAST] = { .type = NLA_U32 },
101 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
102 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
103 [IFA_FLAGS] = { .type = NLA_U32 },
/* Global hash table mapping local IPv4 address -> in_ifaddr.
 * 2^8 = 256 buckets; used by __ip_dev_find and check_lifetime.
 */
106 #define IN4_ADDR_HSIZE_SHIFT 8
107 #define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
109 static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
/* Bucket index for @addr; XOR with the netns hash spreads identical
 * addresses from different namespaces across buckets.
 * NOTE(review): elided extract — brace lines missing throughout these
 * three helpers.
 */
111 static u32 inet_addr_hash(const struct net *net, __be32 addr)
113 u32 val = (__force u32) addr ^ net_hash_mix(net);
115 return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
/* Link @ifa into the address hash, keyed by its local address. */
118 static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
120 u32 hash = inet_addr_hash(net, ifa->ifa_local);
123 hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
/* RCU-safe unlink; _init variant leaves the node safe to re-insert. */
126 static void inet_hash_remove(struct in_ifaddr *ifa)
129 hlist_del_init_rcu(&ifa->hash);
133 * __ip_dev_find - find the first device with a given source address.
134 * @net: the net namespace
135 * @addr: the source address
136 * @devref: if true, take a reference on the found device
138 * If a caller uses devref=false, it should be protected by RCU, or RTNL
/* NOTE(review): elided extract — the rcu_read_lock/unlock pair, the
 * "found" assignment/break in the loop, and several closing braces are
 * not visible in this view.
 */
140 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
142 u32 hash = inet_addr_hash(net, addr);
143 struct net_device *result = NULL;
144 struct in_ifaddr *ifa;
/* Fast path: look the address up in the global ifa hash table. */
147 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
148 if (ifa->ifa_local == addr) {
149 struct net_device *dev = ifa->ifa_dev->dev;
/* Skip entries belonging to a different network namespace. */
151 if (!net_eq(dev_net(dev), net))
158 struct flowi4 fl4 = { .daddr = addr };
159 struct fib_result res = { 0 };
160 struct fib_table *local;
162 /* Fallback to FIB local table so that communication
163 * over loopback subnets work.
165 local = fib_get_table(net, RT_TABLE_LOCAL);
167 !fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
168 res.type == RTN_LOCAL)
169 result = FIB_RES_DEV(res);
/* Pin the device for the caller when requested. */
171 if (result && devref)
176 EXPORT_SYMBOL(__ip_dev_find);
/* Forward declarations for helpers defined later in the file. */
178 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
180 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
181 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
/* devinet sysctl hooks: declared here, with empty stubs below.
 * NOTE(review): the #ifdef CONFIG_SYSCTL guards separating the real
 * declarations from the stubs appear to be elided from this extract.
 */
184 static int devinet_sysctl_register(struct in_device *idev);
185 static void devinet_sysctl_unregister(struct in_device *idev);
187 static int devinet_sysctl_register(struct in_device *idev)
191 static void devinet_sysctl_unregister(struct in_device *idev)
196 /* Locks all the inet devices. */
/* Allocate a zeroed in_ifaddr; may sleep (GFP_KERNEL). */
198 static struct in_ifaddr *inet_alloc_ifa(void)
200 return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
/* RCU callback: drop the in_device reference held by the ifa.
 * NOTE(review): the kfree(ifa) that presumably follows is elided.
 */
203 static void inet_rcu_free_ifa(struct rcu_head *head)
205 struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
207 in_dev_put(ifa->ifa_dev);
/* Defer actual freeing until after an RCU grace period, since readers
 * may still traverse the ifa via the RCU-protected hash/list. */
211 static void inet_free_ifa(struct in_ifaddr *ifa)
213 call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
/* Final teardown of an in_device once its refcount hits zero.
 * Sanity-checks that no addresses or multicast state remain.
 * NOTE(review): elided extract — the refcount check around the
 * "Freeing alive" error and the final kfree are not fully visible.
 */
216 void in_dev_finish_destroy(struct in_device *idev)
218 struct net_device *dev = idev->dev;
220 WARN_ON(idev->ifa_list);
221 WARN_ON(idev->mc_list);
222 kfree(rcu_dereference_protected(idev->mc_hash, 1));
223 #ifdef NET_REFCNT_DEBUG
224 pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
228 pr_err("Freeing alive in_device %p\n", idev);
232 EXPORT_SYMBOL(in_dev_finish_destroy);
/* Create and attach an in_device to @dev: copy the netns default
 * devconf, allocate ARP parms, register sysctls, init multicast state,
 * and finally publish it via dev->ip_ptr.
 * NOTE(review): elided extract — error-unwind labels, the dev_hold /
 * refcnt init, and ip_mc_up call are not visible in this view.
 */
234 static struct in_device *inetdev_init(struct net_device *dev)
236 struct in_device *in_dev;
241 in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
/* Start from the per-netns default configuration. */
244 memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
245 sizeof(in_dev->cnf));
246 in_dev->cnf.sysctl = NULL;
248 in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
249 if (!in_dev->arp_parms)
/* LRO is incompatible with forwarding; disable it if forwarding on. */
251 if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
252 dev_disable_lro(dev);
253 /* Reference in_dev->dev */
255 /* Account for reference dev->ip_ptr (below) */
258 err = devinet_sysctl_register(in_dev);
265 ip_mc_init_dev(in_dev);
266 if (dev->flags & IFF_UP)
269 /* we can receive as soon as ip_ptr is set -- do this last */
270 rcu_assign_pointer(dev->ip_ptr, in_dev);
272 return in_dev ?: ERR_PTR(err);
/* RCU callback: drop the final in_device reference after readers quiesce.
 * NOTE(review): the in_dev_put(idev) body line appears elided. */
279 static void in_dev_rcu_put(struct rcu_head *head)
281 struct in_device *idev = container_of(head, struct in_device, rcu_head);
/* Tear down an in_device: destroy multicast state, delete every
 * remaining address, unpublish dev->ip_ptr, then free via RCU.
 * NOTE(review): elided extract — the "dead" flag set and ASSERT_RTNL
 * that presumably precede this are not visible here.
 */
285 static void inetdev_destroy(struct in_device *in_dev)
287 struct in_ifaddr *ifa;
288 struct net_device *dev;
296 ip_mc_destroy_dev(in_dev);
/* Drain the address list; inet_del_ifa unlinks and frees each entry. */
298 while ((ifa = in_dev->ifa_list) != NULL) {
299 inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
303 RCU_INIT_POINTER(dev->ip_ptr, NULL);
305 devinet_sysctl_unregister(in_dev);
306 neigh_parms_release(&arp_tbl, in_dev->arp_parms);
309 call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
/* Return whether @a (and @b, if nonzero) fall inside the subnet of any
 * primary address on @in_dev.  NOTE(review): the "return 1/return 0"
 * lines are elided from this extract. */
312 int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
315 for_primary_ifa(in_dev) {
316 if (inet_ifa_match(a, ifa)) {
317 if (!b || inet_ifa_match(b, ifa)) {
322 } endfor_ifa(in_dev);
/* Remove *@ifap from @in_dev.  Deleting a primary address also deletes
 * (or, with promote_secondaries, promotes) its secondary aliases.
 * Sends RTM_DELADDR netlink messages and fires inetaddr_chain
 * notifiers for each removed address.
 * NOTE(review): heavily elided extract — ASSERT_RTNL, several closing
 * braces, the promote-candidate selection, and the final inet_free_ifa
 * are not visible in this view.
 */
327 static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
328 int destroy, struct nlmsghdr *nlh, u32 portid)
330 struct in_ifaddr *promote = NULL;
331 struct in_ifaddr *ifa, *ifa1 = *ifap;
332 struct in_ifaddr *last_prim = in_dev->ifa_list;
333 struct in_ifaddr *prev_prom = NULL;
334 int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
338 /* 1. Deleting primary ifaddr forces deletion all secondaries
339 * unless alias promotion is set
342 if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
343 struct in_ifaddr **ifap1 = &ifa1->ifa_next;
345 while ((ifa = *ifap1) != NULL) {
/* Track the last primary with scope <= the deleted one; used as
 * the insertion point when promoting a secondary below. */
346 if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
347 ifa1->ifa_scope <= ifa->ifa_scope)
/* Addresses outside the deleted primary's subnet are untouched. */
350 if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
351 ifa1->ifa_mask != ifa->ifa_mask ||
352 !inet_ifa_match(ifa1->ifa_address, ifa)) {
353 ifap1 = &ifa->ifa_next;
359 inet_hash_remove(ifa);
360 *ifap1 = ifa->ifa_next;
362 rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
363 blocking_notifier_call_chain(&inetaddr_chain,
373 /* On promotion all secondaries from subnet are changing
374 * the primary IP, we must remove all their routes silently
375 * and later to add them back with new prefsrc. Do this
376 * while all addresses are on the device list.
378 for (ifa = promote; ifa; ifa = ifa->ifa_next) {
379 if (ifa1->ifa_mask == ifa->ifa_mask &&
380 inet_ifa_match(ifa1->ifa_address, ifa))
381 fib_del_ifaddr(ifa, ifa1);
/* 2. Unlink and unhash the address being deleted. */
386 *ifap = ifa1->ifa_next;
387 inet_hash_remove(ifa1);
389 /* 3. Announce address deletion */
391 /* Send message first, then call notifier.
392 At first sight, FIB update triggered by notifier
393 will refer to already deleted ifaddr, that could confuse
394 netlink listeners. It is not true: look, gated sees
395 that route deleted and if it still thinks that ifaddr
396 is valid, it will try to restore deleted routes... Grr.
397 So that, this order is correct.
399 rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
400 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
403 struct in_ifaddr *next_sec = promote->ifa_next;
/* Move the promoted entry up into the primary section of the list. */
406 prev_prom->ifa_next = promote->ifa_next;
407 promote->ifa_next = last_prim->ifa_next;
408 last_prim->ifa_next = promote;
411 promote->ifa_flags &= ~IFA_F_SECONDARY;
412 rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
413 blocking_notifier_call_chain(&inetaddr_chain,
/* Re-add routes for the remaining secondaries of the same subnet,
 * now sourced from the promoted primary. */
415 for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
416 if (ifa1->ifa_mask != ifa->ifa_mask ||
417 !inet_ifa_match(ifa1->ifa_address, ifa))
/* Non-netlink wrapper around __inet_del_ifa (no nlh/portid). */
427 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
430 __inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
/* Deferred work that expires address lifetimes; see check_lifetime(). */
433 static void check_lifetime(struct work_struct *work);
435 static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
/* Insert @ifa into its device's address list, classifying it as
 * primary or secondary by subnet, hash it, kick the lifetime worker,
 * then announce via RTM_NEWADDR and the inetaddr notifier chain.
 * NOTE(review): elided extract — ASSERT_RTNL, the duplicate-address
 * (-EEXIST/-EINVAL) returns, and the list-splice line are not visible.
 */
437 static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
440 struct in_device *in_dev = ifa->ifa_dev;
441 struct in_ifaddr *ifa1, **ifap, **last_primary;
/* An address without a local part is rejected (freed by elided code). */
445 if (!ifa->ifa_local) {
450 ifa->ifa_flags &= ~IFA_F_SECONDARY;
451 last_primary = &in_dev->ifa_list;
/* Walk the list to find the insertion point and detect whether an
 * address in the same subnet already exists (making this secondary). */
453 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
454 ifap = &ifa1->ifa_next) {
455 if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
456 ifa->ifa_scope <= ifa1->ifa_scope)
457 last_primary = &ifa1->ifa_next;
458 if (ifa1->ifa_mask == ifa->ifa_mask &&
459 inet_ifa_match(ifa1->ifa_address, ifa)) {
460 if (ifa1->ifa_local == ifa->ifa_local) {
464 if (ifa1->ifa_scope != ifa->ifa_scope) {
468 ifa->ifa_flags |= IFA_F_SECONDARY;
/* New primary addresses also feed the PRNG with fresh entropy. */
472 if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
473 prandom_seed((__force u32) ifa->ifa_local);
477 ifa->ifa_next = *ifap;
480 inet_hash_insert(dev_net(in_dev->dev), ifa);
/* Re-arm lifetime expiry immediately so the new address is tracked. */
482 cancel_delayed_work(&check_lifetime_work);
483 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
485 /* Send message first, then call notifier.
486 Notifier will trigger FIB update, so that
487 listeners of netlink will know about new ifaddr */
488 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
489 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
/* Non-netlink wrapper around __inet_insert_ifa (no nlh/portid). */
494 static int inet_insert_ifa(struct in_ifaddr *ifa)
496 return __inet_insert_ifa(ifa, NULL, 0);
/* Bind @ifa to @dev's in_device (taking a reference if not already
 * bound), force host scope for loopback addresses, then insert it.
 * NOTE(review): elided extract — the !in_dev error path and the
 * in_dev_hold call are not visible in this view.
 */
499 static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
501 struct in_device *in_dev = __in_dev_get_rtnl(dev);
509 ipv4_devconf_setall(in_dev);
510 neigh_parms_data_state_setall(in_dev->arp_parms);
511 if (ifa->ifa_dev != in_dev) {
512 WARN_ON(ifa->ifa_dev);
514 ifa->ifa_dev = in_dev;
516 if (ipv4_is_loopback(ifa->ifa_local))
517 ifa->ifa_scope = RT_SCOPE_HOST;
518 return inet_insert_ifa(ifa);
521 /* Caller must hold RCU or RTNL :
522 * We dont take a reference on found in_device
/* Look up the in_device for interface index @ifindex, or NULL. */
524 struct in_device *inetdev_by_index(struct net *net, int ifindex)
526 struct net_device *dev;
527 struct in_device *in_dev = NULL;
530 dev = dev_get_by_index_rcu(net, ifindex);
532 in_dev = rcu_dereference_rtnl(dev->ip_ptr);
536 EXPORT_SYMBOL(inetdev_by_index);
538 /* Called only from RTNL semaphored context. No locks. */
/* Find the primary address on @in_dev matching @prefix/@mask, or NULL.
 * NOTE(review): the "return ifa" / "return NULL" lines are elided. */
540 struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
545 for_primary_ifa(in_dev) {
546 if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
548 } endfor_ifa(in_dev);
/* Join (@join true) or leave the multicast group given by @ifa's
 * address on its interface, using socket @sk.
 * NOTE(review): elided extract — the socket locking around the
 * join/leave calls is not visible in this view.
 */
552 static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
554 struct ip_mreqn mreq = {
555 .imr_multiaddr.s_addr = ifa->ifa_address,
556 .imr_ifindex = ifa->ifa_dev->dev->ifindex,
564 ret = ip_mc_join_group(sk, &mreq);
566 ret = ip_mc_leave_group(sk, &mreq);
/* RTM_DELADDR handler: parse IFA_* attributes, locate the matching
 * address on the target device (by local addr, label, and/or prefix),
 * auto-leave its multicast group if applicable, then delete it.
 * Falls through to -EADDRNOTAVAIL when nothing matched.
 */
572 static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
574 struct net *net = sock_net(skb->sk);
575 struct nlattr *tb[IFA_MAX+1];
576 struct in_device *in_dev;
577 struct ifaddrmsg *ifm;
578 struct in_ifaddr *ifa, **ifap;
583 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
587 ifm = nlmsg_data(nlh);
588 in_dev = inetdev_by_index(net, ifm->ifa_index);
/* Each supplied attribute must match; absent attributes are wildcards. */
594 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
595 ifap = &ifa->ifa_next) {
597 ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
600 if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
603 if (tb[IFA_ADDRESS] &&
604 (ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
605 !inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
/* Deleting a multicast auto-join address also leaves the group. */
608 if (ipv4_is_multicast(ifa->ifa_address))
609 ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
610 __inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
614 err = -EADDRNOTAVAIL;
619 #define INFINITY_LIFE_TIME 0xFFFFFFFF
/* Periodic worker that expires address lifetimes.  Two passes per
 * bucket: an RCU pass that only computes the next wakeup and whether
 * changes are needed, then (under RTNL, presumably — the lock lines
 * are elided) a pass that deletes expired addresses and marks
 * deprecated ones.  Re-queues itself at the computed deadline.
 * NOTE(review): heavily elided extract — locking, "continue"s, and
 * several closing braces are not visible in this view.
 */
621 static void check_lifetime(struct work_struct *work)
623 unsigned long now, next, next_sec, next_sched;
624 struct in_ifaddr *ifa;
625 struct hlist_node *n;
629 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
631 for (i = 0; i < IN4_ADDR_HSIZE; i++) {
632 bool change_needed = false;
/* Pass 1 (read-only): decide if this bucket needs changes and find
 * the earliest future expiry to schedule the next run. */
635 hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
638 if (ifa->ifa_flags & IFA_F_PERMANENT)
641 /* We try to batch several events at once. */
642 age = (now - ifa->ifa_tstamp +
643 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
645 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
646 age >= ifa->ifa_valid_lft) {
647 change_needed = true;
648 } else if (ifa->ifa_preferred_lft ==
649 INFINITY_LIFE_TIME) {
651 } else if (age >= ifa->ifa_preferred_lft) {
652 if (time_before(ifa->ifa_tstamp +
653 ifa->ifa_valid_lft * HZ, next))
654 next = ifa->ifa_tstamp +
655 ifa->ifa_valid_lft * HZ;
657 if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
658 change_needed = true;
659 } else if (time_before(ifa->ifa_tstamp +
660 ifa->ifa_preferred_lft * HZ,
662 next = ifa->ifa_tstamp +
663 ifa->ifa_preferred_lft * HZ;
/* Pass 2 (mutating): delete fully-expired addresses, flag newly
 * deprecated ones and announce the flag change via RTM_NEWADDR. */
670 hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
673 if (ifa->ifa_flags & IFA_F_PERMANENT)
676 /* We try to batch several events at once. */
677 age = (now - ifa->ifa_tstamp +
678 ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
680 if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
681 age >= ifa->ifa_valid_lft) {
682 struct in_ifaddr **ifap;
684 for (ifap = &ifa->ifa_dev->ifa_list;
685 *ifap != NULL; ifap = &(*ifap)->ifa_next) {
687 inet_del_ifa(ifa->ifa_dev,
692 } else if (ifa->ifa_preferred_lft !=
693 INFINITY_LIFE_TIME &&
694 age >= ifa->ifa_preferred_lft &&
695 !(ifa->ifa_flags & IFA_F_DEPRECATED)) {
696 ifa->ifa_flags |= IFA_F_DEPRECATED;
697 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
703 next_sec = round_jiffies_up(next);
706 /* If rounded timeout is accurate enough, accept it. */
707 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
708 next_sched = next_sec;
711 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
712 if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
713 next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
715 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
/* Apply valid/preferred lifetimes (in seconds) to @ifa: infinite
 * lifetimes set IFA_F_PERMANENT; a zero preferred lifetime marks the
 * address deprecated.  Also stamps tstamp/cstamp.
 * NOTE(review): elided extract — the "else" branches setting the
 * INFINITY values back are not visible in this view.
 */
719 static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
722 unsigned long timeout;
724 ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
726 timeout = addrconf_timeout_fixup(valid_lft, HZ);
727 if (addrconf_finite_timeout(timeout))
728 ifa->ifa_valid_lft = timeout;
730 ifa->ifa_flags |= IFA_F_PERMANENT;
732 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
733 if (addrconf_finite_timeout(timeout)) {
735 ifa->ifa_flags |= IFA_F_DEPRECATED;
736 ifa->ifa_preferred_lft = timeout;
/* Update modification time; creation time only on first set. */
738 ifa->ifa_tstamp = jiffies;
739 if (!ifa->ifa_cstamp)
740 ifa->ifa_cstamp = ifa->ifa_tstamp;
/* Build an in_ifaddr from an RTM_NEWADDR netlink message.  Validates
 * prefix length and required attributes, resolves the target device,
 * and fills in address/mask/label/flags.  Lifetimes from
 * IFA_CACHEINFO are returned via @pvalid_lft/@pprefered_lft.
 * NOTE(review): elided extract — error returns (ERR_PTR paths) and
 * the errout labels are not visible in this view.
 */
743 static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
744 __u32 *pvalid_lft, __u32 *pprefered_lft)
746 struct nlattr *tb[IFA_MAX+1];
747 struct in_ifaddr *ifa;
748 struct ifaddrmsg *ifm;
749 struct net_device *dev;
750 struct in_device *in_dev;
753 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
757 ifm = nlmsg_data(nlh);
/* IFA_LOCAL is mandatory; prefix length must fit an IPv4 address. */
759 if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
762 dev = __dev_get_by_index(net, ifm->ifa_index);
767 in_dev = __in_dev_get_rtnl(dev);
772 ifa = inet_alloc_ifa();
775 * A potential indev allocation can be left alive, it stays
776 * assigned to its device and is destroy with it.
780 ipv4_devconf_setall(in_dev);
781 neigh_parms_data_state_setall(in_dev->arp_parms);
/* With no explicit peer/broadcast base, IFA_ADDRESS = IFA_LOCAL. */
784 if (!tb[IFA_ADDRESS])
785 tb[IFA_ADDRESS] = tb[IFA_LOCAL];
787 INIT_HLIST_NODE(&ifa->hash);
788 ifa->ifa_prefixlen = ifm->ifa_prefixlen;
789 ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
790 ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
792 ifa->ifa_scope = ifm->ifa_scope;
793 ifa->ifa_dev = in_dev;
795 ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
796 ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
798 if (tb[IFA_BROADCAST])
799 ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
/* Label defaults to the device name when not supplied. */
802 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
804 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
806 if (tb[IFA_CACHEINFO]) {
807 struct ifa_cacheinfo *ci;
809 ci = nla_data(tb[IFA_CACHEINFO]);
/* Reject zero valid lifetime or preferred > valid. */
810 if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
814 *pvalid_lft = ci->ifa_valid;
815 *pprefered_lft = ci->ifa_prefered;
/* Find an existing address on @ifa's device with the same mask,
 * subnet, and local address; used by inet_rtm_newaddr to decide
 * between create and replace.  NOTE(review): the "return ifa1" /
 * "return NULL" lines are elided from this extract. */
826 static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
828 struct in_device *in_dev = ifa->ifa_dev;
829 struct in_ifaddr *ifa1, **ifap;
834 for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
835 ifap = &ifa1->ifa_next) {
836 if (ifa1->ifa_mask == ifa->ifa_mask &&
837 inet_ifa_match(ifa1->ifa_address, ifa) &&
838 ifa1->ifa_local == ifa->ifa_local)
/* RTM_NEWADDR handler: build an ifa from the message; if no matching
 * address exists, insert it (auto-joining multicast groups when
 * IFA_F_MCAUTOJOIN is set); otherwise honor NLM_F_EXCL/NLM_F_REPLACE
 * and just refresh the lifetimes of the existing entry.
 * NOTE(review): elided extract — the error returns and the
 * inet_free_ifa on the EXCL/no-REPLACE path are not visible here.
 */
844 static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
846 struct net *net = sock_net(skb->sk);
847 struct in_ifaddr *ifa;
848 struct in_ifaddr *ifa_existing;
849 __u32 valid_lft = INFINITY_LIFE_TIME;
850 __u32 prefered_lft = INFINITY_LIFE_TIME;
854 ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
858 ifa_existing = find_matching_ifa(ifa);
860 /* It would be best to check for !NLM_F_CREATE here but
861 * userspace already relies on not having to provide this.
863 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
864 if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
865 int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
873 return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
/* Existing address: replace is only allowed with NLM_F_REPLACE. */
877 if (nlh->nlmsg_flags & NLM_F_EXCL ||
878 !(nlh->nlmsg_flags & NLM_F_REPLACE))
881 set_ifa_lifetime(ifa, valid_lft, prefered_lft);
882 cancel_delayed_work(&check_lifetime_work);
883 queue_delayed_work(system_power_efficient_wq,
884 &check_lifetime_work, 0);
885 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
891 * Determine a default network mask, based on the IP address.
/* Classful default prefix length for @addr: A/B/C class widths, 0 for
 * the zero network, -1 otherwise (e.g. multicast).  NOTE(review): the
 * rc assignments and return are elided from this extract. */
894 static int inet_abc_len(__be32 addr)
896 int rc = -1; /* Something else, probably a multicast. */
898 if (ipv4_is_zeronet(addr))
901 __u32 haddr = ntohl(addr);
903 if (IN_CLASSA(haddr))
905 else if (IN_CLASSB(haddr))
907 else if (IN_CLASSC(haddr))
/* Legacy SIOC{G,S}IF* ioctl entry point for IPv4 address management.
 * Copies the user's ifreq in, resolves device and (for alias-aware
 * ioctls) the matching in_ifaddr by label and/or address, performs the
 * get/set operation, and copies the result back for get-style calls.
 * NOTE(review): heavily elided extract — rtnl locking, several error
 * returns, break statements and closing braces are not visible here.
 */
915 int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
918 struct sockaddr_in sin_orig;
919 struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
920 struct in_device *in_dev;
921 struct in_ifaddr **ifap = NULL;
922 struct in_ifaddr *ifa = NULL;
923 struct net_device *dev;
926 int tryaddrmatch = 0;
929 * Fetch the caller's info block into kernel space
932 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
934 ifr.ifr_name[IFNAMSIZ - 1] = 0;
936 /* save original address for comparison */
937 memcpy(&sin_orig, sin, sizeof(*sin));
/* A ':' in the name denotes a 4.4BSD-style alias label. */
939 colon = strchr(ifr.ifr_name, ':');
943 dev_load(net, ifr.ifr_name);
946 case SIOCGIFADDR: /* Get interface address */
947 case SIOCGIFBRDADDR: /* Get the broadcast address */
948 case SIOCGIFDSTADDR: /* Get the destination address */
949 case SIOCGIFNETMASK: /* Get the netmask for the interface */
950 /* Note that these ioctls will not sleep,
951 so that we do not impose a lock.
952 One day we will be forced to put shlock here (I mean SMP)
954 tryaddrmatch = (sin_orig.sin_family == AF_INET);
955 memset(sin, 0, sizeof(*sin));
956 sin->sin_family = AF_INET;
/* Flag changes require CAP_NET_ADMIN in the owning user-ns. */
961 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
964 case SIOCSIFADDR: /* Set interface address (and family) */
965 case SIOCSIFBRDADDR: /* Set the broadcast address */
966 case SIOCSIFDSTADDR: /* Set the destination address */
967 case SIOCSIFNETMASK: /* Set the netmask for the interface */
968 case SIOCKILLADDR: /* Nuke all sockets on this address */
970 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
973 if (sin->sin_family != AF_INET)
984 dev = __dev_get_by_name(net, ifr.ifr_name)
991 in_dev = __in_dev_get_rtnl(dev);
994 /* Matthias Andree */
995 /* compare label and address (4.4BSD style) */
996 /* note: we only do this for a limited set of ioctls
997 and only if the original address family was AF_INET.
998 This is checked above. */
999 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1000 ifap = &ifa->ifa_next) {
1001 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
1002 sin_orig.sin_addr.s_addr ==
1008 /* we didn't get a match, maybe the application is
1009 4.3BSD-style and passed in junk so we fall back to
1010 comparing just the label */
1012 for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
1013 ifap = &ifa->ifa_next)
1014 if (!strcmp(ifr.ifr_name, ifa->ifa_label))
/* SIOCSIFADDR/SIOCSIFFLAGS/SIOCKILLADDR may proceed without an
 * existing matching address; everything else needs one. */
1019 ret = -EADDRNOTAVAIL;
1020 if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS
1021 && cmd != SIOCKILLADDR)
1025 case SIOCGIFADDR: /* Get interface address */
1026 sin->sin_addr.s_addr = ifa->ifa_local;
1029 case SIOCGIFBRDADDR: /* Get the broadcast address */
1030 sin->sin_addr.s_addr = ifa->ifa_broadcast;
1033 case SIOCGIFDSTADDR: /* Get the destination address */
1034 sin->sin_addr.s_addr = ifa->ifa_address;
1037 case SIOCGIFNETMASK: /* Get the netmask for the interface */
1038 sin->sin_addr.s_addr = ifa->ifa_mask;
1043 ret = -EADDRNOTAVAIL;
/* Taking an alias down deletes its address entry. */
1047 if (!(ifr.ifr_flags & IFF_UP))
1048 inet_del_ifa(in_dev, ifap, 1);
1051 ret = dev_change_flags(dev, ifr.ifr_flags);
1054 case SIOCSIFADDR: /* Set interface address (and family) */
1056 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1061 ifa = inet_alloc_ifa();
1064 INIT_HLIST_NODE(&ifa->hash);
1066 memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
1068 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
/* Unchanged address is a no-op; otherwise delete old, then re-add. */
1071 if (ifa->ifa_local == sin->sin_addr.s_addr)
1073 inet_del_ifa(in_dev, ifap, 0);
1074 ifa->ifa_broadcast = 0;
1078 ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
/* Non-PtP links get a classful default mask and broadcast addr;
 * point-to-point links use a /32 host route. */
1080 if (!(dev->flags & IFF_POINTOPOINT)) {
1081 ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
1082 ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
1083 if ((dev->flags & IFF_BROADCAST) &&
1084 ifa->ifa_prefixlen < 31)
1085 ifa->ifa_broadcast = ifa->ifa_address |
1088 ifa->ifa_prefixlen = 32;
1089 ifa->ifa_mask = inet_make_mask(32);
1091 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
1092 ret = inet_set_ifa(dev, ifa);
1095 case SIOCSIFBRDADDR: /* Set the broadcast address */
1097 if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
1098 inet_del_ifa(in_dev, ifap, 0);
1099 ifa->ifa_broadcast = sin->sin_addr.s_addr;
1100 inet_insert_ifa(ifa);
1104 case SIOCSIFDSTADDR: /* Set the destination address */
1106 if (ifa->ifa_address == sin->sin_addr.s_addr)
1109 if (inet_abc_len(sin->sin_addr.s_addr) < 0)
1112 inet_del_ifa(in_dev, ifap, 0);
1113 ifa->ifa_address = sin->sin_addr.s_addr;
1114 inet_insert_ifa(ifa);
1117 case SIOCSIFNETMASK: /* Set the netmask for the interface */
1120 * The mask we set must be legal.
1123 if (bad_mask(sin->sin_addr.s_addr, 0))
1126 if (ifa->ifa_mask != sin->sin_addr.s_addr) {
1127 __be32 old_mask = ifa->ifa_mask;
1128 inet_del_ifa(in_dev, ifap, 0);
1129 ifa->ifa_mask = sin->sin_addr.s_addr;
1130 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
1132 /* See if current broadcast address matches
1133 * with current netmask, then recalculate
1134 * the broadcast address. Otherwise it's a
1135 * funny address, so don't touch it since
1136 * the user seems to know what (s)he's doing...
1138 if ((dev->flags & IFF_BROADCAST) &&
1139 (ifa->ifa_prefixlen < 31) &&
1140 (ifa->ifa_broadcast ==
1141 (ifa->ifa_local|~old_mask))) {
1142 ifa->ifa_broadcast = (ifa->ifa_local |
1143 ~sin->sin_addr.s_addr);
1145 inet_insert_ifa(ifa);
1148 case SIOCKILLADDR: /* Nuke all connections on this address */
1149 ret = tcp_nuke_addr(net, (struct sockaddr *) sin);
/* Copy the (possibly updated) ifreq back for get-style ioctls. */
1158 ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
/* SIOCGIFCONF helper: serialize each address on @dev into an ifreq and
 * copy it to the user buffer @buf; @len is the remaining space.
 * Returns the number of bytes produced (accumulated in "done").
 * NOTE(review): elided extract — the !in_dev guard, the NULL-buf
 * "count only" branch, and the final return are not visible here.
 */
1162 static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
1164 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1165 struct in_ifaddr *ifa;
1172 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1174 done += sizeof(ifr);
/* Stop when the remaining user buffer can't hold another entry. */
1177 if (len < (int) sizeof(ifr))
1179 memset(&ifr, 0, sizeof(struct ifreq));
1180 strcpy(ifr.ifr_name, ifa->ifa_label);
1182 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
1183 (*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
1186 if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
1190 buf += sizeof(struct ifreq);
1191 len -= sizeof(struct ifreq);
1192 done += sizeof(struct ifreq);
/* Select a source address on @dev suitable for reaching @dst within
 * @scope: prefer a primary address in @dst's subnet, falling back to
 * any primary of acceptable scope, then to a scan of all devices in
 * the namespace (loopback first by device order).
 * NOTE(review): elided extract — rcu_read_lock/unlock, the early-out
 * gotos and the final return are not visible in this view.
 */
1198 __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
1201 struct in_device *in_dev;
1202 struct net *net = dev_net(dev);
1205 in_dev = __in_dev_get_rcu(dev);
1209 for_primary_ifa(in_dev) {
1210 if (ifa->ifa_scope > scope)
1212 if (!dst || inet_ifa_match(dst, ifa)) {
1213 addr = ifa->ifa_local;
/* Remember a fallback candidate even when it doesn't match @dst. */
1217 addr = ifa->ifa_local;
1218 } endfor_ifa(in_dev);
1224 /* Not loopback addresses on loopback should be preferred
1225 in this case. It is important that lo is the first interface
1228 for_each_netdev_rcu(net, dev) {
1229 in_dev = __in_dev_get_rcu(dev);
1233 for_primary_ifa(in_dev) {
1234 if (ifa->ifa_scope != RT_SCOPE_LINK &&
1235 ifa->ifa_scope <= scope) {
1236 addr = ifa->ifa_local;
1239 } endfor_ifa(in_dev);
1245 EXPORT_SYMBOL(inet_select_addr);
/* Confirm that an address matching (@dst, @local, @scope) exists on
 * @in_dev; returns the confirmed local address or 0.
 * NOTE(review): elided extract — the loop opening (for_ifa) and some
 * of the matching logic are not visible in this view.
 */
1247 static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
1248 __be32 local, int scope)
1255 (local == ifa->ifa_local || !local) &&
1256 ifa->ifa_scope <= scope) {
1257 addr = ifa->ifa_local;
1262 same = (!local || inet_ifa_match(local, ifa)) &&
1263 (!dst || inet_ifa_match(dst, ifa));
1267 /* Is the selected addr into dst subnet? */
1268 if (inet_ifa_match(addr, ifa))
1270 /* No, then can we use new local src? */
1271 if (ifa->ifa_scope <= scope) {
1272 addr = ifa->ifa_local;
1275 /* search for large dst subnet for addr */
1279 } endfor_ifa(in_dev);
1281 return same ? addr : 0;
1285 * Confirm that local IP address exists using wildcards:
1286 * - net: netns to check, cannot be NULL
1287 * - in_dev: only on this interface, NULL=any interface
1288 * - dst: only in the same subnet as dst, 0=any dst
1289 * - local: address, 0=autoselect the local address
1290 * - scope: maximum allowed scope value for the local address
/* NOTE(review): elided extract — rcu_read_lock/unlock and the break
 * when an address is confirmed are not visible in this view. */
1292 __be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
1293 __be32 dst, __be32 local, int scope)
1296 struct net_device *dev;
/* With a specific in_dev, check only that device. */
1299 return confirm_addr_indev(in_dev, dst, local, scope);
/* Otherwise scan every device in the namespace. */
1302 for_each_netdev_rcu(net, dev) {
1303 in_dev = __in_dev_get_rcu(dev);
1305 addr = confirm_addr_indev(in_dev, dst, local, scope);
1314 EXPORT_SYMBOL(inet_confirm_addr);
/* Subscribe to IPv4 address add/remove events (NETDEV_UP/NETDEV_DOWN
 * on the blocking inetaddr_chain). */
1320 int register_inetaddr_notifier(struct notifier_block *nb)
1322 return blocking_notifier_chain_register(&inetaddr_chain, nb);
1324 EXPORT_SYMBOL(register_inetaddr_notifier);
/* Unsubscribe a previously registered inetaddr notifier. */
1326 int unregister_inetaddr_notifier(struct notifier_block *nb)
1328 return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
1330 EXPORT_SYMBOL(unregister_inetaddr_notifier);
1332 /* Rename ifa_labels for a device name change. Make some effort to preserve
1333 * existing alias numbering and to create unique labels if possible.
/* NOTE(review): elided extract — the "named" counter handling and
 * some branch structure are not visible in this view. */
1335 static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
1337 struct in_ifaddr *ifa;
1340 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
1341 char old[IFNAMSIZ], *dot;
1343 memcpy(old, ifa->ifa_label, IFNAMSIZ);
1344 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
/* Re-attach the alias suffix (":N") from the old label, truncating
 * the new base name if the combination would overflow IFNAMSIZ. */
1347 dot = strchr(old, ':');
1349 sprintf(old, ":%d", named);
1352 if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
1353 strcat(ifa->ifa_label, dot);
1355 strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
1357 rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
/* MTU sanity check for enabling IP on a device.
 * NOTE(review): the comparison body of this predicate is elided. */
1361 static bool inetdev_valid_mtu(unsigned int mtu)
/* Broadcast a gratuitous ARP for every address on @in_dev so peers
 * refresh their ARP caches after a link or address change. */
1366 static void inetdev_send_gratuitous_arp(struct net_device *dev,
1367 struct in_device *in_dev)
1370 struct in_ifaddr *ifa;
1372 for (ifa = in_dev->ifa_list; ifa;
1373 ifa = ifa->ifa_next) {
1374 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1375 ifa->ifa_local, dev,
1376 ifa->ifa_local, NULL,
1377 dev->dev_addr, NULL);
1381 /* Called only under RTNL semaphore */
/* netdevice notifier: creates/destroys the in_device across device
 * lifecycle events, assigns 127.0.0.1/8 when loopback comes up,
 * emits gratuitous ARP on address/link changes, and keeps sysctls and
 * labels in sync on renames.
 * NOTE(review): heavily elided extract — "goto out", "break"s,
 * intervening case labels and closing braces are not visible here.
 */
1383 static int inetdev_event(struct notifier_block *this, unsigned long event,
1386 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1387 struct in_device *in_dev = __in_dev_get_rtnl(dev);
/* No in_device yet: only REGISTER (and a MTU-revalidation path)
 * may create one; everything else is ignored. */
1392 if (event == NETDEV_REGISTER) {
1393 in_dev = inetdev_init(dev);
1395 return notifier_from_errno(PTR_ERR(in_dev));
1396 if (dev->flags & IFF_LOOPBACK) {
1397 IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
1398 IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
1400 } else if (event == NETDEV_CHANGEMTU) {
1401 /* Re-enabling IP */
1402 if (inetdev_valid_mtu(dev->mtu))
1403 in_dev = inetdev_init(dev);
1409 case NETDEV_REGISTER:
/* A REGISTER with an existing in_dev should not happen. */
1410 pr_debug("%s: bug\n", __func__);
1411 RCU_INIT_POINTER(dev->ip_ptr, NULL);
1414 if (!inetdev_valid_mtu(dev->mtu))
/* Loopback coming up: install the canonical 127.0.0.1/8. */
1416 if (dev->flags & IFF_LOOPBACK) {
1417 struct in_ifaddr *ifa = inet_alloc_ifa();
1420 INIT_HLIST_NODE(&ifa->hash);
1422 ifa->ifa_address = htonl(INADDR_LOOPBACK);
1423 ifa->ifa_prefixlen = 8;
1424 ifa->ifa_mask = inet_make_mask(8);
1425 in_dev_hold(in_dev);
1426 ifa->ifa_dev = in_dev;
1427 ifa->ifa_scope = RT_SCOPE_HOST;
1428 memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
1429 set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
1430 INFINITY_LIFE_TIME);
1431 ipv4_devconf_setall(in_dev);
1432 neigh_parms_data_state_setall(in_dev->arp_parms);
1433 inet_insert_ifa(ifa);
1438 case NETDEV_CHANGEADDR:
1439 if (!IN_DEV_ARP_NOTIFY(in_dev))
1442 case NETDEV_NOTIFY_PEERS:
1443 /* Send gratuitous ARP to notify of link change */
1444 inetdev_send_gratuitous_arp(dev, in_dev);
1449 case NETDEV_PRE_TYPE_CHANGE:
1450 ip_mc_unmap(in_dev);
1452 case NETDEV_POST_TYPE_CHANGE:
1453 ip_mc_remap(in_dev);
1455 case NETDEV_CHANGEMTU:
1456 if (inetdev_valid_mtu(dev->mtu))
1458 /* disable IP when MTU is not enough */
1459 case NETDEV_UNREGISTER:
1460 inetdev_destroy(in_dev);
1462 case NETDEV_CHANGENAME:
1463 /* Do not notify about label change, this event is
1464 * not interesting to applications using netlink.
1466 inetdev_changename(dev, in_dev);
/* Re-register sysctls under the new device name. */
1468 devinet_sysctl_unregister(in_dev);
1469 devinet_sysctl_register(in_dev);
1476 static struct notifier_block ip_netdev_notifier = {
1477 .notifier_call = inetdev_event,
/*
 * Worst-case payload size of an RTM_NEWADDR/RTM_DELADDR netlink message:
 * fixed ifaddrmsg header plus every attribute inet_fill_ifaddr() may emit.
 * Must stay in sync with inet_fill_ifaddr(); an undersized value there
 * surfaces as the -EMSGSIZE WARN_ON in rtmsg_ifa().
 */
1480 static size_t inet_nlmsg_size(void)
1482 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
1483 + nla_total_size(4) /* IFA_ADDRESS */
1484 + nla_total_size(4) /* IFA_LOCAL */
1485 + nla_total_size(4) /* IFA_BROADCAST */
1486 + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
1487 + nla_total_size(4) /* IFA_FLAGS */
1488 + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
/*
 * Convert a jiffies timestamp into hundredths of a second since boot,
 * the unit userspace expects in struct ifa_cacheinfo.
 */
1491 static inline u32 cstamp_delta(unsigned long cstamp)
1493 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
/*
 * Append an IFA_CACHEINFO attribute (creation/update stamps plus
 * preferred/valid lifetimes) to @skb.  Returns the nla_put() result
 * (0 on success, negative errno on failure).
 */
1496 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
1497 unsigned long tstamp, u32 preferred, u32 valid)
1499 struct ifa_cacheinfo ci;
1501 ci.cstamp = cstamp_delta(cstamp);
1502 ci.tstamp = cstamp_delta(tstamp);
1503 ci.ifa_prefered = preferred;
1504 ci.ifa_valid = valid;
1506 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
/*
 * Serialize one in_ifaddr into a netlink message on @skb.
 * Emits the ifaddrmsg header, then IFA_ADDRESS/IFA_LOCAL/IFA_BROADCAST/
 * IFA_LABEL/IFA_FLAGS/IFA_CACHEINFO attributes as applicable.
 * NOTE(review): several original lines are elided from this listing
 * (nlmsg_put() NULL check, parts of the lifetime clamping, the success
 * return) — verify against the full source before editing.
 */
1509 static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1510 u32 portid, u32 seq, int event, unsigned int flags)
1512 struct ifaddrmsg *ifm;
1513 struct nlmsghdr *nlh;
1514 u32 preferred, valid;
1516 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
1520 ifm = nlmsg_data(nlh);
1521 ifm->ifa_family = AF_INET;
1522 ifm->ifa_prefixlen = ifa->ifa_prefixlen;
1523 ifm->ifa_flags = ifa->ifa_flags;
1524 ifm->ifa_scope = ifa->ifa_scope;
1525 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
/* Non-permanent addresses report remaining (not configured) lifetimes. */
1527 if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
1528 preferred = ifa->ifa_preferred_lft;
1529 valid = ifa->ifa_valid_lft;
1530 if (preferred != INFINITY_LIFE_TIME) {
1531 long tval = (jiffies - ifa->ifa_tstamp) / HZ;
1533 if (preferred > tval)
1537 if (valid != INFINITY_LIFE_TIME) {
/* Permanent addresses: both lifetimes are reported as infinite. */
1545 preferred = INFINITY_LIFE_TIME;
1546 valid = INFINITY_LIFE_TIME;
/* Zero-valued address fields and an empty label are simply omitted. */
1548 if ((ifa->ifa_address &&
1549 nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1551 nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
1552 (ifa->ifa_broadcast &&
1553 nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1554 (ifa->ifa_label[0] &&
1555 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
1556 nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
1557 put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
1559 goto nla_put_failure;
1561 nlmsg_end(skb, nlh);
/* nla_put_failure path: roll back the partially-built message. */
1565 nlmsg_cancel(skb, nlh);
/*
 * RTM_GETADDR dump callback: walk every net_device (via the per-netns
 * index hash) and every address on it, emitting one RTM_NEWADDR message
 * per address.  cb->args[0..2] persist the (hash bucket, device index,
 * address index) cursor across dump continuations; cb->seq is derived
 * from dev_addr_genid so userspace can detect mid-dump changes.
 * NOTE(review): lines are elided in this listing (s_h initialization,
 * rcu_read_lock/unlock, the "cont:" label, out: bookkeeping) — consult
 * the full source before modifying.
 */
1569 static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1571 struct net *net = sock_net(skb->sk);
1574 int ip_idx, s_ip_idx;
1575 struct net_device *dev;
1576 struct in_device *in_dev;
1577 struct in_ifaddr *ifa;
1578 struct hlist_head *head;
/* Resume point saved by a previous (truncated) dump pass. */
1581 s_idx = idx = cb->args[1];
1582 s_ip_idx = ip_idx = cb->args[2];
1584 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1586 head = &net->dev_index_head[h];
1588 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1590 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1593 if (h > s_h || idx > s_idx)
1595 in_dev = __in_dev_get_rcu(dev);
1599 for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
1600 ifa = ifa->ifa_next, ip_idx++) {
/* Skip addresses already delivered in an earlier pass. */
1601 if (ip_idx < s_ip_idx)
1603 if (inet_fill_ifaddr(skb, ifa,
1604 NETLINK_CB(cb->skb).portid,
1606 RTM_NEWADDR, NLM_F_MULTI) < 0) {
1610 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1621 cb->args[2] = ip_idx;
/*
 * Broadcast an address change (@event, e.g. RTM_NEWADDR/RTM_DELADDR)
 * for @ifa to RTNLGRP_IPV4_IFADDR listeners.  On a fill failure the
 * skb is dropped and the group error state is set instead.
 * NOTE(review): the portid derivation, skb NULL check and errout label
 * are elided from this listing.
 */
1626 static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1629 struct sk_buff *skb;
1630 u32 seq = nlh ? nlh->nlmsg_seq : 0;
1634 net = dev_net(ifa->ifa_dev->dev);
1635 skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
1639 err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
1641 /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
1642 WARN_ON(err == -EMSGSIZE);
1646 rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
1650 rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
/*
 * rtnl_af_ops hook: size of the IFLA_INET_CONF blob attached to
 * RTM_GETLINK replies (one u32 per devconf entry).  The elided lines
 * presumably return 0 when the device has no in_device.
 */
1653 static size_t inet_get_link_af_size(const struct net_device *dev,
1654 u32 ext_filter_mask)
1656 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1661 return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
/*
 * rtnl_af_ops hook: copy the device's entire ipv4 devconf array into an
 * IFLA_INET_CONF attribute.  Note the attribute payload is the raw
 * cnf.data[] values, indexed 0..IPV4_DEVCONF_MAX-1.
 */
1664 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
1665 u32 ext_filter_mask)
1667 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1674 nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
1678 for (i = 0; i < IPV4_DEVCONF_MAX; i++)
1679 ((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
/* Netlink policy for the AF_INET portion of IFLA_AF_SPEC: a nest of
 * per-devconf u32 attributes under IFLA_INET_CONF. */
1684 static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
1685 [IFLA_INET_CONF] = { .type = NLA_NESTED },
/*
 * rtnl_af_ops hook: validate an IFLA_AF_SPEC/AF_INET payload before
 * inet_set_link_af() applies it.  Rejects devices without an in_device
 * (-EAFNOSUPPORT) and any nested config id outside 1..IPV4_DEVCONF_MAX.
 * NOTE(review): the length check on each nested attribute is elided
 * from this listing.
 */
1688 static int inet_validate_link_af(const struct net_device *dev,
1689 const struct nlattr *nla)
1691 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1694 if (dev && !__in_dev_get_rtnl(dev))
1695 return -EAFNOSUPPORT;
1697 err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
1701 if (tb[IFLA_INET_CONF]) {
1702 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
1703 int cfgid = nla_type(a);
/* cfgid is 1-based: IPV4_DEVCONF_* constants start at 1. */
1708 if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
/*
 * rtnl_af_ops hook: apply a validated IFLA_INET_CONF nest to the
 * device's devconf via ipv4_devconf_set().  Parsing here passes a NULL
 * policy because inet_validate_link_af() already vetted the payload.
 */
1716 static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
1718 struct in_device *in_dev = __in_dev_get_rtnl(dev);
1719 struct nlattr *a, *tb[IFLA_INET_MAX+1];
1723 return -EAFNOSUPPORT;
1725 if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
1728 if (tb[IFLA_INET_CONF]) {
1729 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
1730 ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
/*
 * Payload size of an RTM_NEWNETCONF message carrying @type.
 * Must stay in sync with inet_netconf_fill_devconf(); a mismatch trips
 * the -EMSGSIZE WARN_ON in the notify/get paths.
 */
1736 static int inet_netconf_msgsize_devconf(int type)
1738 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
1739 + nla_total_size(4); /* NETCONFA_IFINDEX */
1741 /* type -1 is used for ALL */
1742 if (type == -1 || type == NETCONFA_FORWARDING)
1743 size += nla_total_size(4);
1744 if (type == -1 || type == NETCONFA_RP_FILTER)
1745 size += nla_total_size(4);
1746 if (type == -1 || type == NETCONFA_MC_FORWARDING)
1747 size += nla_total_size(4);
1748 if (type == -1 || type == NETCONFA_PROXY_NEIGH)
1749 size += nla_total_size(4);
1750 if (type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
1751 size += nla_total_size(4);
/*
 * Build an RTM_NEWNETCONF message for @devconf on @skb.
 * @ifindex may also be NETCONFA_IFINDEX_ALL / NETCONFA_IFINDEX_DEFAULT;
 * @type selects a single attribute, or -1 to emit them all.
 * Note NETCONFA_PROXY_NEIGH intentionally reports the PROXY_ARP value.
 */
1756 static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
1757 struct ipv4_devconf *devconf, u32 portid,
1758 u32 seq, int event, unsigned int flags,
1761 struct nlmsghdr *nlh;
1762 struct netconfmsg *ncm;
1764 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
1769 ncm = nlmsg_data(nlh);
1770 ncm->ncm_family = AF_INET;
1772 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
1773 goto nla_put_failure;
1775 /* type -1 is used for ALL */
1776 if ((type == -1 || type == NETCONFA_FORWARDING) &&
1777 nla_put_s32(skb, NETCONFA_FORWARDING,
1778 IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
1779 goto nla_put_failure;
1780 if ((type == -1 || type == NETCONFA_RP_FILTER) &&
1781 nla_put_s32(skb, NETCONFA_RP_FILTER,
1782 IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
1783 goto nla_put_failure;
1784 if ((type == -1 || type == NETCONFA_MC_FORWARDING) &&
1785 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
1786 IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
1787 goto nla_put_failure;
1788 if ((type == -1 || type == NETCONFA_PROXY_NEIGH) &&
1789 nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
1790 IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
1791 goto nla_put_failure;
1792 if ((type == -1 || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
1793 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
1794 IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
1795 goto nla_put_failure;
1797 nlmsg_end(skb, nlh);
/* nla_put_failure: undo the partial message before returning. */
1801 nlmsg_cancel(skb, nlh);
/*
 * Broadcast a netconf change of @type for @ifindex/@devconf to
 * RTNLGRP_IPV4_NETCONF listeners.  Uses GFP_ATOMIC since callers may
 * hold locks; on fill failure the group error state is set instead.
 */
1805 void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
1806 struct ipv4_devconf *devconf)
1808 struct sk_buff *skb;
1811 skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
1815 err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
1816 RTM_NEWNETCONF, 0, type);
1818 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1819 WARN_ON(err == -EMSGSIZE);
1823 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC);
1827 rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
/* Netlink attribute policy for RTM_GETNETCONF requests: each attribute
 * must carry exactly sizeof(int) bytes. */
1830 static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1831 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
1832 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
1833 [NETCONFA_RP_FILTER] = { .len = sizeof(int) },
1834 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
1835 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
/*
 * RTM_GETNETCONF doit handler: look up the requested devconf by ifindex
 * (including the ALL and DEFAULT pseudo-indices), fill a full (-1 type)
 * RTM_NEWNETCONF reply and unicast it to the requester.
 * NOTE(review): error labels, the switch statement head and several
 * checks are elided from this listing — see the full source.
 */
1838 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1839 struct nlmsghdr *nlh)
1841 struct net *net = sock_net(in_skb->sk);
1842 struct nlattr *tb[NETCONFA_MAX+1];
1843 struct netconfmsg *ncm;
1844 struct sk_buff *skb;
1845 struct ipv4_devconf *devconf;
1846 struct in_device *in_dev;
1847 struct net_device *dev;
1851 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
1852 devconf_ipv4_policy);
/* An ifindex is mandatory for a targeted get. */
1857 if (!tb[NETCONFA_IFINDEX])
1860 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
1862 case NETCONFA_IFINDEX_ALL:
1863 devconf = net->ipv4.devconf_all;
1865 case NETCONFA_IFINDEX_DEFAULT:
1866 devconf = net->ipv4.devconf_dflt;
/* default: resolve a real device's per-interface devconf. */
1869 dev = __dev_get_by_index(net, ifindex);
1872 in_dev = __in_dev_get_rtnl(dev);
1875 devconf = &in_dev->cnf;
1880 skb = nlmsg_new(inet_netconf_msgsize_devconf(-1), GFP_ATOMIC);
1884 err = inet_netconf_fill_devconf(skb, ifindex, devconf,
1885 NETLINK_CB(in_skb).portid,
1886 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
1889 /* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
1890 WARN_ON(err == -EMSGSIZE);
1894 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
/*
 * RTM_GETNETCONF dump handler: emit one full netconf message per device,
 * then (at pseudo-buckets NETDEV_HASHENTRIES and NETDEV_HASHENTRIES+1)
 * one for the "all" and "default" devconf tables.  cb->args[] carries
 * the resume cursor, same scheme as inet_dump_ifaddr().
 * NOTE(review): rcu locking, the "cont:" label and cursor save lines
 * are elided from this listing.
 */
1899 static int inet_netconf_dump_devconf(struct sk_buff *skb,
1900 struct netlink_callback *cb)
1902 struct net *net = sock_net(skb->sk);
1905 struct net_device *dev;
1906 struct in_device *in_dev;
1907 struct hlist_head *head;
1910 s_idx = idx = cb->args[1];
1912 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1914 head = &net->dev_index_head[h];
1916 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1918 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1921 in_dev = __in_dev_get_rcu(dev);
1925 if (inet_netconf_fill_devconf(skb, dev->ifindex,
1927 NETLINK_CB(cb->skb).portid,
1935 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
/* Past the device hash: append the "all" pseudo-entry... */
1941 if (h == NETDEV_HASHENTRIES) {
1942 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
1943 net->ipv4.devconf_all,
1944 NETLINK_CB(cb->skb).portid,
1946 RTM_NEWNETCONF, NLM_F_MULTI,
/* ...then the "default" pseudo-entry. */
1952 if (h == NETDEV_HASHENTRIES + 1) {
1953 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
1954 net->ipv4.devconf_dflt,
1955 NETLINK_CB(cb->skb).portid,
1957 RTM_NEWNETCONF, NLM_F_MULTI,
1970 #ifdef CONFIG_SYSCTL
/*
 * Propagate devconf entry @i from the per-netns "default" table to every
 * device that has not locally overridden it (cnf.state bit @i unset —
 * the bit is set by devinet_conf_proc() on explicit writes).
 */
1972 static void devinet_copy_dflt_conf(struct net *net, int i)
1974 struct net_device *dev;
1977 for_each_netdev_rcu(net, dev) {
1978 struct in_device *in_dev;
1980 in_dev = __in_dev_get_rcu(dev);
1981 if (in_dev && !test_bit(i, in_dev->cnf.state))
1982 in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
1987 /* called with RTNL locked */
/*
 * Apply a change of the global "forwarding" setting: mirror it into the
 * default table, flip accept_redirects accordingly, push the new value
 * to every device (disabling LRO, which conflicts with forwarding), and
 * emit RTM_NEWNETCONF notifications at each level.
 */
1988 static void inet_forward_change(struct net *net)
1990 struct net_device *dev;
1991 int on = IPV4_DEVCONF_ALL(net, FORWARDING);
/* A router must not honor redirects; a host should. */
1993 IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
1994 IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
1995 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
1996 NETCONFA_IFINDEX_ALL,
1997 net->ipv4.devconf_all);
1998 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
1999 NETCONFA_IFINDEX_DEFAULT,
2000 net->ipv4.devconf_dflt);
2002 for_each_netdev(net, dev) {
2003 struct in_device *in_dev;
2005 dev_disable_lro(dev);
2007 in_dev = __in_dev_get_rcu(dev);
2009 IN_DEV_CONF_SET(in_dev, FORWARDING, on);
2010 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2011 dev->ifindex, &in_dev->cnf);
/*
 * Map a devconf table back to the ifindex used in netconf notifications:
 * the pseudo-indices for the default/all tables, otherwise the real
 * ifindex of the owning in_device (recovered via container_of, which is
 * valid because per-device cnf is embedded in struct in_device).
 */
2017 static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
2019 if (cnf == net->ipv4.devconf_dflt)
2020 return NETCONFA_IFINDEX_DEFAULT;
2021 else if (cnf == net->ipv4.devconf_all)
2022 return NETCONFA_IFINDEX_ALL;
2024 struct in_device *idev
2025 = container_of(cnf, struct in_device, cnf);
2026 return idev->dev->ifindex;
/*
 * Generic sysctl handler for per-interface ipv4 conf/* entries.
 * After proc_dointvec() applies the write, it: marks the entry as
 * locally set (so default-table propagation skips it), cascades
 * default-table writes, flushes the route cache where a value change
 * invalidates cached decisions, and emits netconf notifications for the
 * attributes userspace tracks (rp_filter, proxy_arp, ignore-linkdown).
 */
2030 static int devinet_conf_proc(struct ctl_table *ctl, int write,
2031 void __user *buffer,
2032 size_t *lenp, loff_t *ppos)
/* Snapshot before/after to detect an actual value change. */
2034 int old_value = *(int *)ctl->data;
2035 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2036 int new_value = *(int *)ctl->data;
2039 struct ipv4_devconf *cnf = ctl->extra1;
2040 struct net *net = ctl->extra2;
/* Index of this entry within cnf->data[], derived from pointer offset. */
2041 int i = (int *)ctl->data - cnf->data;
2044 set_bit(i, cnf->state);
2046 if (cnf == net->ipv4.devconf_dflt)
2047 devinet_copy_dflt_conf(net, i);
2048 if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
2049 i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
2050 if ((new_value == 0) && (old_value != 0))
2051 rt_cache_flush(net);
2053 if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
2054 new_value != old_value) {
2055 ifindex = devinet_conf_ifindex(net, cnf);
2056 inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
2059 if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
2060 new_value != old_value) {
2061 ifindex = devinet_conf_ifindex(net, cnf);
2062 inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
2065 if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
2066 new_value != old_value) {
2067 ifindex = devinet_conf_ifindex(net, cnf);
2068 inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
/*
 * Dedicated sysctl handler for "forwarding".  Writes that change the
 * value must run under RTNL: rtnl_trylock() failure restores the old
 * value and restarts the syscall rather than blocking in sysctl.
 * The "all" table fans out via inet_forward_change(); a per-device
 * write disables LRO on that device and notifies; the default table
 * only needs a notification.  Any change flushes the route cache.
 */
2076 static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
2077 void __user *buffer,
2078 size_t *lenp, loff_t *ppos)
2080 int *valp = ctl->data;
2083 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2085 if (write && *valp != val) {
2086 struct net *net = ctl->extra2;
2088 if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
2089 if (!rtnl_trylock()) {
2090 /* Restore the original values before restarting */
2093 return restart_syscall();
2095 if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
2096 inet_forward_change(net);
/* else: a single device's forwarding flag changed. */
2098 struct ipv4_devconf *cnf = ctl->extra1;
2099 struct in_device *idev =
2100 container_of(cnf, struct in_device, cnf);
2102 dev_disable_lro(idev->dev);
2103 inet_netconf_notify_devconf(net,
2104 NETCONFA_FORWARDING,
2109 rt_cache_flush(net);
/* Default-table change: notify only, no RTNL needed above. */
2111 inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
2112 NETCONFA_IFINDEX_DEFAULT,
2113 net->ipv4.devconf_dflt);
/*
 * Sysctl handler for entries whose change invalidates routing decisions
 * (disable_xfrm, disable_policy, ...): plain proc_dointvec plus a route
 * cache flush whenever a write actually changed the value.
 */
2119 static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
2120 void __user *buffer,
2121 size_t *lenp, loff_t *ppos)
2123 int *valp = ctl->data;
2125 int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
2126 struct net *net = ctl->extra2;
2128 if (write && *valp != val)
2129 rt_cache_flush(net);
/*
 * Helpers building one ctl_table entry for devconf attribute @attr.
 * .data points into the template ipv4_devconf; __devinet_sysctl_register
 * later rebases .data/.extra1/.extra2 onto the cloned per-device table.
 * NOTE(review): some initializer lines (.procname, .mode) are elided
 * from this listing.
 */
2134 #define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
2137 .data = ipv4_devconf.data + \
2138 IPV4_DEVCONF_ ## attr - 1, \
2139 .maxlen = sizeof(int), \
2141 .proc_handler = proc, \
2142 .extra1 = &ipv4_devconf, \
/* Read-write entry with the generic devconf handler. */
2145 #define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
2146 DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
/* Read-only entry (e.g. mc_forwarding is kernel-managed). */
2148 #define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
2149 DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
/* Entry with a custom handler (e.g. forwarding). */
2151 #define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
2152 DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
/* Entry whose writes flush the route cache. */
2154 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
2155 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
/*
 * Template sysctl table for net/ipv4/conf/<dev>/*.  Never registered
 * directly: __devinet_sysctl_register() kmemdup()s it and rebases each
 * entry's data pointers onto the target ipv4_devconf before
 * registration.
 */
2157 static struct devinet_sysctl_table {
2158 struct ctl_table_header *sysctl_header;
2159 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
2160 } devinet_sysctl = {
2162 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
2163 devinet_sysctl_forward),
2164 DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
2166 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
2167 DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
2168 DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
2169 DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
2170 DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
2171 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
2172 "accept_source_route"),
2173 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
2174 DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
2175 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
2176 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
2177 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
2178 DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
2179 DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
2180 DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
2181 DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
2182 DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
2183 DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
2184 DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
2185 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
2186 DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
2187 "force_igmp_version"),
2188 DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
2189 "igmpv2_unsolicited_report_interval"),
2190 DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
2191 "igmpv3_unsolicited_report_interval"),
2192 DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
2193 "ignore_routes_with_linkdown"),
2195 DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
2196 DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
2197 DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
2198 "promote_secondaries"),
2199 DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
/*
 * Clone the devinet_sysctl template, rebase every entry from the
 * template ipv4_devconf onto @p (same byte offset within the new
 * table), and register the result under net/ipv4/conf/@dev_name.
 * The "- 1" in the loop bound leaves the table's NULL terminator alone.
 * NOTE(review): the NULL-check/error paths and the p->sysctl assignment
 * are elided from this listing.
 */
2204 static int __devinet_sysctl_register(struct net *net, char *dev_name,
2205 struct ipv4_devconf *p)
2208 struct devinet_sysctl_table *t;
2209 char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
2211 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
2215 for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
2216 t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
2217 t->devinet_vars[i].extra1 = p;
2218 t->devinet_vars[i].extra2 = net;
2221 snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
2223 t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
2224 if (!t->sysctl_header)
/*
 * Tear down the per-devconf sysctl table registered by
 * __devinet_sysctl_register().  The elided lines presumably handle a
 * NULL cnf->sysctl and free @t — confirm against the full source.
 */
2236 static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
2238 struct devinet_sysctl_table *t = cnf->sysctl;
2244 unregister_net_sysctl_table(t->sysctl_header);
/*
 * Register both sysctl trees for a device: the neighbour (ARP) table
 * and net/ipv4/conf/<dev>.  Rejects device names sysctl cannot safely
 * expose; unwinds the neigh registration if the devconf one fails.
 */
2248 static int devinet_sysctl_register(struct in_device *idev)
2252 if (!sysctl_dev_name_is_allowed(idev->dev->name))
2255 err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
2258 err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
2261 neigh_sysctl_unregister(idev->arp_parms);
/* Mirror of devinet_sysctl_register(): drop the devconf tree first,
 * then the neighbour (ARP) sysctl tree. */
2265 static void devinet_sysctl_unregister(struct in_device *idev)
2267 __devinet_sysctl_unregister(&idev->cnf);
2268 neigh_sysctl_unregister(idev->arp_parms);
/*
 * Template for the legacy net/ipv4/ip_forward sysctl, aliasing the
 * "all" table's FORWARDING slot.  devinet_init_net() duplicates it per
 * namespace and repoints .data/.extra1/.extra2.
 */
2271 static struct ctl_table ctl_forward_entry[] = {
2273 .procname = "ip_forward",
2274 .data = &ipv4_devconf.data[
2275 IPV4_DEVCONF_FORWARDING - 1],
2276 .maxlen = sizeof(int),
2278 .proc_handler = devinet_sysctl_forward,
2279 .extra1 = &ipv4_devconf,
2280 .extra2 = &init_net,
/*
 * Per-namespace init: init_net uses the static ipv4_devconf /
 * ipv4_devconf_dflt / ctl_forward_entry objects directly; every other
 * netns gets kmemdup()ed private copies.  Registers the "all" and
 * "default" sysctl trees plus net/ipv4/ip_forward, with a goto-unwind
 * chain releasing everything on failure.
 * NOTE(review): allocation NULL checks, error labels and the success
 * return are elided from this listing.
 */
2286 static __net_init int devinet_init_net(struct net *net)
2289 struct ipv4_devconf *all, *dflt;
2290 #ifdef CONFIG_SYSCTL
2291 struct ctl_table *tbl = ctl_forward_entry;
2292 struct ctl_table_header *forw_hdr;
2296 all = &ipv4_devconf;
2297 dflt = &ipv4_devconf_dflt;
/* Non-initial namespaces must not share the template tables. */
2299 if (!net_eq(net, &init_net)) {
2300 all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
2304 dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
2306 goto err_alloc_dflt;
2308 #ifdef CONFIG_SYSCTL
2309 tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
/* Repoint the cloned ip_forward entry at this netns' "all" table. */
2313 tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
2314 tbl[0].extra1 = all;
2315 tbl[0].extra2 = net;
2319 #ifdef CONFIG_SYSCTL
2320 err = __devinet_sysctl_register(net, "all", all);
2324 err = __devinet_sysctl_register(net, "default", dflt);
2329 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
2332 net->ipv4.forw_hdr = forw_hdr;
2335 net->ipv4.devconf_all = all;
2336 net->ipv4.devconf_dflt = dflt;
/* Error unwind: release in reverse order of acquisition. */
2339 #ifdef CONFIG_SYSCTL
2341 __devinet_sysctl_unregister(dflt);
2343 __devinet_sysctl_unregister(all);
2345 if (tbl != ctl_forward_entry)
2349 if (dflt != &ipv4_devconf_dflt)
2352 if (all != &ipv4_devconf)
/*
 * Per-namespace teardown: unregister the ip_forward table (recovering
 * the duplicated ctl_table via ctl_table_arg to free it), drop the
 * "default" and "all" sysctl trees, then free the per-netns devconf
 * copies made by devinet_init_net().
 */
2358 static __net_exit void devinet_exit_net(struct net *net)
2360 #ifdef CONFIG_SYSCTL
2361 struct ctl_table *tbl;
2363 tbl = net->ipv4.forw_hdr->ctl_table_arg;
2364 unregister_net_sysctl_table(net->ipv4.forw_hdr);
2365 __devinet_sysctl_unregister(net->ipv4.devconf_dflt);
2366 __devinet_sysctl_unregister(net->ipv4.devconf_all);
2369 kfree(net->ipv4.devconf_dflt);
2370 kfree(net->ipv4.devconf_all);
/* Pernet operations wiring devinet setup/teardown into namespace
 * lifecycle; registered from devinet_init(). */
2373 static __net_initdata struct pernet_operations devinet_ops = {
2374 .init = devinet_init_net,
2375 .exit = devinet_exit_net,
/*
 * AF_INET hooks for rtnetlink link messages: fill/size the
 * IFLA_INET_CONF blob and validate/apply IFLA_AF_SPEC writes.
 * NOTE(review): the .family initializer line is elided in this listing.
 */
2378 static struct rtnl_af_ops inet_af_ops __read_mostly = {
2380 .fill_link_af = inet_fill_link_af,
2381 .get_link_af_size = inet_get_link_af_size,
2382 .validate_link_af = inet_validate_link_af,
2383 .set_link_af = inet_set_link_af,
2386 void __init devinet_init(void)
2390 for (i = 0; i < IN4_ADDR_HSIZE; i++)
2391 INIT_HLIST_HEAD(&inet_addr_lst[i]);
2393 register_pernet_subsys(&devinet_ops);
2395 register_gifconf(PF_INET, inet_gifconf);
2396 register_netdevice_notifier(&ip_netdev_notifier);
2398 queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
2400 rtnl_af_register(&inet_af_ops);
2402 rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
2403 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
2404 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
2405 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2406 inet_netconf_dump_devconf, NULL);