/*
 * Copyright (c) 2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/rculist.h>
#include <linux/err.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#endif
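
/* Map a tunnel's (key, remote address) pair to a bucket of the per-netns
 * hash table.  The same hash is used when inserting tunnels (ip_bucket)
 * and when resolving incoming packets (ip_tunnel_lookup).
 */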
static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
{
        return hash_32((__force u32)key ^ (__force u32)remote,
                       IP_TNL_HASH_BITS);
}
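
/* Each tunnel keeps a per-cpu cache of its output route (dst_cache) so the
 * transmit path can usually skip a full route lookup.  The helpers below
 * set, fetch, validate and flush those cached entries; routes flagged
 * DST_NOCACHE are deliberately never cached.
 */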
static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
                             struct dst_entry *dst)
{
        struct dst_entry *old_dst;

        if (dst) {
                if (dst->flags & DST_NOCACHE)
                        dst = NULL;
                else
                        dst_clone(dst);
        }
        old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
        dst_release(old_dst);
}

static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
{
        __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
}

static void tunnel_dst_reset(struct ip_tunnel *t)
{
        tunnel_dst_set(t, NULL);
}

static void tunnel_dst_reset_all(struct ip_tunnel *t)
{
        int i;

        for_each_possible_cpu(i)
                __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
}

static struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
{
        struct dst_entry *dst;

        rcu_read_lock();
        dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
        if (dst)
                dst_hold(dst);
        rcu_read_unlock();
        return dst;
}

static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
{
        struct dst_entry *dst = tunnel_dst_get(t);

        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
                tunnel_dst_reset(t);
                return NULL;
        }
        return dst;
}

/* Often modified stats are per cpu, other are shared (netdev->stats) */
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
                                                struct rtnl_link_stats64 *tot)
{
        int i;

        for_each_possible_cpu(i) {
                const struct pcpu_sw_netstats *tstats =
                                                per_cpu_ptr(dev->tstats, i);
                u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin_bh(&tstats->syncp);
                        rx_packets = tstats->rx_packets;
                        tx_packets = tstats->tx_packets;
                        rx_bytes = tstats->rx_bytes;
                        tx_bytes = tstats->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));

                tot->rx_packets += rx_packets;
                tot->tx_packets += tx_packets;
                tot->rx_bytes += rx_bytes;
                tot->tx_bytes += tx_bytes;
        }

        tot->multicast = dev->stats.multicast;

        tot->rx_crc_errors = dev->stats.rx_crc_errors;
        tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
        tot->rx_errors = dev->stats.rx_errors;

        tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
        tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
        tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
        tot->tx_errors = dev->stats.tx_errors;

        tot->collisions = dev->stats.collisions;

        return tot;
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
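
/* A tunnel configured with TUNNEL_KEY only accepts packets that carry the
 * same key; a keyless tunnel only accepts packets without a key.
 */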
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
                                __be16 flags, __be32 key)
{
        if (p->i_flags & TUNNEL_KEY) {
                if (flags & TUNNEL_KEY)
                        return key == p->i_key;
                else
                        /* key expected, none present */
                        return false;
        } else
                return !(flags & TUNNEL_KEY);
}

/* Fallback tunnel: no source, no destination, no key, no options.

   Tunnel hash table:
   We require an exact key match, i.e. if a key is present in the packet
   it will match only a tunnel with the same key; if it is not present,
   it will match only a keyless tunnel.

   All keyless packets, if not matched against a configured keyless tunnel,
   will match the fallback tunnel.
   Given src, dst and key, find the appropriate tunnel for an input packet.
*/
struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   int link, __be16 flags,
                                   __be32 remote, __be32 local,
                                   __be32 key)
{
        unsigned int hash;
        struct ip_tunnel *t, *cand = NULL;
        struct hlist_head *head;

        hash = ip_tunnel_hash(key, remote);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local != t->parms.iph.saddr ||
                    remote != t->parms.iph.daddr ||
                    !(t->dev->flags & IFF_UP))
                        continue;
                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else
                        cand = t;
        }

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (remote != t->parms.iph.daddr ||
                    !(t->dev->flags & IFF_UP))
                        continue;
                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        hash = ip_tunnel_hash(key, 0);
        head = &itn->tunnels[hash];

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if ((local != t->parms.iph.saddr &&
                     (local != t->parms.iph.daddr ||
                      !ipv4_is_multicast(local))) ||
                    !(t->dev->flags & IFF_UP))
                        continue;
                if (!ip_tunnel_key_match(&t->parms, flags, key))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

        if (flags & TUNNEL_NO_KEY)
                goto skip_key_lookup;

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (t->parms.i_key != key ||
                    !(t->dev->flags & IFF_UP))
                        continue;

                if (t->parms.link == link)
                        return t;
                else if (!cand)
                        cand = t;
        }

skip_key_lookup:
        if (cand)
                return cand;

        if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
                return netdev_priv(itn->fb_tunnel_dev);

        return NULL;
}
EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
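
/* Hash bucket for a tunnel's configured parameters.  Multicast and
 * unspecified destinations hash with remote == 0, which matches the
 * wildcard passes of ip_tunnel_lookup() above.
 */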
static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
                                    struct ip_tunnel_parm *parms)
{
        unsigned int h;
        __be32 remote;

        if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
                remote = parms->iph.daddr;
        else
                remote = 0;

        h = ip_tunnel_hash(parms->i_key, remote);
        return &itn->tunnels[h];
}

static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
        struct hlist_head *head = ip_bucket(itn, &t->parms);

        hlist_add_head_rcu(&t->hash_node, head);
}

static void ip_tunnel_del(struct ip_tunnel *t)
{
        hlist_del_init_rcu(&t->hash_node);
}

static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
                                        struct ip_tunnel_parm *parms,
                                        int type)
{
        __be32 remote = parms->iph.daddr;
        __be32 local = parms->iph.saddr;
        __be32 key = parms->i_key;
        int link = parms->link;
        struct ip_tunnel *t = NULL;
        struct hlist_head *head = ip_bucket(itn, parms);

        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (local == t->parms.iph.saddr &&
                    remote == t->parms.iph.daddr &&
                    key == t->parms.i_key &&
                    link == t->parms.link &&
                    type == t->dev->type)
                        break;
        }
        return t;
}
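
/* Allocate a new tunnel netdevice for the given parameters and register it.
 * If no name was supplied, one is generated from the link ops' "kind" plus
 * a "%d" template (e.g. "gre%d").
 */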
static struct net_device *__ip_tunnel_create(struct net *net,
                                             const struct rtnl_link_ops *ops,
                                             struct ip_tunnel_parm *parms)
{
        int err;
        struct ip_tunnel *tunnel;
        struct net_device *dev;
        char name[IFNAMSIZ];

        if (parms->name[0])
                strlcpy(name, parms->name, IFNAMSIZ);
        else {
                if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
                        err = -E2BIG;
                        goto failed;
                }
                strlcpy(name, ops->kind, IFNAMSIZ);
                strncat(name, "%d", 2);
        }

        ASSERT_RTNL();
        dev = alloc_netdev(ops->priv_size, name, ops->setup);
        if (!dev) {
                err = -ENOMEM;
                goto failed;
        }
        dev_net_set(dev, net);

        dev->rtnl_link_ops = ops;

        tunnel = netdev_priv(dev);
        tunnel->parms = *parms;
        tunnel->net = net;

        err = register_netdevice(dev);
        if (err)
                goto failed_free;

        return dev;

failed_free:
        free_netdev(dev);
failed:
        return ERR_PTR(err);
}

static inline void init_tunnel_flow(struct flowi4 *fl4,
                                    int proto,
                                    __be32 daddr, __be32 saddr,
                                    __be32 key, __u8 tos, int oif)
{
        memset(fl4, 0, sizeof(*fl4));
        fl4->flowi4_oif = oif;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->flowi4_tos = tos;
        fl4->flowi4_proto = proto;
        fl4->fl4_gre_key = key;
}
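
/* Pick the underlying output device for the tunnel so that mtu and
 * needed_headroom can be sized sensibly; returns the mtu the tunnel
 * device should use.
 */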
static int ip_tunnel_bind_dev(struct net_device *dev)
{
        struct net_device *tdev = NULL;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *iph;
        int hlen = LL_MAX_HEADER;
        int mtu = ETH_DATA_LEN;
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);

        iph = &tunnel->parms.iph;

        /* Guess output device to choose reasonable mtu and needed_headroom */
        if (iph->daddr) {
                struct flowi4 fl4;
                struct rtable *rt;

                init_tunnel_flow(&fl4, iph->protocol, iph->daddr,
                                 iph->saddr, tunnel->parms.o_key,
                                 RT_TOS(iph->tos), tunnel->parms.link);
                rt = ip_route_output_key(tunnel->net, &fl4);

                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
                        tunnel_dst_set(tunnel, &rt->dst);
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;
        }

        if (!tdev && tunnel->parms.link)
                tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);

        if (tdev) {
                hlen = tdev->hard_header_len + tdev->needed_headroom;
                mtu = tdev->mtu;
        }
        dev->iflink = tunnel->parms.link;

        dev->needed_headroom = t_hlen + hlen;
        mtu -= (dev->hard_header_len + t_hlen);

        if (mtu < 68)
                mtu = 68;

        return mtu;
}

static struct ip_tunnel *ip_tunnel_create(struct net *net,
                                          struct ip_tunnel_net *itn,
                                          struct ip_tunnel_parm *parms)
{
        struct ip_tunnel *nt, *fbt;
        struct net_device *dev;

        BUG_ON(!itn->fb_tunnel_dev);
        fbt = netdev_priv(itn->fb_tunnel_dev);
        dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
        if (IS_ERR(dev))
                return ERR_CAST(dev);

        dev->mtu = ip_tunnel_bind_dev(dev);

        nt = netdev_priv(dev);
        ip_tunnel_add(itn, nt);
        return nt;
}
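
/* Receive path shared by IP-in-IP style tunnels: validate checksum and
 * sequence flags against the tunnel's configuration, undo ECN
 * encapsulation, update stats and hand the inner packet to the tunnel
 * device.
 */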
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
                  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
        struct pcpu_sw_netstats *tstats;
        const struct iphdr *iph = ip_hdr(skb);
        int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(iph->daddr)) {
                /* Looped back packet, drop it! */
                if (rt_is_output_route(skb_rtable(skb)))
                        goto drop;
                tunnel->dev->stats.multicast++;
                skb->pkt_type = PACKET_BROADCAST;
        }
#endif

        if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
            ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
                tunnel->dev->stats.rx_crc_errors++;
                tunnel->dev->stats.rx_errors++;
                goto drop;
        }

        if (tunnel->parms.i_flags&TUNNEL_SEQ) {
                if (!(tpi->flags&TUNNEL_SEQ) ||
                    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
                        tunnel->dev->stats.rx_fifo_errors++;
                        tunnel->dev->stats.rx_errors++;
                        goto drop;
                }
                tunnel->i_seqno = ntohl(tpi->seq) + 1;
        }

        err = IP_ECN_decapsulate(iph, skb);
        if (unlikely(err)) {
                if (log_ecn_error)
                        net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
                                             &iph->saddr, iph->tos);
                if (err > 1) {
                        ++tunnel->dev->stats.rx_frame_errors;
                        ++tunnel->dev->stats.rx_errors;
                        goto drop;
                }
        }

        tstats = this_cpu_ptr(tunnel->dev->tstats);
        u64_stats_update_begin(&tstats->syncp);
        tstats->rx_packets++;
        tstats->rx_bytes += skb->len;
        u64_stats_update_end(&tstats->syncp);

        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

        if (tunnel->dev->type == ARPHRD_ETHER) {
                skb->protocol = eth_type_trans(skb, tunnel->dev);
                skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
        } else {
                skb->dev = tunnel->dev;
        }

        gro_cells_receive(&tunnel->gro_cells, skb);
        return 0;

drop:
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
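
/* Check whether the packet fits the tunnel path MTU; update the inner
 * route's PMTU and emit ICMP/ICMPv6 "too big" errors when it does not.
 */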
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
                           struct rtable *rt, __be16 df)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
        int mtu;

        if (df)
                mtu = dst_mtu(&rt->dst) - dev->hard_header_len
                                        - sizeof(struct iphdr) - tunnel->hlen;
        else
                mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

        if (skb_dst(skb))
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);

        if (skb->protocol == htons(ETH_P_IP)) {
                if (!skb_is_gso(skb) &&
                    (df & htons(IP_DF)) && mtu < pkt_size) {
                        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
                        return -E2BIG;
                }
        }
#if IS_ENABLED(CONFIG_IPV6)
        else if (skb->protocol == htons(ETH_P_IPV6)) {
                struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

                if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
                    mtu >= IPV6_MIN_MTU) {
                        if ((tunnel->parms.iph.daddr &&
                             !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
                            rt6->rt6i_dst.plen == 128) {
                                rt6->rt6i_flags |= RTF_MODIFIED;
                                dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
                        }
                }

                if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
                    mtu < pkt_size) {
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
                        return -E2BIG;
                }
        }
#endif
        return 0;
}
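
/* Common transmit path: resolve the outer destination (fixed or NBMA),
 * find or reuse a cached route, enforce PMTU, build the outer TOS/TTL/DF
 * fields and hand the packet to iptunnel_xmit().
 */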
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, const u8 protocol)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr *inner_iph;
        struct flowi4 fl4;
        u8 tos, ttl;
        __be16 df;
        struct rtable *rt = NULL;       /* Route to the other host */
        unsigned int max_headroom;      /* The extra header space needed */
        __be32 dst;
        int err;
        bool connected = true;

        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);

        dst = tnl_params->daddr;
        if (dst == 0) {
                /* NBMA tunnel */
                if (skb_dst(skb) == NULL) {
                        dev->stats.tx_fifo_errors++;
                        goto tx_error;
                }

                if (skb->protocol == htons(ETH_P_IP)) {
                        rt = skb_rtable(skb);
                        dst = rt_nexthop(rt, inner_iph->daddr);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6)) {
                        const struct in6_addr *addr6;
                        struct neighbour *neigh;
                        bool do_tx_error_icmp;
                        int addr_type;

                        neigh = dst_neigh_lookup(skb_dst(skb),
                                                 &ipv6_hdr(skb)->daddr);
                        if (neigh == NULL)
                                goto tx_error;

                        addr6 = (const struct in6_addr *)&neigh->primary_key;
                        addr_type = ipv6_addr_type(addr6);

                        if (addr_type == IPV6_ADDR_ANY) {
                                addr6 = &ipv6_hdr(skb)->daddr;
                                addr_type = ipv6_addr_type(addr6);
                        }

                        if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
                                do_tx_error_icmp = true;
                        else {
                                do_tx_error_icmp = false;
                                dst = addr6->s6_addr32[3];
                        }
                        neigh_release(neigh);
                        if (do_tx_error_icmp)
                                goto tx_error_icmp;
                }
#endif
                else
                        goto tx_error;

                connected = false;
        }

        tos = tnl_params->tos;
        if (tos & 0x1) {
                tos &= ~0x1;
                if (skb->protocol == htons(ETH_P_IP)) {
                        tos = inner_iph->tos;
                        connected = false;
                } else if (skb->protocol == htons(ETH_P_IPV6)) {
                        tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
                        connected = false;
                }
        }

        init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
                         tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);

        if (connected)
                rt = (struct rtable *)tunnel_dst_check(tunnel, 0);

        if (!rt) {
                rt = ip_route_output_key(tunnel->net, &fl4);
                if (IS_ERR(rt)) {
                        dev->stats.tx_carrier_errors++;
                        goto tx_error;
                }
                if (connected)
                        tunnel_dst_set(tunnel, &rt->dst);
        }

        if (rt->dst.dev == dev) {
                ip_rt_put(rt);
                dev->stats.collisions++;
                goto tx_error;
        }

        if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
                ip_rt_put(rt);
                goto tx_error;
        }

        if (tunnel->err_count > 0) {
                if (time_before(jiffies,
                                tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
                        tunnel->err_count--;

                        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
                        dst_link_failure(skb);
                } else
                        tunnel->err_count = 0;
        }

        tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
        ttl = tnl_params->ttl;
        if (ttl == 0) {
                if (skb->protocol == htons(ETH_P_IP))
                        ttl = inner_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
#endif
                else
                        ttl = ip4_dst_hoplimit(&rt->dst);
        }

        df = tnl_params->frag_off;
        if (skb->protocol == htons(ETH_P_IP))
                df |= (inner_iph->frag_off&htons(IP_DF));

        max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
                        + rt->dst.header_len;
        if (max_headroom > dev->needed_headroom)
                dev->needed_headroom = max_headroom;

        if (skb_cow_head(skb, dev->needed_headroom)) {
                dev->stats.tx_dropped++;
                kfree_skb(skb);
                return;
        }

        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
                            tos, ttl, df, !net_eq(tunnel->net, dev_net(dev)));
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);

        return;

#if IS_ENABLED(CONFIG_IPV6)
tx_error_icmp:
        dst_link_failure(skb);
#endif
tx_error:
        dev->stats.tx_errors++;
        kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
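
/* Apply new parameters to an existing tunnel: re-hash it under the new
 * addresses and keys, rebind to the underlying device if the link ifindex
 * changed, and flush the cached routes.
 */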
static void ip_tunnel_update(struct ip_tunnel_net *itn,
                             struct ip_tunnel *t,
                             struct net_device *dev,
                             struct ip_tunnel_parm *p,
                             bool set_mtu)
{
        ip_tunnel_del(t);
        t->parms.iph.saddr = p->iph.saddr;
        t->parms.iph.daddr = p->iph.daddr;
        t->parms.i_key = p->i_key;
        t->parms.o_key = p->o_key;
        if (dev->type != ARPHRD_ETHER) {
                memcpy(dev->dev_addr, &p->iph.saddr, 4);
                memcpy(dev->broadcast, &p->iph.daddr, 4);
        }
        ip_tunnel_add(itn, t);

        t->parms.iph.ttl = p->iph.ttl;
        t->parms.iph.tos = p->iph.tos;
        t->parms.iph.frag_off = p->iph.frag_off;

        if (t->parms.link != p->link) {
                int mtu;

                t->parms.link = p->link;
                mtu = ip_tunnel_bind_dev(dev);
                if (set_mtu)
                        dev->mtu = mtu;
        }
        tunnel_dst_reset_all(t);
        netdev_state_change(dev);
}

int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
{
        int err = 0;
        struct ip_tunnel *t;
        struct net *net = dev_net(dev);
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

        BUG_ON(!itn->fb_tunnel_dev);
        switch (cmd) {
        case SIOCGETTUNNEL:
                t = NULL;
                if (dev == itn->fb_tunnel_dev)
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                if (t == NULL)
                        t = netdev_priv(dev);
                memcpy(p, &t->parms, sizeof(*p));
                break;

        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;
                if (p->iph.ttl)
                        p->iph.frag_off |= htons(IP_DF);
                if (!(p->i_flags&TUNNEL_KEY))
                        p->i_key = 0;
                if (!(p->o_flags&TUNNEL_KEY))
                        p->o_key = 0;

                t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);

                if (!t && (cmd == SIOCADDTUNNEL))
                        t = ip_tunnel_create(net, itn, p);

                if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else {
                                unsigned int nflags = 0;

                                if (ipv4_is_multicast(p->iph.daddr))
                                        nflags = IFF_BROADCAST;
                                else if (p->iph.daddr)
                                        nflags = IFF_POINTOPOINT;

                                if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
                                        err = -EINVAL;
                                        break;
                                }

                                t = netdev_priv(dev);
                        }
                }

                if (t) {
                        err = 0;
                        ip_tunnel_update(itn, t, dev, p, true);
                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;

        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                        goto done;

                if (dev == itn->fb_tunnel_dev) {
                        err = -ENOENT;
                        t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
                        if (t == NULL)
                                goto done;
                        err = -EPERM;
                        if (t == netdev_priv(itn->fb_tunnel_dev))
                                goto done;
                        dev = t->dev;
                }
                unregister_netdevice(dev);
                err = 0;
                break;

        default:
                err = -EINVAL;
        }

done:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);

int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);

        if (new_mtu < 68 ||
            new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);

static void ip_tunnel_dev_free(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        gro_cells_destroy(&tunnel->gro_cells);
        free_percpu(tunnel->dst_cache);
        free_percpu(dev->tstats);
        free_netdev(dev);
}

void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct ip_tunnel_net *itn;

        itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);

        if (itn->fb_tunnel_dev != dev) {
                ip_tunnel_del(netdev_priv(dev));
                unregister_netdevice_queue(dev, head);
        }
}
EXPORT_SYMBOL_GPL(ip_tunnel_dellink);

int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
                       struct rtnl_link_ops *ops, char *devname)
{
        struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
        struct ip_tunnel_parm parms;
        unsigned int i;

        for (i = 0; i < IP_TNL_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&itn->tunnels[i]);

        if (!ops) {
                itn->fb_tunnel_dev = NULL;
                return 0;
        }

        memset(&parms, 0, sizeof(parms));
        if (devname)
                strlcpy(parms.name, devname, IFNAMSIZ);

        rtnl_lock();
        itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
        /* FB netdevice is special: we have one, and only one per netns.
         * Allowing to move it to another netns is clearly unsafe.
         */
        if (!IS_ERR(itn->fb_tunnel_dev)) {
                itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
                ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
        }
        rtnl_unlock();

        return PTR_RET(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
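
/* Queue every tunnel device belonging to this netns/ops pair for
 * unregistration, including tunnels hashed here whose device currently
 * lives in another netns.
 */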
static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
                              struct rtnl_link_ops *ops)
{
        struct net *net = dev_net(itn->fb_tunnel_dev);
        struct net_device *dev, *aux;
        int h;

        for_each_netdev_safe(net, dev, aux)
                if (dev->rtnl_link_ops == ops)
                        unregister_netdevice_queue(dev, head);

        for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
                struct ip_tunnel *t;
                struct hlist_node *n;
                struct hlist_head *thead = &itn->tunnels[h];

                hlist_for_each_entry_safe(t, n, thead, hash_node)
                        /* If dev is in the same netns, it has already
                         * been added to the list by the previous loop.
                         */
                        if (!net_eq(dev_net(t->dev), net))
                                unregister_netdevice_queue(t->dev, head);
        }
}

void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
        LIST_HEAD(list);

        rtnl_lock();
        ip_tunnel_destroy(itn, &list, ops);
        unregister_netdevice_many(&list);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);

int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm *p)
{
        struct ip_tunnel *nt;
        struct net *net = dev_net(dev);
        struct ip_tunnel_net *itn;
        int mtu;
        int err;

        nt = netdev_priv(dev);
        itn = net_generic(net, nt->ip_tnl_net_id);

        if (ip_tunnel_find(itn, p, dev->type))
                return -EEXIST;

        nt->net = net;
        nt->parms = *p;
        err = register_netdevice(dev);
        if (err)
                goto out;

        if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
                eth_hw_addr_random(dev);

        mtu = ip_tunnel_bind_dev(dev);
        if (!tb[IFLA_MTU])
                dev->mtu = mtu;

        ip_tunnel_add(itn, nt);

out:
        return err;
}
EXPORT_SYMBOL_GPL(ip_tunnel_newlink);

int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
                         struct ip_tunnel_parm *p)
{
        struct ip_tunnel *t;
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);

        if (dev == itn->fb_tunnel_dev)
                return -EINVAL;

        t = ip_tunnel_find(itn, p, dev->type);

        if (t) {
                if (t->dev != dev)
                        return -EEXIST;
        } else {
                t = tunnel;

                if (dev->type != ARPHRD_ETHER) {
                        unsigned int nflags = 0;

                        if (ipv4_is_multicast(p->iph.daddr))
                                nflags = IFF_BROADCAST;
                        else if (p->iph.daddr)
                                nflags = IFF_POINTOPOINT;

                        if ((dev->flags ^ nflags) &
                            (IFF_POINTOPOINT | IFF_BROADCAST))
                                return -EINVAL;
                }
        }

        ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
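
/* ndo_init for tunnel devices: allocate per-cpu stats and the per-cpu
 * route cache, set up GRO cells, and seed the outer IPv4 header template.
 */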
int ip_tunnel_init(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct iphdr *iph = &tunnel->parms.iph;
        int i, err;

        dev->destructor = ip_tunnel_dev_free;
        dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                struct pcpu_sw_netstats *ipt_stats;
                ipt_stats = per_cpu_ptr(dev->tstats, i);
                u64_stats_init(&ipt_stats->syncp);
        }

        tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
        if (!tunnel->dst_cache) {
                free_percpu(dev->tstats);
                return -ENOMEM;
        }

        err = gro_cells_init(&tunnel->gro_cells, dev);
        if (err) {
                free_percpu(tunnel->dst_cache);
                free_percpu(dev->tstats);
                return err;
        }

        tunnel->dev = dev;
        tunnel->net = dev_net(dev);
        strcpy(tunnel->parms.name, dev->name);
        iph->version = 4;
        iph->ihl = 5;

        return 0;
}
EXPORT_SYMBOL_GPL(ip_tunnel_init);

void ip_tunnel_uninit(struct net_device *dev)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);
        struct net *net = tunnel->net;
        struct ip_tunnel_net *itn;

        itn = net_generic(net, tunnel->ip_tnl_net_id);
        /* fb_tunnel_dev will be unregistered in net-exit call. */
        if (itn->fb_tunnel_dev != dev)
                ip_tunnel_del(netdev_priv(dev));

        tunnel_dst_reset_all(tunnel);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);

/* Do least required initialization, rest of init is done in tunnel_init call */
void ip_tunnel_setup(struct net_device *dev, int net_id)
{
        struct ip_tunnel *tunnel = netdev_priv(dev);

        tunnel->ip_tnl_net_id = net_id;
}
EXPORT_SYMBOL_GPL(ip_tunnel_setup);

MODULE_LICENSE("GPL");