/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012-2013 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/ndisc.h>
#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#define VXLAN_VERSION	"0.1"

#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL	(10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
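/* A minimal layout consistent with how the header is used below: vx_flags
 * must carry VXLAN_FLAGS, and the 24-bit VNI occupies the upper three bytes
 * of vx_vni (the low byte is reserved and checked to be zero on receive).
 */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};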
/* UDP port for VXLAN traffic.
 * The IANA assigned port is 4789, but the Linux default is 8472
 * for compatibility with early adopters.
 */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");
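/* The parameter is read-only once loaded (mode 0444), so switching to the
 * IANA port has to be done at module load time, e.g.:
 *
 *	modprobe vxlan udp_port=4789
 */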
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
/* per-net private data for this module */
static unsigned int vxlan_net_id;
struct vxlan_net {
	struct socket	  *sock;	/* UDP encap socket */
	struct hlist_head vni_list[VNI_HASH_SIZE];
};
/* Remote destination (one of possibly several) for a forwarding entry */
struct vxlan_rdst {
	__be32			 remote_ip;
	__be16			 remote_port;
	u32			 remote_vni;
	u32			 remote_ifindex;
	struct vxlan_rdst	*remote_next;
};
/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	struct vxlan_rdst remote;
	u16		  state;	/* see ndm_state */
	u8		  flags;	/* see ndm_flags */
	u8		  eth_addr[ETH_ALEN];
};
/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;
	struct net_device *dev;
	struct vxlan_rdst default_dst;	/* default destination */
	__be32		  saddr;	/* source address */
	__be16		  dst_port;
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};
#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10

/* salt for hash table */
static u32 vxlan_salt __read_mostly;
static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
	struct vxlan_dev *vxlan;

	hlist_for_each_entry_rcu(vxlan, vni_head(net, id), hlist) {
		if (vxlan->default_dst.remote_vni == id)
			return vxlan;
	}
	return NULL;
}
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags,
			  const struct vxlan_rdst *rdst)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family = AF_INET;
		send_ip = rdst->remote_ip != htonl(INADDR_ANY);
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family = AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = fdb->flags;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
		goto nla_put_failure;

	if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
	    nla_put_be16(skb, NDA_PORT, rdst->remote_port))
		goto nla_put_failure;
	if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
	    nla_put_be32(skb, NDA_VNI, rdst->remote_vni))
		goto nla_put_failure;
	if (rdst->remote_ifindex &&
	    nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
		+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
		+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     const struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, &fdb->remote);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.remote.remote_ip = ipa; /* goes to NDA_DST */
	f.remote.remote_vni = VXLAN_N_VID;

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}
/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
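	/* Sketch of one way to do that: shift the two bytes that lie beyond
	 * the MAC address out of the 64-bit load before hashing.
	 */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif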
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
					  const u8 *mac)
{
	struct vxlan_fdb *f;
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);

	hlist_for_each_entry_rcu(f, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}
	return NULL;
}

static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)
{
	struct vxlan_fdb *f;

	f = __vxlan_find_mac(vxlan, mac);
	if (f)
		f->used = jiffies;

	return f;
}
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
			    __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
{
	struct vxlan_rdst *rd_prev, *rd;

	rd_prev = NULL;
	for (rd = &f->remote; rd; rd = rd->remote_next) {
		if (rd->remote_ip == ip &&
		    rd->remote_port == port &&
		    rd->remote_vni == vni &&
		    rd->remote_ifindex == ifindex)
			return 0;
		rd_prev = rd;
	}
	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (rd == NULL)
		return -ENOBUFS;
	rd->remote_ip = ip;
	rd->remote_port = port;
	rd->remote_vni = vni;
	rd->remote_ifindex = ifindex;
	rd->remote_next = NULL;
	rd_prev->remote_next = rd;
	return 1;
}
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags,
			    __be16 port, __u32 vni, __u32 ifindex,

	f = __vxlan_find_mac(vxlan, mac);
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
		if (f->state != state) {
			f->updated = jiffies;
		if (f->flags != ndm_flags) {
			f->flags = ndm_flags;
			f->updated = jiffies;
		if ((flags & NLM_F_APPEND) &&
		    is_multicast_ether_addr(f->eth_addr)) {
			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);

		if (!(flags & NLM_F_CREATE))

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);

		f->remote.remote_ip = ip;
		f->remote.remote_port = port;
		f->remote.remote_vni = vni;
		f->remote.remote_ifindex = ifindex;
		f->remote.remote_next = NULL;
		f->flags = ndm_flags;
		f->updated = f->used = jiffies;
		memcpy(f->eth_addr, mac, ETH_ALEN);

		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));

		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
static void vxlan_fdb_free(struct rcu_head *head)
{
	struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);

	while (f->remote.remote_next) {
		struct vxlan_rdst *rd = f->remote.remote_next;

		f->remote.remote_next = rd->remote_next;
		kfree(rd);
	}
	kfree(f);
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	call_rcu(&f->rcu, vxlan_fdb_free);
}
/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct net *net = dev_net(vxlan->dev);

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",

	if (tb[NDA_DST] == NULL)

	if (nla_len(tb[NDA_DST]) != sizeof(__be32))
		return -EAFNOSUPPORT;

	ip = nla_get_be32(tb[NDA_DST]);

		if (nla_len(tb[NDA_PORT]) != sizeof(__be16))
		port = nla_get_be16(tb[NDA_PORT]);
		port = vxlan->dst_port;

		if (nla_len(tb[NDA_VNI]) != sizeof(u32))
		vni = nla_get_u32(tb[NDA_VNI]);
		vni = vxlan->default_dst.remote_vni;

	if (tb[NDA_IFINDEX]) {
		struct net_device *tdev;

		if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
		ifindex = nla_get_u32(tb[NDA_IFINDEX]);
		tdev = dev_get_by_index(net, ifindex);
			return -EADDRNOTAVAIL;

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags,
			       port, vni, ifindex, ndm->ndm_flags);
	spin_unlock_bh(&vxlan->hash_lock);
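
/* A static entry is normally installed from user space with iproute2,
 * e.g. (device name and addresses are only illustrative):
 *
 *	bridge fdb add 00:11:22:33:44:55 dev vxlan0 dst 192.0.2.10
 *
 * which arrives here as RTM_NEWNEIGH carrying NDA_DST and, optionally,
 * NDA_PORT, NDA_VNI and NDA_IFINDEX as parsed above.
 */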
/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (f) {
		vxlan_fdb_destroy(vxlan, f);
		err = 0;
	}
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}
/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
	struct vxlan_dev *vxlan = netdev_priv(dev);

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
			struct vxlan_rdst *rd;
			for (rd = &f->remote; rd; rd = rd->remote_next) {
				if (idx < cb->args[0])
				err = vxlan_fdb_info(skb, vxlan, f,
						     NETLINK_CB(cb->skb).portid,
/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
	struct vxlan_dev *vxlan = netdev_priv(dev);

	f = vxlan_find_mac(vxlan, src_mac);
		if (likely(f->remote.remote_ip == src_ip))

			    "%pM migrated from %pI4 to %pI4\n",
			    src_mac, &f->remote.remote_ip, &src_ip);

		f->remote.remote_ip = src_ip;
		f->updated = jiffies;

		/* learned new entry */
		spin_lock(&vxlan->hash_lock);

		/* close off race between vxlan_flush and incoming packets */
		if (netif_running(dev))
			vxlan_fdb_create(vxlan, src_mac, src_ip,
					 NLM_F_EXCL|NLM_F_CREATE,
					 vxlan->default_dst.remote_vni,
		spin_unlock(&vxlan->hash_lock);
/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn,
			     const struct vxlan_dev *this)
	const struct vxlan_dev *vxlan;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist) {
			if (!netif_running(vxlan->dev))
			if (vxlan->default_dst.remote_ip == this->default_dst.remote_ip)
/* kernel equivalent to IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->default_dst.remote_ip,
		.imr_ifindex		= vxlan->default_dst.remote_ifindex,
	};

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))

	/* Need to drop RTNL to call multicast join */
	err = ip_mc_join_group(sk, &mreq);
/* kernel equivalent to IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr	= vxlan->default_dst.remote_ip,
		.imr_ifindex		= vxlan->default_dst.remote_ifindex,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))

	/* Need to drop RTNL to call multicast leave */
	err = ip_mc_leave_group(sk, &mreq);
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct pcpu_tstats *stats;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need VXLAN and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));

	__skb_pull(skb, sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;

	skb_reset_mac_header(skb);

	/* Re-examine inner Ethernet packet */
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)

	if (vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);

	/* If the NIC driver gave us an encapsulated packet with
	 * CHECKSUM_UNNECESSARY and Rx checksum feature is enabled,
	 * leave the CHECKSUM_UNNECESSARY, the device checksummed it
	 * for us. Otherwise force the upper layers to verify it.
	 */
	if (skb->ip_summed != CHECKSUM_UNNECESSARY || !skb->encapsulation ||
	    !(vxlan->dev->features & NETIF_F_RXCSUM))
		skb->ip_summed = CHECKSUM_NONE;

	skb->encapsulation = 0;

	err = IP_ECN_decapsulate(oip, skb);
		net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
				     &oip->saddr, oip->tos);
		++vxlan->dev->stats.rx_frame_errors;
		++vxlan->dev->stats.rx_errors;

	stats = this_cpu_ptr(vxlan->dev->tstats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	/* Consume bad packet */
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (dev->flags & IFF_NOARP)

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||

	arpptr = (u8 *)parp + sizeof(struct arphdr);
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))

	n = neigh_lookup(&arp_tbl, &tip, dev);
		struct sk_buff *reply;

		if (!(n->nud_state & NUD_CONNECTED)) {

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && f->remote.remote_ip == htonl(INADDR_ANY)) {
			/* bridge-local neighbor */

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, tip);
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))

	switch (ntohs(eth_hdr(skb)->h_proto)) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);

		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);

	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, pip->daddr);
static void vxlan_sock_free(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;

	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_free;
}
/* Compute source port for outgoing packet
 * first choice to use L4 flow hash since it will spread
 * better and may be available from hardware
 * secondary choice is to use jhash on the Ethernet header
 */
static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
	u32 hash;

	hash = skb_get_rxhash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);
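
	/* Scale the 32-bit hash into [port_min, port_max] without a divide:
	 * multiply by the range size and keep the upper 32 bits of the
	 * 64-bit product as the offset into the configured port window.
	 */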
	return htons((((u64) hash * range) >> 32) + vxlan->port_min);
}
static int handle_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;

		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	} else if (skb->ip_summed != CHECKSUM_PARTIAL)
		skb->ip_summed = CHECKSUM_NONE;

	return 0;
}
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
			       struct vxlan_dev *dst_vxlan)
{
	struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
	struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);

	skb->pkt_type = PACKET_HOST;
	skb->encapsulation = 0;
	skb->dev = dst_vxlan->dev;
	__skb_pull(skb, skb_network_offset(skb));

	if (dst_vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK),
			    eth_hdr(skb)->h_source);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->tx_packets++;
	tx_stats->tx_bytes += skb->len;
	u64_stats_update_end(&tx_stats->syncp);

	if (netif_rx(skb) == NET_RX_SUCCESS) {
		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);
	} else {
		skb->dev->stats.rx_dropped++;
	}
}
static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
				  struct vxlan_rdst *rdst, bool did_rsc)
	struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct iphdr *old_iph;
	struct vxlanhdr *vxh;
	__be16 src_port, dst_port;

	dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
	vni = rdst->remote_vni;
	dst = rdst->remote_ip;

		/* short-circuited back to local bridge */
		vxlan_encap_bypass(skb, vxlan, vxlan);

	if (!skb->encapsulation) {
		skb_reset_inner_headers(skb);
		skb->encapsulation = 1;

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))

	old_iph = ip_hdr(skb);

	if (!ttl && IN_MULTICAST(ntohl(dst)))

	tos = ip_tunnel_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = rdst->remote_ifindex;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		dev->stats.collisions++;

	/* Bypass encapsulation if the destination is local */
	if (rt->rt_flags & RTCF_LOCAL &&
	    !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
		struct vxlan_dev *dst_vxlan;

		dst_vxlan = vxlan_find_vni(dev_net(dev), vni);
		vxlan_encap_bypass(skb, vxlan, dst_vxlan);
		return NETDEV_TX_OK;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
	skb_dst_set(skb, &rt->dst);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);

	uh->dest = dst_port;
	uh->source = src_port;

	uh->len = htons(skb->len);

	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->protocol	= IPPROTO_UDP;
	iph->tos	= ip_tunnel_ecn_encap(tos, old_iph, skb);
	iph->saddr	= fl4.saddr;
	iph->ttl	= ttl ? : ip4_dst_hoplimit(&rt->dst);
	tunnel_ip_select_ident(skb, old_iph, &rt->dst);

	vxlan_set_owner(dev, skb);

	if (handle_offloads(skb))

	iptunnel_xmit(skb, dev);
	return NETDEV_TX_OK;

	dev->stats.tx_dropped++;

	dev->stats.tx_errors++;

	return NETDEV_TX_OK;
/* Transmit local packets over VXLAN
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 * Source port is based on hash of flow.
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
	struct vxlan_dev *vxlan = netdev_priv(dev);
	bool did_rsc = false;
	struct vxlan_rdst *rdst0, *rdst;
	struct vxlan_fdb *f;

	skb_reset_mac_header(skb);

	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
		return arp_reduce(dev, skb);

	f = vxlan_find_mac(vxlan, eth->h_dest);

	if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
	    ntohs(eth->h_proto) == ETH_P_IP) {
		did_rsc = route_shortcircuit(dev, skb);
			f = vxlan_find_mac(vxlan, eth->h_dest);

		rdst0 = &vxlan->default_dst;

		if (rdst0->remote_ip == htonl(INADDR_ANY) &&
		    (vxlan->flags & VXLAN_F_L2MISS) &&
		    !is_multicast_ether_addr(eth->h_dest))
			vxlan_fdb_miss(vxlan, eth->h_dest);

	/* if there are multiple destinations, send copies */
	for (rdst = rdst0->remote_next; rdst; rdst = rdst->remote_next) {
		struct sk_buff *skb1;

		skb1 = skb_clone(skb, GFP_ATOMIC);
			rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
			if (rc == NETDEV_TX_OK)

	rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
	if (rc == NETDEV_TX_OK)
/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
		err = vxlan_join_group(dev);
		if (err)
			return err;
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}
/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned int h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)))
		vxlan_leave_group(dev);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}
/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

static void vxlan_free(struct net_device *dev)
{
	free_percpu(dev->tstats);
	free_netdev(dev);
}
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
	struct vxlan_dev *vxlan = netdev_priv(dev);

	eth_hw_addr_random(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->features	|= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features	|= NETIF_F_RXCSUM;
	dev->features	|= NETIF_F_GSO_SOFTWARE;

	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->priv_flags	&= ~IFF_XMIT_DST_RELEASE;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;
	vxlan->dst_port = htons(vxlan_port);

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_PORT]	= { .type = NLA_U16 },
};
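
/* These attributes correspond to the iproute2 front end; a device is
 * typically created with something like (values are only illustrative):
 *
 *	ip link add vxlan0 type vxlan id 42 group 239.1.1.1 dev eth1
 *
 * which reaches vxlan_newlink() below as IFLA_VXLAN_ID, IFLA_VXLAN_GROUP
 * and IFLA_VXLAN_LINK.
 */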
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
static void vxlan_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->version, VXLAN_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->driver, "vxlan", sizeof(drvinfo->driver));
}

static const struct ethtool_ops vxlan_ethtool_ops = {
	.get_drvinfo	= vxlan_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};
static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_rdst *dst = &vxlan->default_dst;

	if (!data[IFLA_VXLAN_ID])

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	if (vxlan_find_vni(net, vni)) {
		pr_info("duplicate VNI %u\n", vni);
	dst->remote_vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			 = __dev_get_by_index(net, dst->remote_ifindex);

			pr_info("ifindex %d does not exist\n", dst->remote_ifindex);

		dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	if (data[IFLA_VXLAN_PORT])
		vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);

	SET_ETHTOOL_OPS(dev, &vxlan_ethtool_ops);

	err = register_netdevice(dev);
	if (!err)
		hlist_add_head_rcu(&vxlan->hlist, vni_head(net, dst->remote_vni));
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	hlist_del_rcu(&vxlan->hlist);

	unregister_netdevice_queue(dev, head);
}
static size_t vxlan_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		nla_total_size(sizeof(__be16)) +/* IFLA_VXLAN_PORT */
		0;
}
static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	const struct vxlan_rdst *dst = &vxlan->default_dst;
	struct ifla_vxlan_port_range ports = {
		.low  = htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
		goto nla_put_failure;

	if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip))
		goto nla_put_failure;

	if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
		       !!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
		       !!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
		       !!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
		       !!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
	    nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};
static __net_init int vxlan_init_net(struct net *net)
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct sockaddr_in vxlan_addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	/* Create UDP socket for encapsulation receive. */
	rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
		pr_debug("UDP socket create failed\n");

	/* Put in proper namespace */
	sk_change_net(sk, net);

	vxlan_addr.sin_port = htons(vxlan_port);

	rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
			 sizeof(vxlan_addr));
		pr_debug("bind for UDP socket %pI4:%u (%d)\n",
			 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
		sk_release_kernel(sk);

	/* Disable multicast loopback */
	inet_sk(sk)->mc_loop = 0;

	/* Mark socket as an encapsulation socket. */
	udp_sk(sk)->encap_type = 1;
	udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vn->vni_list[h]);
static __net_exit void vxlan_exit_net(struct net *net)
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);
	struct vxlan_dev *vxlan;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist)
			dev_close(vxlan->dev);

	sk_release_kernel(vn->sock->sk);

static struct pernet_operations vxlan_net_ops = {
	.init = vxlan_init_net,
	.exit = vxlan_exit_net,
	.id   = &vxlan_net_id,
	.size = sizeof(struct vxlan_net),
};
static int __init vxlan_init_module(void)
	get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));

	rc = register_pernet_device(&vxlan_net_ops);

	rc = rtnl_link_register(&vxlan_link_ops);

	unregister_pernet_device(&vxlan_net_ops);
module_init(vxlan_init_module);
static void __exit vxlan_cleanup_module(void)
{
	rtnl_link_unregister(&vxlan_link_ops);
	unregister_pernet_device(&vxlan_net_ops);
}
module_exit(vxlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(VXLAN_VERSION);
MODULE_AUTHOR("Stephen Hemminger <stephen@networkplumber.org>");
MODULE_ALIAS_RTNL_LINK("vxlan");