#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>

/*
 * A macvtap queue is the central object of this driver, it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
};

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

/* Offloads that can be passed through to the tap (TUN_OFFLOADS),
 * receive-side offloads toggled alongside them (RX_OFFLOADS), and the
 * baseline feature set used when segmenting for the tap (TAP_FEATURES).
 */
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)

static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
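/*
 * Illustrative sketch (not part of the original source): the reader-side
 * pattern implied by the rules above. Outside of rtnl, q->vlan may only
 * be dereferenced inside an RCU read-side critical section:
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);
 *	if (vlan)
 *		... use vlan->dev here, without sleeping ...
 *	rcu_read_unlock();
 */
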
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	ASSERT_RTNL();

	if (q->enabled)
		return -EINVAL;

	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	vlan->numvtaps++;

	return 0;
}

static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EBUSY;

	rtnl_lock();
	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		goto out;

	err = 0;
	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

out:
	rtnl_unlock();
	return err;
}

static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		/* Move the last queue into the vacated slot so that the
		 * taps array stays densely packed.
		 */
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

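/*
 * Illustrative note (not in the original source): with numvtaps == 4 and
 * skb_get_hash() returning 0x2b67, the packet is steered to
 * taps[0x2b67 % 4] == taps[3]. All packets of one flow share a hash and
 * therefore a queue, which preserves per-flow ordering.
 */
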
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
	int i, j = 0;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		qlist[j++] = q;
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
	}
	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
}

static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			skb_queue_tail(&q->sk.sk_receive_queue, skb);
			goto wake_up;
		}

		kfree_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			skb_queue_tail(&q->sk.sk_receive_queue, segs);
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_ALL_CSUM) &&
		    skb_checksum_help(skb))
			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind = "macvtap",
	.setup = macvtap_setup,
	.newlink = macvtap_newlink,
	.dellink = macvtap_dellink,
};

static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}

static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err;

	err = -ENODEV;
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto out;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);
	if (!q)
		goto out;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * so far only KVM virtio_net uses macvtap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
	if (err)
		sock_put(&q->sk);

out:
	if (dev)
		dev_put(dev);

	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;

	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

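/*
 * Illustrative note (not in the original source): on the zerocopy path
 * macvtap_get_user() passes copylen == GOODCOPY_LEN (128) here when the
 * header supplies no hdr_len hint, so a 64 KiB frame gets a 128-byte
 * linear head for the protocol headers while the payload is attached as
 * paged fragments pinned from user memory; skb->len covers the whole
 * frame, but skb_headlen(skb) stays at 128.
 */
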
/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
{
	unsigned short gso_type = 0;

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			gso_type = SKB_GSO_UDP;
			break;
		default:
			return -EINVAL;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
					  vnet_hdr->csum_offset))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
	return 0;
}

static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
{
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = skb_headlen(skb);
		vnet_hdr->gso_size = sinfo->gso_size;
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (sinfo->gso_type & SKB_GSO_UDP)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
		vnet_hdr->csum_offset = skb->csum_offset;
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
}

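/*
 * Illustrative example (not in the original source): a 9000-byte TCPv4
 * GSO skb with 1448-byte segments would be described to user space as
 * roughly { .gso_type = VIRTIO_NET_HDR_GSO_TCPV4, .gso_size = 1448,
 * .hdr_len = skb_headlen(skb), .flags = VIRTIO_NET_HDR_F_NEEDS_CSUM },
 * leaving the actual segmentation to the consumer of the tap.
 */
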
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				const struct iovec *iv, unsigned long total_len,
				size_t count, int noblock)
{
	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
					  sizeof(vnet_hdr));
		if (err < 0)
			goto err;
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
							vnet_hdr.hdr_len)
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;
		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	err = -EMSGSIZE;
	if (unlikely(count > UIO_MAXIOV))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		if (iov_pages(iv, vnet_hdr_len + copylen, count)
		    <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		if (vnet_hdr.hdr_len > good_linear)
			linear = good_linear;
		else
			linear = vnet_hdr.hdr_len;
	}

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
	else {
		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
						   len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}
	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);
err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t result;

	result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
				  file->f_flags & O_NONBLOCK);
	return result;
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				const struct iovec *iv, int len)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int copied, total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if ((len -= vnet_hdr_len) < 0)
			return -EINVAL;

		macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);

		if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
			return -EFAULT;
	}
	total = copied = vnet_hdr_len;
	total += skb->len;

	if (!vlan_tx_tag_present(skb))
		len = min_t(int, skb->len, len);
	else {
		int copy;
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		len = min_t(int, skb->len + VLAN_HLEN, len);
		total += VLAN_HLEN;

		copy = min_t(int, vlan_offset, len);
		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;

		copy = min_t(int, sizeof(veth), len);
		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
		len -= copy;
		copied += copy;
		if (ret || !len)
			goto done;
	}

	ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);

done:
	return ret ? ret : total;
}

static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       const struct iovec *iv, unsigned long len,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	while (len) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_dequeue(&q->sk.sk_receive_queue);
		if (!skb) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		ret = macvtap_put_user(q, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);
	return ret;
}

static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len, ret = 0;

	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
out:
	return ret;
}

static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}

static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}

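/*
 * Illustrative example (not in the original source): TUNSETOFFLOAD with
 * TUN_F_CSUM | TUN_F_TSO4 yields feature_mask == NETIF_F_HW_CSUM |
 * NETIF_F_TSO, so GRO/LRO stay enabled on the lower device and user
 * space keeps receiving large merged frames; clearing TUN_F_TSO4 again
 * would drop RX_OFFLOADS so user space only sees MTU-sized packets.
 */
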
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
		    (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
			     IFF_MULTI_QUEUE, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;

		q->sk.sk_sndbuf = u;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
	.owner = THIS_MODULE,
	.open = macvtap_open,
	.release = macvtap_release,
	.aio_read = macvtap_aio_read,
	.aio_write = macvtap_aio_write,
	.poll = macvtap_poll,
	.llseek = no_llseek,
	.unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = macvtap_compat_ioctl,
#endif
};

static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
				m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, m->msg_iov, total_len,
			      flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;
	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);

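/*
 * Illustrative usage sketch (not part of this file): an in-kernel
 * consumer such as vhost-net is expected to resolve the socket from a
 * macvtap fd roughly as follows, holding the file reference for as long
 * as the socket is in use:
 *
 *	struct file *f = fget(fd);
 *	struct socket *sock = f ? macvtap_get_socket(f) : ERR_PTR(-EBADF);
 *
 *	if (!IS_ERR(sock))
 *		sock_sendmsg(sock, &msg, total_len);
 *	...
 *	fput(f);
 */
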
static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		break;
	case NETDEV_UNREGISTER:
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call = macvtap_device_event,
};

static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);
		goto out3;
	}

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}

module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");