/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

/* Restrict GSO size to account for NVGRE */
#define NETVSC_GSO_MAX_SIZE	62768

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
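
/* Note: ring_size is read-only after module load; e.g. loading with
 * "modprobe hv_netvsc ring_size=256" would give each VMBus channel a
 * 256-page (1 MiB with 4 KiB pages) ring buffer in each direction.
 */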

static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}
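
/* ndo_set_rx_mode is invoked in atomic context (with the netdev address
 * lock held), while rndis_filter_set_packet_filter() sends a request to
 * the host and may sleep waiting for the reply, so the actual filter
 * update is deferred to the work item above.
 */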

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}
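
/* The drain loop above polls the inbound and outbound rings of every
 * channel, sleeping between passes with a doubling backoff (10 ms up to
 * 1 s, at most retry_max passes), so close returns only once the host
 * has consumed any pending ring data or the wait times out.
 */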

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   u32 pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

#define HASH_KEYLEN NETVSC_SET_HASH_KEYLEN
extern u8 netvsc_hash_key[];

/* Toeplitz hash function
 * data: network byte order
 * return: host byte order
 */
static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
{
	union sub_key {
		u64 k;
		struct {
			u8 kb;
			u32 ka;
		};
	} subk;
	int k_next = 4;
	u8 dt;
	int i, j;
	u32 ret = 0;

	subk.k = 0;
	subk.ka = ntohl(*(u32 *)key);

	for (i = 0; i < dlen; i++) {
		subk.kb = key[k_next];
		k_next = (k_next + 1) % klen;
		dt = ((u8 *)data)[i];
		for (j = 0; j < 8; j++) {
			if (dt & 0x80)
				ret ^= subk.ka;
			dt <<= 1;
			subk.k <<= 1;
		}
	}

	return ret;
}
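
/* In effect, every set bit of the input XORs the current 32-bit key
 * window into the result, and the window slides one bit per input bit.
 * For example, with leading key bytes 0x6d 0x5a 0x56 0xda, an input whose
 * first byte is 0x80 contributes exactly 0x6d5a56da, since only bit 7 of
 * that byte is set.
 */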

static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct flow_keys flow;
	int data_len;

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
	    !(flow.basic.n_proto == htons(ETH_P_IP) ||
	      flow.basic.n_proto == htons(ETH_P_IPV6)))
		return false;

	if (flow.basic.ip_proto == IPPROTO_TCP)
		data_len = 12;
	else
		data_len = 8;

	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);

	return true;
}
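
/* Only the first data_len bytes of the dissected flow keys are hashed:
 * 12 for TCP and 8 for everything else, so TCP flows get a finer-grained
 * spread across queues than other traffic between the same host pair.
 */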

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev = net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	if (netvsc_set_hash(&hash, skb)) {
		q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
			ndev->real_num_tx_queues;
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return q_idx;
}
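
/* send_table[] is an indirection table of VRSS_SEND_TAB_SIZE entries
 * populated from the host's send indirection table; the extra modulo by
 * real_num_tx_queues guards against the table referencing more channels
 * than were actually brought up.
 */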

void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->send_completion_tid;

	if (skb)
		dev_kfree_skb_any(skb);
}

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
			struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}
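
/* Example with 4 KiB pages: offset 4000 and len 1000 emits a 96-byte
 * slot at the tail of the first page and a 904-byte slot at offset 0 of
 * the next page, and returns 2.
 */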

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet)
{
	struct hv_page_buffer *pb = packet->page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}
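
/* rmsg_size and rmsg_pgcnt record the portion of the page array that
 * holds the RNDIS header; this lets the send path copy that small prefix
 * into the preallocated send buffer rather than passing extra pages to
 * the host.
 */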

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused pages at the start of a compound page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
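
/* Example: 6000 bytes of linear data starting at in-page offset 100 need
 * DIV_ROUND_UP(100 + 6000, 4096) = 2 slots; fragment slots are added on
 * top by count_skb_frag_slots().
 */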

static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}
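
/* The return value packs the IP version into the upper 16 bits and the
 * transport protocol into the lower 16 bits (e.g. TRANSPORT_INFO_IPV4_TCP
 * is (INFO_IPV4 << 16) | INFO_TCP), which is why the transmit path below
 * tests the two halves independently.
 */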

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	bool linear = false;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	int hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	u32 pkt_sz;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

check_size:
	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
		net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
				      num_data_pgs, skb->len);
		ret = -EFAULT;
		goto drop;
	} else if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		if (skb_linearize(skb)) {
			net_alert_ratelimited("failed to linearize skb\n");
			ret = -ENOMEM;
			goto drop;
		}
		linear = true;
		goto check_size;
	}

	pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;

	ret = skb_cow_head(skb, pkt_sz);
	if (ret) {
		netdev_err(net, "unable to alloc hv_netvsc_packet\n");
		ret = -ENOMEM;
		goto drop;
	}
	/* Use the headroom for building up the packet */
	packet = (struct hv_netvsc_packet *)skb->head;

	packet->xmit_more = skb->xmit_more;

	packet->vlan_tci = skb->vlan_tci;
	packet->page_buf = page_buf;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->is_data_pkt = true;
	packet->total_data_buflen = skb->len;

	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
				sizeof(struct hv_netvsc_packet));

	memset(packet->rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Set the completion routine */
	packet->send_completion = netvsc_xmit_completion;
	packet->send_completion_ctx = packet;
	packet->send_completion_tid = (unsigned long)skb;

	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg = packet->rndis_msg;
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
							 ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
						ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet);

	ret = netvsc_send(net_device_ctx->device_ctx, packet);

drop:
	if (ret == 0) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;

	net_device = hv_get_drvdata(device_obj);
	rdev = net_device->extension;

	switch (indicate->status) {
	case RNDIS_STATUS_MEDIA_CONNECT:
		rdev->link_state = false;
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		rdev->link_state = true;
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		rdev->link_change = true;
		break;
	default:
		return;
	}

	net = net_device->ndev;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);
	if (!rdev->link_state) {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
	} else {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
	}
}
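
/* The double scheduling on link-up appears intended to run
 * netvsc_link_change() a second time roughly 20 ms after the first pass,
 * e.g. so the GARP announcement is repeated once the carrier change has
 * settled; a second queue attempt while the work is still pending is a
 * no-op.
 */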

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
				struct hv_netvsc_packet *packet,
				struct ndis_tcp_ip_checksum_info *csum_info)
{
	struct net_device *net;
	struct net_device_context *net_device_ctx;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}
	net_device_ctx = netdev_priv(net);
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* We only look at the IP checksum here.
	 * Should we be dropping the packet if checksum
	 * failed? How do we deal with other checksums - TCP/UDP?
	 */
	if (csum_info->receive.ip_checksum_succeeded)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;

	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	skb_record_rx_queue(skb, packet->channel->
			    offermsg.offer.sub_channel_index);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

do_set:
	nvdev->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = hv_get_drvdata(dev);

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);

out:
	netvsc_open(net);

	return ret;

recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	int ret = 0;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	nvdev->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = nvdev->num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);

	return ret;
}
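
/* Changing the MTU is a full teardown and re-add of the RNDIS device so
 * that the buffer sizes can be renegotiated with the host; traffic stops
 * briefly while this happens.
 */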

static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes	+= tx_bytes;
		t->tx_packets	+= tx_packets;
		t->rx_bytes	+= rx_bytes;
		t->rx_packets	+= rx_packets;
	}

	t->tx_dropped	= net->stats.tx_dropped;
	t->tx_errors	= net->stats.tx_dropped;

	t->rx_dropped	= net->stats.rx_dropped;
	t->rx_errors	= net->stats.rx_errors;

	return t;
}
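
/* The u64_stats retry loops make the 64-bit per-cpu counters safe to read
 * on 32-bit kernels, where a concurrent writer could otherwise be observed
 * mid-update; on 64-bit builds they compile down to plain reads.
 */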

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netif_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, and cause network disconnection.
 * Also, we update the carrier status here.
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool notify, refresh = false;
	char *argv[] = { "/etc/init.d/network", "restart", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	rtnl_lock();

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	if (rdev->link_state) {
		netif_carrier_off(net);
		notify = false;
	} else {
		netif_carrier_on(net);
		notify = true;
		if (rdev->link_change) {
			rdev->link_change = false;
			refresh = true;
		}
	}

	rtnl_unlock();

	if (refresh)
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	if (notify)
		netdev_notify_peers(net);
}

static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;
	u32 max_needed_headroom;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	max_needed_headroom = sizeof(struct hv_netvsc_packet) +
			      RNDIS_AND_PPI_SIZE;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->tx_stats) {
		free_netdev(net);
		return -ENOMEM;
	}
	net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
	if (!net_device_ctx->rx_stats) {
		free_percpu(net_device_ctx->tx_stats);
		free_netdev(net);
		return -ENOMEM;
	}

	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/*
	 * Request additional head room in the skb.
	 * We will use this space to build the rndis
	 * header and other state we need to maintain.
	 */
	net->needed_headroom = max_needed_headroom;

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		netvsc_free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		netvsc_free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	netvsc_free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);