2 * Virtual network driver for conversing with remote driver backends.
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version 2
9 * as published by the Free Software Foundation; or, when distributed
10 * separately from the Linux kernel or incorporated into other
11 * software packages, subject to the following license:
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this source file (the "Software"), to deal in the Software without
15 * restriction, including without limitation the rights to use, copy, modify,
16 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17 * and to permit persons to whom the Software is furnished to do so, subject to
18 * the following conditions:
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
45 #include <linux/slab.h>
48 #include <asm/xen/page.h>
50 #include <xen/xenbus.h>
51 #include <xen/events.h>
53 #include <xen/platform_pci.h>
54 #include <xen/grant_table.h>
56 #include <xen/interface/io/netif.h>
57 #include <xen/interface/memory.h>
58 #include <xen/interface/grant_table.h>
60 static const struct ethtool_ops xennet_ethtool_ops;
66 #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
68 #define RX_COPY_THRESHOLD 256
70 #define GRANT_INVALID_REF 0
72 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
73 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
74 #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
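/*
 * The shared tx and rx rings each occupy a single page, so the ring sizes
 * above are fixed by PAGE_SIZE; TX_MAX_TARGET merely caps how many tx
 * slots we keep outstanding at once.
 */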
76 struct netfront_stats {
81 struct u64_stats_sync syncp;
84 struct netfront_info {
85 struct list_head list;
86 struct net_device *netdev;
88 struct napi_struct napi;
90 /* Split event channel support: tx_* == rx_* when using a
91 * single event channel.
93 unsigned int tx_evtchn, rx_evtchn;
94 unsigned int tx_irq, rx_irq;
95 /* Only used when split event channel support is enabled */
96 char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
97 char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
99 struct xenbus_device *xbdev;
102 struct xen_netif_tx_front_ring tx;
106 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
107 * are linked from tx_skb_freelist through skb_entry.link.
109 * NB. Freelist index entries are always going to be less than
110 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
111 * greater than PAGE_OFFSET: we use this property to distinguish them.
117 } tx_skbs[NET_TX_RING_SIZE];
118 grant_ref_t gref_tx_head;
119 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
120 unsigned tx_skb_freelist;
122 spinlock_t rx_lock ____cacheline_aligned_in_smp;
123 struct xen_netif_rx_front_ring rx;
126 /* Receive-ring batched refills. */
127 #define RX_MIN_TARGET 8
128 #define RX_DFL_MIN_TARGET 64
129 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
130 unsigned rx_min_target, rx_max_target, rx_target;
131 struct sk_buff_head rx_batch;
133 struct timer_list rx_refill_timer;
135 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
136 grant_ref_t gref_rx_head;
137 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
139 unsigned long rx_pfn_array[NET_RX_RING_SIZE];
140 struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
141 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
144 struct netfront_stats __percpu *stats;
146 unsigned long rx_gso_checksum_fixup;
149 struct netfront_rx_info {
150 struct xen_netif_rx_response rx;
151 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
154 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
159 static int skb_entry_is_link(const union skb_entry *list)
161 BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
162 return (unsigned long)list->skb < PAGE_OFFSET;
166 * Access helpers for acquiring and freeing slots in tx_skbs[].
169 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
172 skb_entry_set_link(&list[id], *head);
176 static unsigned short get_id_from_freelist(unsigned *head,
177 union skb_entry *list)
179 unsigned int id = *head;
180 *head = list[id].link;
184 static int xennet_rxidx(RING_IDX idx)
186 return idx & (NET_RX_RING_SIZE - 1);
189 static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
192 int i = xennet_rxidx(ri);
193 struct sk_buff *skb = np->rx_skbs[i];
194 np->rx_skbs[i] = NULL;
198 static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
201 int i = xennet_rxidx(ri);
202 grant_ref_t ref = np->grant_rx_ref[i];
203 np->grant_rx_ref[i] = GRANT_INVALID_REF;
208 static int xennet_sysfs_addif(struct net_device *netdev);
209 static void xennet_sysfs_delif(struct net_device *netdev);
210 #else /* !CONFIG_SYSFS */
211 #define xennet_sysfs_addif(dev) (0)
212 #define xennet_sysfs_delif(dev) do { } while (0)
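/* Scatter-gather is usable only if the backend advertised feature-sg
 * (see xennet_fix_features()), which is reflected in dev->features.
 */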
215 static bool xennet_can_sg(struct net_device *dev)
217 return dev->features & NETIF_F_SG;
221 static void rx_refill_timeout(unsigned long data)
223 struct net_device *dev = (struct net_device *)data;
224 struct netfront_info *np = netdev_priv(dev);
225 napi_schedule(&np->napi);
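/*
 * True while enough tx slots remain for a maximally fragmented skb
 * (MAX_SKB_FRAGS fragments plus slack for the linear header).
 */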
228 static int netfront_tx_slot_available(struct netfront_info *np)
230 return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
231 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
234 static void xennet_maybe_wake_tx(struct net_device *dev)
236 struct netfront_info *np = netdev_priv(dev);
238 if (unlikely(netif_queue_stopped(dev)) &&
239 netfront_tx_slot_available(np) &&
240 likely(netif_running(dev)))
241 netif_wake_queue(dev);
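/*
 * Refill the rx ring up to rx_target: allocate skbs and backing pages,
 * grant the backend access to each page and advance req_prod.
 * Called with rx_lock held.
 */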
244 static void xennet_alloc_rx_buffers(struct net_device *dev)
247 struct netfront_info *np = netdev_priv(dev);
250 int i, batch_target, notify;
251 RING_IDX req_prod = np->rx.req_prod_pvt;
255 struct xen_netif_rx_request *req;
257 if (unlikely(!netif_carrier_ok(dev)))
261 * Allocate skbuffs greedily, even though we batch updates to the
262 * receive ring. This creates a less bursty demand on the memory
263 * allocator, so it should reduce the chance of failed allocation requests
264 * both for ourselves and for other kernel subsystems.
266 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
267 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
268 skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
269 GFP_ATOMIC | __GFP_NOWARN);
273 /* Align the IP header to a 16-byte boundary */
274 skb_reserve(skb, NET_IP_ALIGN);
276 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
280 /* Could not allocate any skbuffs. Try again later. */
281 mod_timer(&np->rx_refill_timer,
284 /* Any skbuffs queued for refill? Force them out. */
290 skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
291 __skb_queue_tail(&np->rx_batch, skb);
294 /* Is the batch large enough to be worthwhile? */
295 if (i < (np->rx_target/2)) {
296 if (req_prod > np->rx.sring->req_prod)
301 /* Adjust our fill target if we risked running out of buffers. */
302 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
303 ((np->rx_target *= 2) > np->rx_max_target))
304 np->rx_target = np->rx_max_target;
308 skb = __skb_dequeue(&np->rx_batch);
314 id = xennet_rxidx(req_prod + i);
316 BUG_ON(np->rx_skbs[id]);
317 np->rx_skbs[id] = skb;
319 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
320 BUG_ON((signed short)ref < 0);
321 np->grant_rx_ref[id] = ref;
323 pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
324 vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
326 req = RING_GET_REQUEST(&np->rx, req_prod + i);
327 gnttab_grant_foreign_access_ref(ref,
328 np->xbdev->otherend_id,
336 wmb(); /* barrier so backend sees requests */
338 /* Above is a suitable barrier to ensure backend will see requests. */
339 np->rx.req_prod_pvt = req_prod + i;
341 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
343 notify_remote_via_irq(np->rx_irq);
346 static int xennet_open(struct net_device *dev)
348 struct netfront_info *np = netdev_priv(dev);
350 napi_enable(&np->napi);
352 spin_lock_bh(&np->rx_lock);
353 if (netif_carrier_ok(dev)) {
354 xennet_alloc_rx_buffers(dev);
355 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
356 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
357 napi_schedule(&np->napi);
359 spin_unlock_bh(&np->rx_lock);
361 netif_start_queue(dev);
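/*
 * Reclaim skbs for tx requests that the backend has completed: end the
 * foreign-access grants, return the grant refs and ring ids to their
 * free lists and free the skbs.  Called with tx_lock held.
 */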
366 static void xennet_tx_buf_gc(struct net_device *dev)
370 struct netfront_info *np = netdev_priv(dev);
373 BUG_ON(!netif_carrier_ok(dev));
376 prod = np->tx.sring->rsp_prod;
377 rmb(); /* Ensure we see responses up to 'rp'. */
379 for (cons = np->tx.rsp_cons; cons != prod; cons++) {
380 struct xen_netif_tx_response *txrsp;
382 txrsp = RING_GET_RESPONSE(&np->tx, cons);
383 if (txrsp->status == XEN_NETIF_RSP_NULL)
387 skb = np->tx_skbs[id].skb;
388 if (unlikely(gnttab_query_foreign_access(
389 np->grant_tx_ref[id]) != 0)) {
390 pr_alert("%s: warning -- grant still in use by backend domain\n",
394 gnttab_end_foreign_access_ref(
395 np->grant_tx_ref[id], GNTMAP_readonly);
396 gnttab_release_grant_reference(
397 &np->gref_tx_head, np->grant_tx_ref[id]);
398 np->grant_tx_ref[id] = GRANT_INVALID_REF;
399 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
400 dev_kfree_skb_irq(skb);
403 np->tx.rsp_cons = prod;
406 * Set a new event, then check for race with update of tx_cons.
407 * Note that it is essential to schedule a callback, no matter
408 * how few buffers are pending. Even if there is space in the
409 * transmit ring, higher layers may be blocked because too much
410 * data is outstanding: in such cases notification from Xen is
411 * likely to be the only kick that we'll get.
413 np->tx.sring->rsp_event =
414 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
415 mb(); /* update shared area */
416 } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
418 xennet_maybe_wake_tx(dev);
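/*
 * Queue further tx requests for the paged fragments of this skb (and for
 * any part of the linear header that crosses a page boundary); the first
 * request was already filled in by xennet_start_xmit().
 */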
421 static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
422 struct xen_netif_tx_request *tx)
424 struct netfront_info *np = netdev_priv(dev);
425 char *data = skb->data;
427 RING_IDX prod = np->tx.req_prod_pvt;
428 int frags = skb_shinfo(skb)->nr_frags;
429 unsigned int offset = offset_in_page(data);
430 unsigned int len = skb_headlen(skb);
435 /* While the header overlaps a page boundary (including being
436 larger than a page), split it into page-sized chunks. */
437 while (len > PAGE_SIZE - offset) {
438 tx->size = PAGE_SIZE - offset;
439 tx->flags |= XEN_NETTXF_more_data;
444 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
445 np->tx_skbs[id].skb = skb_get(skb);
446 tx = RING_GET_REQUEST(&np->tx, prod++);
448 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
449 BUG_ON((signed short)ref < 0);
451 mfn = virt_to_mfn(data);
452 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
453 mfn, GNTMAP_readonly);
455 tx->gref = np->grant_tx_ref[id] = ref;
461 /* Grant backend access to each skb fragment page. */
462 for (i = 0; i < frags; i++) {
463 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
464 struct page *page = skb_frag_page(frag);
466 len = skb_frag_size(frag);
467 offset = frag->page_offset;
469 /* Data must not cross a page boundary. */
470 BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
472 /* Skip fully-unused pages at the start of the fragment */
473 page += offset >> PAGE_SHIFT;
474 offset &= ~PAGE_MASK;
479 BUG_ON(offset >= PAGE_SIZE);
481 bytes = PAGE_SIZE - offset;
485 tx->flags |= XEN_NETTXF_more_data;
487 id = get_id_from_freelist(&np->tx_skb_freelist,
489 np->tx_skbs[id].skb = skb_get(skb);
490 tx = RING_GET_REQUEST(&np->tx, prod++);
492 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
493 BUG_ON((signed short)ref < 0);
495 mfn = pfn_to_mfn(page_to_pfn(page));
496 gnttab_grant_foreign_access_ref(ref,
497 np->xbdev->otherend_id,
498 mfn, GNTMAP_readonly);
500 tx->gref = np->grant_tx_ref[id] = ref;
509 if (offset == PAGE_SIZE && len) {
510 BUG_ON(!PageCompound(page));
517 np->tx.req_prod_pvt = prod;
521 * Count how many ring slots are required to send the frags of this
522 * skb. Each frag might be a compound page.
524 static int xennet_count_skb_frag_slots(struct sk_buff *skb)
526 int i, frags = skb_shinfo(skb)->nr_frags;
529 for (i = 0; i < frags; i++) {
530 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
531 unsigned long size = skb_frag_size(frag);
532 unsigned long offset = frag->page_offset;
534 /* Skip fully-unused pages at the start of the fragment */
535 offset &= ~PAGE_MASK;
537 pages += PFN_UP(offset + size);
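/*
 * Transmit path: build one tx request per (partial) page of the skb,
 * grant the backend read-only access to each page, then push the
 * requests and notify the backend over the tx event channel.
 */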
543 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
546 struct netfront_info *np = netdev_priv(dev);
547 struct netfront_stats *stats = this_cpu_ptr(np->stats);
548 struct xen_netif_tx_request *tx;
549 char *data = skb->data;
555 unsigned int offset = offset_in_page(data);
556 unsigned int len = skb_headlen(skb);
559 /* If skb->len is too big for wire format, drop skb and alert
560 * user about misconfiguration.
562 if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
563 net_alert_ratelimited(
564 "xennet: skb->len = %u, too big for wire format\n",
569 slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
570 xennet_count_skb_frag_slots(skb);
571 if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
572 net_alert_ratelimited(
573 "xennet: skb rides the rocket: %d slots\n", slots);
577 spin_lock_irqsave(&np->tx_lock, flags);
579 if (unlikely(!netif_carrier_ok(dev) ||
580 (slots > 1 && !xennet_can_sg(dev)) ||
581 netif_needs_gso(skb, netif_skb_features(skb)))) {
582 spin_unlock_irqrestore(&np->tx_lock, flags);
586 i = np->tx.req_prod_pvt;
588 id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
589 np->tx_skbs[id].skb = skb;
591 tx = RING_GET_REQUEST(&np->tx, i);
594 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
595 BUG_ON((signed short)ref < 0);
596 mfn = virt_to_mfn(data);
597 gnttab_grant_foreign_access_ref(
598 ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
599 tx->gref = np->grant_tx_ref[id] = ref;
604 if (skb->ip_summed == CHECKSUM_PARTIAL)
606 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
607 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
608 /* Checksum already validated by the remote end. */
609 tx->flags |= XEN_NETTXF_data_validated;
611 if (skb_shinfo(skb)->gso_size) {
612 struct xen_netif_extra_info *gso;
614 gso = (struct xen_netif_extra_info *)
615 RING_GET_REQUEST(&np->tx, ++i);
617 tx->flags |= XEN_NETTXF_extra_info;
619 gso->u.gso.size = skb_shinfo(skb)->gso_size;
620 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
622 gso->u.gso.features = 0;
624 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
628 np->tx.req_prod_pvt = i + 1;
630 xennet_make_frags(skb, dev, tx);
633 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
635 notify_remote_via_irq(np->tx_irq);
637 u64_stats_update_begin(&stats->syncp);
638 stats->tx_bytes += skb->len;
640 u64_stats_update_end(&stats->syncp);
642 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
643 xennet_tx_buf_gc(dev);
645 if (!netfront_tx_slot_available(np))
646 netif_stop_queue(dev);
648 spin_unlock_irqrestore(&np->tx_lock, flags);
653 dev->stats.tx_dropped++;
658 static int xennet_close(struct net_device *dev)
660 struct netfront_info *np = netdev_priv(dev);
661 netif_stop_queue(np->netdev);
662 napi_disable(&np->napi);
666 static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
669 int new = xennet_rxidx(np->rx.req_prod_pvt);
671 BUG_ON(np->rx_skbs[new]);
672 np->rx_skbs[new] = skb;
673 np->grant_rx_ref[new] = ref;
674 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
675 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
676 np->rx.req_prod_pvt++;
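/*
 * Consume the extra-info slots (e.g. GSO metadata) that follow an rx
 * response flagged with XEN_NETRXF_extra_info, recycling the ring slots
 * and skbs they occupied.
 */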
679 static int xennet_get_extras(struct netfront_info *np,
680 struct xen_netif_extra_info *extras,
684 struct xen_netif_extra_info *extra;
685 struct device *dev = &np->netdev->dev;
686 RING_IDX cons = np->rx.rsp_cons;
693 if (unlikely(cons + 1 == rp)) {
695 dev_warn(dev, "Missing extra info\n");
700 extra = (struct xen_netif_extra_info *)
701 RING_GET_RESPONSE(&np->rx, ++cons);
703 if (unlikely(!extra->type ||
704 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
706 dev_warn(dev, "Invalid extra type: %d\n",
710 memcpy(&extras[extra->type - 1], extra,
714 skb = xennet_get_rx_skb(np, cons);
715 ref = xennet_get_rx_ref(np, cons);
716 xennet_move_rx_slot(np, skb, ref);
717 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
719 np->rx.rsp_cons = cons;
723 static int xennet_get_responses(struct netfront_info *np,
724 struct netfront_rx_info *rinfo, RING_IDX rp,
725 struct sk_buff_head *list)
727 struct xen_netif_rx_response *rx = &rinfo->rx;
728 struct xen_netif_extra_info *extras = rinfo->extras;
729 struct device *dev = &np->netdev->dev;
730 RING_IDX cons = np->rx.rsp_cons;
731 struct sk_buff *skb = xennet_get_rx_skb(np, cons);
732 grant_ref_t ref = xennet_get_rx_ref(np, cons);
733 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
738 if (rx->flags & XEN_NETRXF_extra_info) {
739 err = xennet_get_extras(np, extras, rp);
740 cons = np->rx.rsp_cons;
744 if (unlikely(rx->status < 0 ||
745 rx->offset + rx->status > PAGE_SIZE)) {
747 dev_warn(dev, "rx->offset: %x, size: %u\n",
748 rx->offset, rx->status);
749 xennet_move_rx_slot(np, skb, ref);
755 * This definitely indicates a bug, either in this driver or in
756 * the backend driver. In future this should flag the bad
757 * situation to the system controller to reboot the backend.
759 if (ref == GRANT_INVALID_REF) {
761 dev_warn(dev, "Bad rx response id %d.\n",
767 ret = gnttab_end_foreign_access_ref(ref, 0);
770 gnttab_release_grant_reference(&np->gref_rx_head, ref);
772 __skb_queue_tail(list, skb);
775 if (!(rx->flags & XEN_NETRXF_more_data))
778 if (cons + slots == rp) {
780 dev_warn(dev, "Need more slots\n");
785 rx = RING_GET_RESPONSE(&np->rx, cons + slots);
786 skb = xennet_get_rx_skb(np, cons + slots);
787 ref = xennet_get_rx_ref(np, cons + slots);
791 if (unlikely(slots > max)) {
793 dev_warn(dev, "Too many slots\n");
798 np->rx.rsp_cons = cons + slots;
803 static int xennet_set_skb_gso(struct sk_buff *skb,
804 struct xen_netif_extra_info *gso)
806 if (!gso->u.gso.size) {
808 pr_warn("GSO size must not be zero\n");
812 /* Currently only TCPv4 segmentation offload is supported. */
813 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
815 pr_warn("Bad GSO type %d\n", gso->u.gso.type);
819 skb_shinfo(skb)->gso_size = gso->u.gso.size;
820 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
822 /* Header must be checked, and gso_segs computed. */
823 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
824 skb_shinfo(skb)->gso_segs = 0;
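/*
 * Attach the queued follow-on responses to skb as paged fragments,
 * pulling data into the linear area first if the frag array is already
 * full.
 */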
829 static RING_IDX xennet_fill_frags(struct netfront_info *np,
831 struct sk_buff_head *list)
833 struct skb_shared_info *shinfo = skb_shinfo(skb);
834 RING_IDX cons = np->rx.rsp_cons;
835 struct sk_buff *nskb;
837 while ((nskb = __skb_dequeue(list))) {
838 struct xen_netif_rx_response *rx =
839 RING_GET_RESPONSE(&np->rx, ++cons);
840 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
842 if (shinfo->nr_frags == MAX_SKB_FRAGS) {
843 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
845 BUG_ON(pull_to <= skb_headlen(skb));
846 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
848 BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
850 skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
851 rx->offset, rx->status, PAGE_SIZE);
853 skb_shinfo(nskb)->nr_frags = 0;
860 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
864 int recalculate_partial_csum = 0;
867 * A GSO SKB must be CHECKSUM_PARTIAL. However, some buggy
868 * peers can fail to set NETRXF_csum_blank when sending a GSO
869 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
870 * recalculate the partial checksum.
872 if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
873 struct netfront_info *np = netdev_priv(dev);
874 np->rx_gso_checksum_fixup++;
875 skb->ip_summed = CHECKSUM_PARTIAL;
876 recalculate_partial_csum = 1;
879 /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
880 if (skb->ip_summed != CHECKSUM_PARTIAL)
883 if (skb->protocol != htons(ETH_P_IP))
886 iph = (void *)skb->data;
888 switch (iph->protocol) {
890 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
891 offsetof(struct tcphdr, check)))
894 if (recalculate_partial_csum) {
895 struct tcphdr *tcph = tcp_hdr(skb);
896 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
897 skb->len - iph->ihl*4,
902 if (!skb_partial_csum_set(skb, 4 * iph->ihl,
903 offsetof(struct udphdr, check)))
906 if (recalculate_partial_csum) {
907 struct udphdr *udph = udp_hdr(skb);
908 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
909 skb->len - iph->ihl*4,
915 pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
926 static int handle_incoming_queue(struct net_device *dev,
927 struct sk_buff_head *rxq)
929 struct netfront_info *np = netdev_priv(dev);
930 struct netfront_stats *stats = this_cpu_ptr(np->stats);
931 int packets_dropped = 0;
934 while ((skb = __skb_dequeue(rxq)) != NULL) {
935 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
937 if (pull_to > skb_headlen(skb))
938 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
940 /* Ethernet work: Delayed to here as it peeks the header. */
941 skb->protocol = eth_type_trans(skb, dev);
943 if (checksum_setup(dev, skb)) {
946 dev->stats.rx_errors++;
950 u64_stats_update_begin(&stats->syncp);
952 stats->rx_bytes += skb->len;
953 u64_stats_update_end(&stats->syncp);
956 napi_gro_receive(&np->napi, skb);
959 return packets_dropped;
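/*
 * NAPI poll handler: drain up to budget responses from the rx ring,
 * rebuild skbs from them and pass them up the stack, then refill the
 * ring and re-enable notifications once all outstanding work is done.
 */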
962 static int xennet_poll(struct napi_struct *napi, int budget)
964 struct netfront_info *np = container_of(napi, struct netfront_info, napi);
965 struct net_device *dev = np->netdev;
967 struct netfront_rx_info rinfo;
968 struct xen_netif_rx_response *rx = &rinfo.rx;
969 struct xen_netif_extra_info *extras = rinfo.extras;
972 struct sk_buff_head rxq;
973 struct sk_buff_head errq;
974 struct sk_buff_head tmpq;
978 spin_lock(&np->rx_lock);
980 skb_queue_head_init(&rxq);
981 skb_queue_head_init(&errq);
982 skb_queue_head_init(&tmpq);
984 rp = np->rx.sring->rsp_prod;
985 rmb(); /* Ensure we see queued responses up to 'rp'. */
989 while ((i != rp) && (work_done < budget)) {
990 memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
991 memset(extras, 0, sizeof(rinfo.extras));
993 err = xennet_get_responses(np, &rinfo, rp, &tmpq);
997 while ((skb = __skb_dequeue(&tmpq)))
998 __skb_queue_tail(&errq, skb);
999 dev->stats.rx_errors++;
1000 i = np->rx.rsp_cons;
1004 skb = __skb_dequeue(&tmpq);
1006 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1007 struct xen_netif_extra_info *gso;
1008 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1010 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1011 __skb_queue_head(&tmpq, skb);
1012 np->rx.rsp_cons += skb_queue_len(&tmpq);
1017 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1018 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1019 NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1021 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1022 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1023 skb->data_len = rx->status;
1024 skb->len += rx->status;
1026 i = xennet_fill_frags(np, skb, &tmpq);
1028 if (rx->flags & XEN_NETRXF_csum_blank)
1029 skb->ip_summed = CHECKSUM_PARTIAL;
1030 else if (rx->flags & XEN_NETRXF_data_validated)
1031 skb->ip_summed = CHECKSUM_UNNECESSARY;
1033 __skb_queue_tail(&rxq, skb);
1035 np->rx.rsp_cons = ++i;
1039 __skb_queue_purge(&errq);
1041 work_done -= handle_incoming_queue(dev, &rxq);
1043 /* If we get a callback with very few responses, reduce fill target. */
1044 /* NB: exponential increase, linear decrease. */
1045 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1046 ((3*np->rx_target) / 4)) &&
1047 (--np->rx_target < np->rx_min_target))
1048 np->rx_target = np->rx_min_target;
1050 xennet_alloc_rx_buffers(dev);
1052 if (work_done < budget) {
1055 napi_gro_flush(napi, false);
1057 local_irq_save(flags);
1059 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1061 __napi_complete(napi);
1063 local_irq_restore(flags);
1066 spin_unlock(&np->rx_lock);
1071 static int xennet_change_mtu(struct net_device *dev, int mtu)
1073 int max = xennet_can_sg(dev) ?
1074 XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1082 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1083 struct rtnl_link_stats64 *tot)
1085 struct netfront_info *np = netdev_priv(dev);
1088 for_each_possible_cpu(cpu) {
1089 struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
1090 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1094 start = u64_stats_fetch_begin_bh(&stats->syncp);
1096 rx_packets = stats->rx_packets;
1097 tx_packets = stats->tx_packets;
1098 rx_bytes = stats->rx_bytes;
1099 tx_bytes = stats->tx_bytes;
1100 } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1102 tot->rx_packets += rx_packets;
1103 tot->tx_packets += tx_packets;
1104 tot->rx_bytes += rx_bytes;
1105 tot->tx_bytes += tx_bytes;
1108 tot->rx_errors = dev->stats.rx_errors;
1109 tot->tx_dropped = dev->stats.tx_dropped;
1114 static void xennet_release_tx_bufs(struct netfront_info *np)
1116 struct sk_buff *skb;
1119 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1120 /* Skip over entries which are actually freelist references */
1121 if (skb_entry_is_link(&np->tx_skbs[i]))
1124 skb = np->tx_skbs[i].skb;
1125 gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
1127 gnttab_release_grant_reference(&np->gref_tx_head,
1128 np->grant_tx_ref[i]);
1129 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1130 add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1131 dev_kfree_skb_irq(skb);
1135 static void xennet_release_rx_bufs(struct netfront_info *np)
1137 struct mmu_update *mmu = np->rx_mmu;
1138 struct multicall_entry *mcl = np->rx_mcl;
1139 struct sk_buff_head free_list;
1140 struct sk_buff *skb;
1142 int xfer = 0, noxfer = 0, unused = 0;
1145 dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
1149 skb_queue_head_init(&free_list);
1151 spin_lock_bh(&np->rx_lock);
1153 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1154 ref = np->grant_rx_ref[id];
1155 if (ref == GRANT_INVALID_REF) {
1160 skb = np->rx_skbs[id];
1161 mfn = gnttab_end_foreign_transfer_ref(ref);
1162 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1163 np->grant_rx_ref[id] = GRANT_INVALID_REF;
1166 skb_shinfo(skb)->nr_frags = 0;
1172 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1173 /* Remap the page. */
1174 const struct page *page =
1175 skb_frag_page(&skb_shinfo(skb)->frags[0]);
1176 unsigned long pfn = page_to_pfn(page);
1177 void *vaddr = page_address(page);
1179 MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1180 mfn_pte(mfn, PAGE_KERNEL),
1183 mmu->ptr = ((u64)mfn << PAGE_SHIFT)
1184 | MMU_MACHPHYS_UPDATE;
1188 set_phys_to_machine(pfn, mfn);
1190 __skb_queue_tail(&free_list, skb);
1194 dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
1195 __func__, xfer, noxfer, unused);
1198 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1199 /* Do all the remapping work and M2P updates. */
1200 MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
1203 HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
1207 __skb_queue_purge(&free_list);
1209 spin_unlock_bh(&np->rx_lock);
1212 static void xennet_uninit(struct net_device *dev)
1214 struct netfront_info *np = netdev_priv(dev);
1215 xennet_release_tx_bufs(np);
1216 xennet_release_rx_bufs(np);
1217 gnttab_free_grant_references(np->gref_tx_head);
1218 gnttab_free_grant_references(np->gref_rx_head);
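/*
 * Only keep NETIF_F_SG / NETIF_F_TSO if the backend has published the
 * corresponding feature-sg / feature-gso-tcpv4 keys in xenstore.
 */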
1221 static netdev_features_t xennet_fix_features(struct net_device *dev,
1222 netdev_features_t features)
1224 struct netfront_info *np = netdev_priv(dev);
1227 if (features & NETIF_F_SG) {
1228 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1233 features &= ~NETIF_F_SG;
1236 if (features & NETIF_F_TSO) {
1237 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1238 "feature-gso-tcpv4", "%d", &val) < 0)
1242 features &= ~NETIF_F_TSO;
1248 static int xennet_set_features(struct net_device *dev,
1249 netdev_features_t features)
1251 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1252 netdev_info(dev, "Reducing MTU because no SG offload");
1253 dev->mtu = ETH_DATA_LEN;
1259 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1261 struct netfront_info *np = dev_id;
1262 struct net_device *dev = np->netdev;
1263 unsigned long flags;
1265 spin_lock_irqsave(&np->tx_lock, flags);
1266 xennet_tx_buf_gc(dev);
1267 spin_unlock_irqrestore(&np->tx_lock, flags);
1272 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1274 struct netfront_info *np = dev_id;
1275 struct net_device *dev = np->netdev;
1277 if (likely(netif_carrier_ok(dev) &&
1278 RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
1279 napi_schedule(&np->napi);
1284 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1286 xennet_tx_interrupt(irq, dev_id);
1287 xennet_rx_interrupt(irq, dev_id);
1291 #ifdef CONFIG_NET_POLL_CONTROLLER
1292 static void xennet_poll_controller(struct net_device *dev)
1294 xennet_interrupt(0, dev);
1298 static const struct net_device_ops xennet_netdev_ops = {
1299 .ndo_open = xennet_open,
1300 .ndo_uninit = xennet_uninit,
1301 .ndo_stop = xennet_close,
1302 .ndo_start_xmit = xennet_start_xmit,
1303 .ndo_change_mtu = xennet_change_mtu,
1304 .ndo_get_stats64 = xennet_get_stats64,
1305 .ndo_set_mac_address = eth_mac_addr,
1306 .ndo_validate_addr = eth_validate_addr,
1307 .ndo_fix_features = xennet_fix_features,
1308 .ndo_set_features = xennet_set_features,
1309 #ifdef CONFIG_NET_POLL_CONTROLLER
1310 .ndo_poll_controller = xennet_poll_controller,
1314 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1317 struct net_device *netdev;
1318 struct netfront_info *np;
1320 netdev = alloc_etherdev(sizeof(struct netfront_info));
1322 return ERR_PTR(-ENOMEM);
1324 np = netdev_priv(netdev);
1327 spin_lock_init(&np->tx_lock);
1328 spin_lock_init(&np->rx_lock);
1330 skb_queue_head_init(&np->rx_batch);
1331 np->rx_target = RX_DFL_MIN_TARGET;
1332 np->rx_min_target = RX_DFL_MIN_TARGET;
1333 np->rx_max_target = RX_MAX_TARGET;
1335 init_timer(&np->rx_refill_timer);
1336 np->rx_refill_timer.data = (unsigned long)netdev;
1337 np->rx_refill_timer.function = rx_refill_timeout;
1340 np->stats = alloc_percpu(struct netfront_stats);
1341 if (np->stats == NULL)
1344 for_each_possible_cpu(i) {
1345 struct netfront_stats *xen_nf_stats;
1346 xen_nf_stats = per_cpu_ptr(np->stats, i);
1347 u64_stats_init(&xen_nf_stats->syncp);
1350 /* Initialise tx_skbs as a free chain containing every entry. */
1351 np->tx_skb_freelist = 0;
1352 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1353 skb_entry_set_link(&np->tx_skbs[i], i+1);
1354 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1357 /* Clear out rx_skbs */
1358 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1359 np->rx_skbs[i] = NULL;
1360 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1363 /* A grant for every tx ring slot */
1364 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1365 &np->gref_tx_head) < 0) {
1366 pr_alert("can't alloc tx grant refs\n");
1368 goto exit_free_stats;
1370 /* A grant for every rx ring slot */
1371 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1372 &np->gref_rx_head) < 0) {
1373 pr_alert("can't alloc rx grant refs\n");
1378 netdev->netdev_ops = &xennet_netdev_ops;
1380 netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1381 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1383 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
1386 * Assume that all hw features are available for now. This set
1387 * will be adjusted by the call to netdev_update_features() in
1388 * xennet_connect() which is the earliest point where we can
1389 * negotiate with the backend regarding supported features.
1391 netdev->features |= netdev->hw_features;
1393 SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1394 SET_NETDEV_DEV(netdev, &dev->dev);
1396 netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1398 np->netdev = netdev;
1400 netif_carrier_off(netdev);
1405 gnttab_free_grant_references(np->gref_tx_head);
1407 free_percpu(np->stats);
1409 free_netdev(netdev);
1410 return ERR_PTR(err);
1414 * Entry point to this code when a new device is created. Allocate the basic
1415 * structures and the ring buffers for communication with the backend, and
1416 * inform the backend of the appropriate details for those.
1418 static int netfront_probe(struct xenbus_device *dev,
1419 const struct xenbus_device_id *id)
1422 struct net_device *netdev;
1423 struct netfront_info *info;
1425 netdev = xennet_create_dev(dev);
1426 if (IS_ERR(netdev)) {
1427 err = PTR_ERR(netdev);
1428 xenbus_dev_fatal(dev, err, "creating netdev");
1432 info = netdev_priv(netdev);
1433 dev_set_drvdata(&dev->dev, info);
1435 err = register_netdev(info->netdev);
1437 pr_warn("%s: register_netdev err=%d\n", __func__, err);
1441 err = xennet_sysfs_addif(info->netdev);
1443 unregister_netdev(info->netdev);
1444 pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1451 free_netdev(netdev);
1452 dev_set_drvdata(&dev->dev, NULL);
1456 static void xennet_end_access(int ref, void *page)
1458 /* This frees the page as a side-effect */
1459 if (ref != GRANT_INVALID_REF)
1460 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
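/*
 * Tear down the connection to the backend: turn the carrier off, unbind
 * the event-channel irq(s) and revoke access to the shared ring pages.
 */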
1463 static void xennet_disconnect_backend(struct netfront_info *info)
1465 /* Stop the old interface to prevent errors while we rebuild the state. */
1466 spin_lock_bh(&info->rx_lock);
1467 spin_lock_irq(&info->tx_lock);
1468 netif_carrier_off(info->netdev);
1469 spin_unlock_irq(&info->tx_lock);
1470 spin_unlock_bh(&info->rx_lock);
1472 if (info->tx_irq && (info->tx_irq == info->rx_irq))
1473 unbind_from_irqhandler(info->tx_irq, info);
1474 if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
1475 unbind_from_irqhandler(info->tx_irq, info);
1476 unbind_from_irqhandler(info->rx_irq, info);
1478 info->tx_evtchn = info->rx_evtchn = 0;
1479 info->tx_irq = info->rx_irq = 0;
1481 /* End access and free the pages */
1482 xennet_end_access(info->tx_ring_ref, info->tx.sring);
1483 xennet_end_access(info->rx_ring_ref, info->rx.sring);
1485 info->tx_ring_ref = GRANT_INVALID_REF;
1486 info->rx_ring_ref = GRANT_INVALID_REF;
1487 info->tx.sring = NULL;
1488 info->rx.sring = NULL;
1492 * We are reconnecting to the backend, due to a suspend/resume, or a backend
1493 * driver restart. We tear down our netif structure and recreate it, but
1494 * leave the device-layer structures intact so that this is transparent to the
1495 * rest of the kernel.
1497 static int netfront_resume(struct xenbus_device *dev)
1499 struct netfront_info *info = dev_get_drvdata(&dev->dev);
1501 dev_dbg(&dev->dev, "%s\n", dev->nodename);
1503 xennet_disconnect_backend(info);
1507 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1509 char *s, *e, *macstr;
1512 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1514 return PTR_ERR(macstr);
1516 for (i = 0; i < ETH_ALEN; i++) {
1517 mac[i] = simple_strtoul(s, &e, 16);
1518 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1529 static int setup_netfront_single(struct netfront_info *info)
1533 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1537 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1539 0, info->netdev->name, info);
1542 info->rx_evtchn = info->tx_evtchn;
1543 info->rx_irq = info->tx_irq = err;
1548 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1549 info->tx_evtchn = 0;
1554 static int setup_netfront_split(struct netfront_info *info)
1558 err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1561 err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
1563 goto alloc_rx_evtchn_fail;
1565 snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
1566 "%s-tx", info->netdev->name);
1567 err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1568 xennet_tx_interrupt,
1569 0, info->tx_irq_name, info);
1574 snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
1575 "%s-rx", info->netdev->name);
1576 err = bind_evtchn_to_irqhandler(info->rx_evtchn,
1577 xennet_rx_interrupt,
1578 0, info->rx_irq_name, info);
1586 unbind_from_irqhandler(info->tx_irq, info);
1589 xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
1590 info->rx_evtchn = 0;
1591 alloc_rx_evtchn_fail:
1592 xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1593 info->tx_evtchn = 0;
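/*
 * Allocate and grant the shared tx/rx rings and bind the event
 * channel(s), preferring split tx/rx channels when the backend
 * advertises feature-split-event-channels.
 */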
1598 static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1600 struct xen_netif_tx_sring *txs;
1601 struct xen_netif_rx_sring *rxs;
1603 struct net_device *netdev = info->netdev;
1604 unsigned int feature_split_evtchn;
1606 info->tx_ring_ref = GRANT_INVALID_REF;
1607 info->rx_ring_ref = GRANT_INVALID_REF;
1608 info->rx.sring = NULL;
1609 info->tx.sring = NULL;
1612 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1613 "feature-split-event-channels", "%u",
1614 &feature_split_evtchn);
1616 feature_split_evtchn = 0;
1618 err = xen_net_read_mac(dev, netdev->dev_addr);
1620 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1624 txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1627 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1630 SHARED_RING_INIT(txs);
1631 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1633 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1635 goto grant_tx_ring_fail;
1637 info->tx_ring_ref = err;
1638 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1641 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1642 goto alloc_rx_ring_fail;
1644 SHARED_RING_INIT(rxs);
1645 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1647 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1649 goto grant_rx_ring_fail;
1650 info->rx_ring_ref = err;
1652 if (feature_split_evtchn)
1653 err = setup_netfront_split(info);
1654 /* setup single event channel if
1655 * a) feature-split-event-channels == 0
1656 * b) feature-split-event-channels == 1 but setting up split channels failed
1658 if (!feature_split_evtchn || (feature_split_evtchn && err))
1659 err = setup_netfront_single(info);
1662 goto alloc_evtchn_fail;
1666 /* If we fail to set up netfront, it is safe to just revoke access to
1667 * the granted pages because the backend is not accessing them at this point.
1670 gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
1672 free_page((unsigned long)rxs);
1674 gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
1676 free_page((unsigned long)txs);
1681 /* Common code used when first setting up, and when resuming. */
1682 static int talk_to_netback(struct xenbus_device *dev,
1683 struct netfront_info *info)
1685 const char *message;
1686 struct xenbus_transaction xbt;
1689 /* Create shared ring, alloc event channel. */
1690 err = setup_netfront(dev, info);
1695 err = xenbus_transaction_start(&xbt);
1697 xenbus_dev_fatal(dev, err, "starting transaction");
1701 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1704 message = "writing tx ring-ref";
1705 goto abort_transaction;
1707 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1710 message = "writing rx ring-ref";
1711 goto abort_transaction;
1714 if (info->tx_evtchn == info->rx_evtchn) {
1715 err = xenbus_printf(xbt, dev->nodename,
1716 "event-channel", "%u", info->tx_evtchn);
1718 message = "writing event-channel";
1719 goto abort_transaction;
1722 err = xenbus_printf(xbt, dev->nodename,
1723 "event-channel-tx", "%u", info->tx_evtchn);
1725 message = "writing event-channel-tx";
1726 goto abort_transaction;
1728 err = xenbus_printf(xbt, dev->nodename,
1729 "event-channel-rx", "%u", info->rx_evtchn);
1731 message = "writing event-channel-rx";
1732 goto abort_transaction;
1736 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1739 message = "writing request-rx-copy";
1740 goto abort_transaction;
1743 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1745 message = "writing feature-rx-notify";
1746 goto abort_transaction;
1749 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1751 message = "writing feature-sg";
1752 goto abort_transaction;
1755 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1757 message = "writing feature-gso-tcpv4";
1758 goto abort_transaction;
1761 err = xenbus_transaction_end(xbt, 0);
1765 xenbus_dev_fatal(dev, err, "completing transaction");
1772 xenbus_transaction_end(xbt, 1);
1773 xenbus_dev_fatal(dev, err, "%s", message);
1775 xennet_disconnect_backend(info);
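/*
 * Connect to the backend: renegotiate features, rebuild the rx ring from
 * any surviving rx skbs and restart the tx/rx paths.
 */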
1780 static int xennet_connect(struct net_device *dev)
1782 struct netfront_info *np = netdev_priv(dev);
1783 int i, requeue_idx, err;
1784 struct sk_buff *skb;
1786 struct xen_netif_rx_request *req;
1787 unsigned int feature_rx_copy;
1789 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1790 "feature-rx-copy", "%u", &feature_rx_copy);
1792 feature_rx_copy = 0;
1794 if (!feature_rx_copy) {
1796 "backend does not support copying receive path\n");
1800 err = talk_to_netback(np->xbdev, np);
1805 netdev_update_features(dev);
1808 spin_lock_bh(&np->rx_lock);
1809 spin_lock_irq(&np->tx_lock);
1811 /* Step 1: Discard all pending TX packet fragments. */
1812 xennet_release_tx_bufs(np);
1814 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1815 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1817 const struct page *page;
1818 if (!np->rx_skbs[i])
1821 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1822 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1823 req = RING_GET_REQUEST(&np->rx, requeue_idx);
1825 frag = &skb_shinfo(skb)->frags[0];
1826 page = skb_frag_page(frag);
1827 gnttab_grant_foreign_access_ref(
1828 ref, np->xbdev->otherend_id,
1829 pfn_to_mfn(page_to_pfn(page)),
1832 req->id = requeue_idx;
1837 np->rx.req_prod_pvt = requeue_idx;
1840 * Step 3: All public and private state should now be sane. Get
1841 * ready to start sending and receiving packets and give the driver
1842 * domain a kick because we've probably just requeued some packets.
1845 netif_carrier_on(np->netdev);
1846 notify_remote_via_irq(np->tx_irq);
1847 if (np->tx_irq != np->rx_irq)
1848 notify_remote_via_irq(np->rx_irq);
1849 xennet_tx_buf_gc(dev);
1850 xennet_alloc_rx_buffers(dev);
1852 spin_unlock_irq(&np->tx_lock);
1853 spin_unlock_bh(&np->rx_lock);
1859 * Callback received when the backend's state changes.
1861 static void netback_changed(struct xenbus_device *dev,
1862 enum xenbus_state backend_state)
1864 struct netfront_info *np = dev_get_drvdata(&dev->dev);
1865 struct net_device *netdev = np->netdev;
1867 dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1869 switch (backend_state) {
1870 case XenbusStateInitialising:
1871 case XenbusStateInitialised:
1872 case XenbusStateReconfiguring:
1873 case XenbusStateReconfigured:
1874 case XenbusStateUnknown:
1875 case XenbusStateClosed:
1878 case XenbusStateInitWait:
1879 if (dev->state != XenbusStateInitialising)
1881 if (xennet_connect(netdev) != 0)
1883 xenbus_switch_state(dev, XenbusStateConnected);
1886 case XenbusStateConnected:
1887 netdev_notify_peers(netdev);
1890 case XenbusStateClosing:
1891 xenbus_frontend_closed(dev);
1896 static const struct xennet_stat {
1897 char name[ETH_GSTRING_LEN];
1899 } xennet_stats[] = {
1901 "rx_gso_checksum_fixup",
1902 offsetof(struct netfront_info, rx_gso_checksum_fixup)
1906 static int xennet_get_sset_count(struct net_device *dev, int string_set)
1908 switch (string_set) {
1910 return ARRAY_SIZE(xennet_stats);
1916 static void xennet_get_ethtool_stats(struct net_device *dev,
1917 struct ethtool_stats *stats, u64 * data)
1919 void *np = netdev_priv(dev);
1922 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1923 data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1926 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
1930 switch (stringset) {
1932 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1933 memcpy(data + i * ETH_GSTRING_LEN,
1934 xennet_stats[i].name, ETH_GSTRING_LEN);
1939 static const struct ethtool_ops xennet_ethtool_ops =
1941 .get_link = ethtool_op_get_link,
1943 .get_sset_count = xennet_get_sset_count,
1944 .get_ethtool_stats = xennet_get_ethtool_stats,
1945 .get_strings = xennet_get_strings,
1949 static ssize_t show_rxbuf_min(struct device *dev,
1950 struct device_attribute *attr, char *buf)
1952 struct net_device *netdev = to_net_dev(dev);
1953 struct netfront_info *info = netdev_priv(netdev);
1955 return sprintf(buf, "%u\n", info->rx_min_target);
1958 static ssize_t store_rxbuf_min(struct device *dev,
1959 struct device_attribute *attr,
1960 const char *buf, size_t len)
1962 struct net_device *netdev = to_net_dev(dev);
1963 struct netfront_info *np = netdev_priv(netdev);
1965 unsigned long target;
1967 if (!capable(CAP_NET_ADMIN))
1970 target = simple_strtoul(buf, &endp, 0);
1974 if (target < RX_MIN_TARGET)
1975 target = RX_MIN_TARGET;
1976 if (target > RX_MAX_TARGET)
1977 target = RX_MAX_TARGET;
1979 spin_lock_bh(&np->rx_lock);
1980 if (target > np->rx_max_target)
1981 np->rx_max_target = target;
1982 np->rx_min_target = target;
1983 if (target > np->rx_target)
1984 np->rx_target = target;
1986 xennet_alloc_rx_buffers(netdev);
1988 spin_unlock_bh(&np->rx_lock);
1992 static ssize_t show_rxbuf_max(struct device *dev,
1993 struct device_attribute *attr, char *buf)
1995 struct net_device *netdev = to_net_dev(dev);
1996 struct netfront_info *info = netdev_priv(netdev);
1998 return sprintf(buf, "%u\n", info->rx_max_target);
2001 static ssize_t store_rxbuf_max(struct device *dev,
2002 struct device_attribute *attr,
2003 const char *buf, size_t len)
2005 struct net_device *netdev = to_net_dev(dev);
2006 struct netfront_info *np = netdev_priv(netdev);
2008 unsigned long target;
2010 if (!capable(CAP_NET_ADMIN))
2013 target = simple_strtoul(buf, &endp, 0);
2017 if (target < RX_MIN_TARGET)
2018 target = RX_MIN_TARGET;
2019 if (target > RX_MAX_TARGET)
2020 target = RX_MAX_TARGET;
2022 spin_lock_bh(&np->rx_lock);
2023 if (target < np->rx_min_target)
2024 np->rx_min_target = target;
2025 np->rx_max_target = target;
2026 if (target < np->rx_target)
2027 np->rx_target = target;
2029 xennet_alloc_rx_buffers(netdev);
2031 spin_unlock_bh(&np->rx_lock);
2035 static ssize_t show_rxbuf_cur(struct device *dev,
2036 struct device_attribute *attr, char *buf)
2038 struct net_device *netdev = to_net_dev(dev);
2039 struct netfront_info *info = netdev_priv(netdev);
2041 return sprintf(buf, "%u\n", info->rx_target);
2044 static struct device_attribute xennet_attrs[] = {
2045 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
2046 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
2047 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
2050 static int xennet_sysfs_addif(struct net_device *netdev)
2055 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2056 err = device_create_file(&netdev->dev,
2065 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2069 static void xennet_sysfs_delif(struct net_device *netdev)
2073 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2074 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2077 #endif /* CONFIG_SYSFS */
2079 static const struct xenbus_device_id netfront_ids[] = {
2085 static int xennet_remove(struct xenbus_device *dev)
2087 struct netfront_info *info = dev_get_drvdata(&dev->dev);
2089 dev_dbg(&dev->dev, "%s\n", dev->nodename);
2091 xennet_disconnect_backend(info);
2093 xennet_sysfs_delif(info->netdev);
2095 unregister_netdev(info->netdev);
2097 del_timer_sync(&info->rx_refill_timer);
2099 free_percpu(info->stats);
2101 free_netdev(info->netdev);
2106 static DEFINE_XENBUS_DRIVER(netfront, ,
2107 .probe = netfront_probe,
2108 .remove = xennet_remove,
2109 .resume = netfront_resume,
2110 .otherend_changed = netback_changed,
2113 static int __init netif_init(void)
2118 if (xen_hvm_domain() && !xen_platform_pci_unplug)
2121 pr_info("Initialising Xen virtual ethernet driver\n");
2123 return xenbus_register_frontend(&netfront_driver);
2125 module_init(netif_init);
2128 static void __exit netif_exit(void)
2130 xenbus_unregister_driver(&netfront_driver);
2132 module_exit(netif_exit);
2134 MODULE_DESCRIPTION("Xen virtual network device frontend");
2135 MODULE_LICENSE("GPL");
2136 MODULE_ALIAS("xen:vif");
2137 MODULE_ALIAS("xennet");