/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
        [board_82599_vf] = &ixgbevf_82599_vf_info,
        [board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id ixgbevf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);

static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
        struct ixgbevf_adapter *adapter = hw->back;

        if (!hw->hw_addr)
                return;
        hw->hw_addr = NULL;
        dev_err(&adapter->pdev->dev, "Adapter removed\n");
        if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
                schedule_work(&adapter->watchdog_task);
}

static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
        u32 value;

        /* The following check not only optimizes a bit by not
         * performing a read on the status register when the
         * register just read was a status register read that
         * returned IXGBE_FAILED_READ_REG. It also blocks any
         * potential recursion.
         */
        if (reg == IXGBE_VFSTATUS) {
                ixgbevf_remove_adapter(hw);
                return;
        }
        value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
        if (value == IXGBE_FAILED_READ_REG)
                ixgbevf_remove_adapter(hw);
}

u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
        u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
        u32 value;

        if (IXGBE_REMOVED(reg_addr))
                return IXGBE_FAILED_READ_REG;
        value = readl(reg_addr + reg);
        if (unlikely(value == IXGBE_FAILED_READ_REG))
                ixgbevf_check_remove(hw, reg);
        return value;
}

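/* Note on the IVAR layout used below: each VTIVAR register carries four
 * one-byte cause entries, two queues per register (Rx causes at byte
 * offsets 0 and 16, Tx at 8 and 24).  Hence (queue >> 1) selects the
 * register and 16 * (queue & 1) + 8 * direction selects the byte within
 * it - a reading of the arithmetic, not a statement from the datasheet.
 */
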
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
                             u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;

        if (direction == -1) {
                /* other causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
                ivar &= ~0xFF;
                ivar |= msix_vector;
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
        } else {
                /* tx or rx causes */
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                index = ((16 * (queue & 1)) + (8 * direction));
                ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
                ivar &= ~(0xFF << index);
                ivar |= (msix_vector << index);
                IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
        }
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
                                               struct ixgbevf_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(tx_ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(tx_ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }
        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        /* tx_buffer must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

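/* Sizing note: TXD_USE_COUNT() rounds a buffer up to the number of
 * descriptors needed at 16KB (2^14 bytes) per descriptor.  The "+ 4" in
 * DESC_NEEDED leaves headroom beyond the page fragments - roughly the skb
 * head, a context descriptor, and alignment splits; an estimate, not an
 * exact bound.
 */
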
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                                 struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_tx_buffer *tx_buffer;
        union ixgbe_adv_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = tx_ring->count / 2;
        unsigned int i = tx_ring->next_to_clean;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return true;

        tx_buffer = &tx_ring->tx_buffer_info[i];
        tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
        i -= tx_ring->count;

        do {
                union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();

                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buffer->next_to_watch = NULL;

                /* update the statistics for this packet */
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;

                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);

                /* clear tx_buffer data */
                tx_buffer->skb = NULL;
                dma_unmap_len_set(tx_buffer, len, 0);

                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
                        if (unlikely(!i)) {
                                i -= tx_ring->count;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buffer, dma),
                                               dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
                                dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        i += tx_ring->count;
        tx_ring->next_to_clean = i;
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();

                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }

        return !!budget;
}

/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
                           struct sk_buff *skb)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        skb_mark_napi_id(skb, &q_vector->napi);

        if (ixgbevf_qv_busy_polling(q_vector)) {
                netif_receive_skb(skb);
                /* exit early if we busy polled */
                return;
        }
#endif /* CONFIG_NET_RX_BUSY_POLL */
        if (!(q_vector->adapter->flags & IXGBE_FLAG_IN_NETPOLL))
                napi_gro_receive(&q_vector->napi, skb);
        else
                netif_rx(skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: structure containing ring specific data
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Rx csum disabled */
        if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;

        /* if IP and error */
        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) &&
            ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        if (!ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_L4CS))
                return;

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) {
                ring->rx_stats.csum_err++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/**
 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the checksum, VLAN, protocol, and other fields within
 * the skb.
 **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
{
        ixgbevf_rx_checksum(rx_ring, rx_desc, skb);

        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
                u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
                unsigned long *active_vlans = netdev_priv(rx_ring->netdev);

                if (test_bit(vid & VLAN_VID_MASK, active_vlans))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }

        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static bool ixgbevf_alloc_mapped_skb(struct ixgbevf_ring *rx_ring,
                                     struct ixgbevf_rx_buffer *bi)
{
        struct sk_buff *skb = bi->skb;
        dma_addr_t dma = bi->dma;

        if (unlikely(skb))
                return true;

        skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
                                        rx_ring->rx_buf_len);
        if (unlikely(!skb)) {
                rx_ring->rx_stats.alloc_rx_buff_failed++;
                return false;
        }

        dma = dma_map_single(rx_ring->dev, skb->data,
                             rx_ring->rx_buf_len, DMA_FROM_DEVICE);

        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                dev_kfree_skb_any(skb);

                rx_ring->rx_stats.alloc_rx_buff_failed++;
                return false;
        }

        bi->skb = skb;
        bi->dma = dma;

        return true;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
                                     u16 cleaned_count)
{
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbevf_rx_buffer *bi;
        unsigned int i = rx_ring->next_to_use;

        /* nothing to do or no valid netdev defined */
        if (!cleaned_count || !rx_ring->netdev)
                return;

        rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;

        do {
                if (!ixgbevf_alloc_mapped_skb(rx_ring, bi))
                        break;

                /* Refresh the desc even if pkt_addr didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

                rx_desc++;
                bi++;
                i++;
                if (unlikely(!i)) {
                        rx_desc = IXGBEVF_RX_DESC(rx_ring, 0);
                        bi = rx_ring->rx_buffer_info;
                        i -= rx_ring->count;
                }

                /* clear the hdr_addr for the next_to_use descriptor */
                rx_desc->read.hdr_addr = 0;

                cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;

        if (rx_ring->next_to_use != i) {
                /* record the next descriptor to use */
                rx_ring->next_to_use = i;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                ixgbevf_write_tail(rx_ring, i);
        }
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
                                             u32 qmask)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

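/* Rx cleanup below follows the legacy one-skb-per-descriptor model: each
 * buffer was mapped with dma_map_single() at allocation time, so completed
 * descriptors are unmapped, chained on non-EOP, and handed to the stack.
 * The return value is the number of packets cleaned, which the caller
 * compares against the per-ring budget.
 */
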
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                struct ixgbevf_ring *rx_ring,
                                int budget)
{
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        u16 cleaned_count = ixgbevf_desc_unused(rx_ring);

        i = rx_ring->next_to_clean;
        rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
                if (!budget)
                        break;
                budget--;

                rmb(); /* read descriptor and rx_buffer_info after status DD */

                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
                                 rx_ring->rx_buf_len,
                                 DMA_FROM_DEVICE);
                rx_buffer_info->dma = 0;
                skb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));

                i++;
                if (i == rx_ring->count)
                        i = 0;

                next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;

                next_buffer = &rx_ring->rx_buffer_info[i];

                if (!(ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) {
                        skb->next = next_buffer->skb;
                        IXGBE_CB(skb->next)->prev = skb;
                        rx_ring->rx_stats.non_eop_descs++;
                        goto next_desc;
                }

                /* we should not be chaining buffers, if we did drop the skb */
                if (IXGBE_CB(skb)->prev) {
                        do {
                                struct sk_buff *this = skb;
                                skb = IXGBE_CB(skb)->prev;
                                dev_kfree_skb(this);
                        } while (skb);
                        goto next_desc;
                }

                /* ERR_MASK will only have valid bits if EOP set */
                if (unlikely(ixgbevf_test_staterr(rx_desc,
                                            IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                /* Workaround hardware that can't do proper VEPA multicast
                 * source pruning.
                 */
                if ((skb->pkt_type == PACKET_BROADCAST ||
                     skb->pkt_type == PACKET_MULTICAST) &&
                    ether_addr_equal(rx_ring->netdev->dev_addr,
                                     eth_hdr(skb)->h_source)) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                /* populate checksum, VLAN, and protocol */
                ixgbevf_process_skb_fields(rx_ring, rx_desc, skb);

                ixgbevf_rx_skb(q_vector, skb);

next_desc:
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
                        ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
        }

        rx_ring->next_to_clean = i;
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;

        if (cleaned_count)
                ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);

        return total_rx_packets;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
        struct ixgbevf_q_vector *q_vector =
                container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int per_ring_budget;
        bool clean_complete = true;

        ixgbevf_for_each_ring(ring, q_vector->tx)
                clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
        if (!ixgbevf_qv_lock_napi(q_vector))
                return budget;
#endif

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling
         */
        if (q_vector->rx.count > 1)
                per_ring_budget = max(budget/q_vector->rx.count, 1);
        else
                per_ring_budget = budget;

        adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
        ixgbevf_for_each_ring(ring, q_vector->rx)
                clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
                                                        per_ring_budget)
                                   < per_ring_budget);
        adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
        ixgbevf_qv_unlock_napi(q_vector);
#endif

        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
        /* all work done, exit the polling mode */
        napi_complete(napi);
        if (adapter->rx_itr_setting & 1)
                ixgbevf_set_itr(q_vector);
        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                ixgbevf_irq_enable_queues(adapter,
                                          1 << q_vector->v_idx);

        return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        int v_idx = q_vector->v_idx;
        u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

        /*
         * set the WDIS bit to not clear the timer bits and cause an
         * immediate assertion of the interrupt
         */
        itr_reg |= IXGBE_EITR_CNT_WDIS;

        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
        struct ixgbevf_q_vector *q_vector =
                        container_of(napi, struct ixgbevf_q_vector, napi);
        struct ixgbevf_adapter *adapter = q_vector->adapter;
        struct ixgbevf_ring *ring;
        int found = 0;

        if (test_bit(__IXGBEVF_DOWN, &adapter->state))
                return LL_FLUSH_FAILED;

        if (!ixgbevf_qv_lock_poll(q_vector))
                return LL_FLUSH_BUSY;

        ixgbevf_for_each_ring(ring, q_vector->rx) {
                found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else
                        ring->stats.misses++;
#endif
                if (found)
                        break;
        }

        ixgbevf_qv_unlock_poll(q_vector);

        return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

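/* With CONFIG_NET_RX_BUSY_POLL, ixgbevf_poll() and ixgbevf_busy_poll_recv()
 * serialize on the q_vector lock (ixgbevf_qv_lock_napi()/_poll()) so that
 * only one context cleans a given Rx ring at a time; the loser backs off
 * and retries.
 */
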
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
        struct ixgbevf_q_vector *q_vector;
        int q_vectors, v_idx;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        adapter->eims_enable_mask = 0;

        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                struct ixgbevf_ring *ring;

                q_vector = adapter->q_vector[v_idx];

                ixgbevf_for_each_ring(ring, q_vector->rx)
                        ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

                ixgbevf_for_each_ring(ring, q_vector->tx)
                        ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

                if (q_vector->tx.ring && !q_vector->rx.ring) {
                        /* tx only vector */
                        if (adapter->tx_itr_setting == 1)
                                q_vector->itr = IXGBE_10K_ITR;
                        else
                                q_vector->itr = adapter->tx_itr_setting;
                } else {
                        /* rx or rx/tx vector */
                        if (adapter->rx_itr_setting == 1)
                                q_vector->itr = IXGBE_20K_ITR;
                        else
                                q_vector->itr = adapter->rx_itr_setting;
                }

                /* add q_vector eims value to global eims_enable_mask */
                adapter->eims_enable_mask |= 1 << v_idx;

                ixgbevf_write_eitr(q_vector);
        }

        ixgbevf_set_ivar(adapter, -1, 1, v_idx);
        /* setup eims_other and add value to global eims_enable_mask */
        adapter->eims_other = 1 << v_idx;
        adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

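/* The itr field is kept in register format; shifting right by 2 yields the
 * interval in usecs (e.g. IXGBE_20K_ITR >> 2 = 50 usecs, i.e. roughly 20000
 * interrupts/s), which is what the throughput estimate below relies on.
 */
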
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
                               struct ixgbevf_ring_container *ring_container)
{
        int bytes = ring_container->total_bytes;
        int packets = ring_container->total_packets;
        u32 timepassed_us;
        u64 bytes_perint;
        u8 itr_setting = ring_container->itr;

        if (packets == 0)
                return;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = q_vector->itr >> 2;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > 10)
                        itr_setting = low_latency;
                break;
        case low_latency:
                if (bytes_perint > 20)
                        itr_setting = bulk_latency;
                else if (bytes_perint <= 10)
                        itr_setting = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= 20)
                        itr_setting = low_latency;
                break;
        }

        /* clear work counters since we have the values we need */
        ring_container->total_bytes = 0;
        ring_container->total_packets = 0;

        /* write updated itr to ring container */
        ring_container->itr = itr_setting;
}

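/* ixgbevf_set_itr() folds the per-ring decisions into one EITR value.  The
 * smoothing step new = (10 * new * old) / (9 * new + old) is a weighted
 * harmonic mean that moves the programmed value only part of the way toward
 * the target, damping oscillation between latency ranges.
 */
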
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
        u32 new_itr = q_vector->itr;
        u8 current_itr;

        ixgbevf_update_itr(q_vector, &q_vector->tx);
        ixgbevf_update_itr(q_vector, &q_vector->rx);

        current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = IXGBE_100K_ITR;
                break;
        case low_latency:
                new_itr = IXGBE_20K_ITR;
                break;
        case bulk_latency:
        default:
                new_itr = IXGBE_8K_ITR;
                break;
        }

        if (new_itr != q_vector->itr) {
                /* do an exponential smoothing */
                new_itr = (10 * new_itr * q_vector->itr) /
                          ((9 * new_itr) + q_vector->itr);

                /* save the algorithm value here */
                q_vector->itr = new_itr;

                ixgbevf_write_eitr(q_vector);
        }
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
        struct ixgbevf_adapter *adapter = data;
        struct ixgbe_hw *hw = &adapter->hw;

        hw->mac.get_link_status = 1;

        if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
            !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                mod_timer(&adapter->watchdog_timer, jiffies);

        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

        return IRQ_HANDLED;
}

/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
        struct ixgbevf_q_vector *q_vector = data;

        /* EIAM disabled interrupts (on this vector) for us */
        if (q_vector->rx.ring || q_vector->tx.ring)
                napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

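/* The two helpers below push a ring onto the head of a q_vector's singly
 * linked ring list (via ring->next), which is what the
 * ixgbevf_for_each_ring() iterators walk.
 */
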
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
                                     int r_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->rx_ring[r_idx]->next = q_vector->rx.ring;
        q_vector->rx.ring = a->rx_ring[r_idx];
        q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
                                     int t_idx)
{
        struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

        a->tx_ring[t_idx]->next = q_vector->tx.ring;
        q_vector->tx.ring = a->tx_ring[t_idx];
        q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
        int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);
                goto out;
        }

        /*
         * If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
        for (i = v_start; i < q_vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < q_vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        int vector, err;
        int ri = 0, ti = 0;

        for (vector = 0; vector < q_vectors; vector++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
                struct msix_entry *entry = &adapter->msix_entries[vector];

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "TxRx", ri++);
                        ti++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "rx", ri++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", netdev->name, "tx", ti++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
                                  q_vector->name, q_vector);
                if (err) {
                        hw_dbg(&adapter->hw,
                               "request_irq failed for MSIX interrupt Error: %d\n",
                               err);
                        goto free_queue_irqs;
                }
        }

        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbevf_msix_other, 0, netdev->name, adapter);
        if (err) {
                hw_dbg(&adapter->hw,
                       "request_irq for msix_other failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                free_irq(adapter->msix_entries[vector].vector,
                         adapter->q_vector[vector]);
        }
        /* This failure is non-recoverable - it indicates the system is
         * out of MSIX vector resources and the VF driver cannot run
         * without them.  Set the number of msix vectors to zero
         * indicating that not enough can be allocated.  The error
         * will be returned to the user indicating device open failed.
         * Any further attempts to force the driver to open will also
         * fail.  The only way to recover is to unload the driver and
         * reload it again.  If the system has recovered some MSIX
         * vectors then it may succeed.
         */
        adapter->num_msix_vectors = 0;
        return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];

                q_vector->rx.ring = NULL;
                q_vector->tx.ring = NULL;
                q_vector->rx.count = 0;
                q_vector->tx.count = 0;
        }
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
        int err = 0;

        err = ixgbevf_request_msix_irqs(adapter);

        if (err)
                hw_dbg(&adapter->hw,
                       "request_irq failed, Error %d\n", err);

        return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
        int i, q_vectors;

        q_vectors = adapter->num_msix_vectors;
        i = q_vectors - 1;

        free_irq(adapter->msix_entries[i].vector, adapter);
        i--;

        for (; i >= 0; i--) {
                /* free only the irqs that were actually requested */
                if (!adapter->q_vector[i]->rx.ring &&
                    !adapter->q_vector[i]->tx.ring)
                        continue;

                free_irq(adapter->msix_entries[i].vector,
                         adapter->q_vector[i]);
        }

        ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

        IXGBE_WRITE_FLUSH(hw);

        for (i = 0; i < adapter->num_msix_vectors; i++)
                synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
                                      struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u64 tdba = ring->dma;
        int wait_loop = 10;
        u32 txdctl = IXGBE_TXDCTL_ENABLE;
        u8 reg_idx = ring->reg_idx;

        /* disable queue to avoid issues while updating state */
        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
        IXGBE_WRITE_FLUSH(hw);

        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_tx_desc));

        /* disable head writeback */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

        /* enable relaxed ordering */
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
                        (IXGBE_DCA_TXCTRL_DESC_RRO_EN |
                         IXGBE_DCA_TXCTRL_DATA_RRO_EN));

        /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
        ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

        /* reset ntu and ntc to place SW in sync with hardware */
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        /* In order to avoid issues WTHRESH + PTHRESH should always be equal
         * to or less than the number of on chip descriptors, which is
         * currently 40.
         */
        txdctl |= (8 << 16);    /* WTHRESH = 8 */

        /* Setting PTHRESH to 32 improves performance */
        txdctl |= (1 << 8) |    /* HTHRESH = 1 */
                   32;          /* PTHRESH = 32 */

        IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

        /* poll to verify queue is enabled */
        do {
                usleep_range(1000, 2000);
                txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
        } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
        if (!wait_loop)
                pr_err("Could not enable Tx Queue %d\n", reg_idx);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
        int i;

        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

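/* SRRCTL programming note: the packet buffer size field is expressed in
 * 1KB granularity, so ixgbevf_configure_srrctl() rounds rx_buf_len up to
 * 1024 before shifting it into place, and also sets DROP_EN plus the
 * one-buffer advanced descriptor type.
 */
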
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
        struct ixgbevf_ring *rx_ring;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 srrctl;

        rx_ring = adapter->rx_ring[index];

        srrctl = IXGBE_SRRCTL_DROP_EN;

        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
                  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

        IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        /* PSRTYPE must be initialized in 82599 */
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
                      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
                      IXGBE_PSRTYPE_L2HDR;

        if (adapter->num_rx_queues > 1)
                psrtype |= 1 << 29;

        IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}

static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i;
        u16 rx_buf_len;

        /* notify the PF of our intent to use this size of frame */
        ixgbevf_rlpml_set_vf(hw, max_frame);

        /* PF will allow an extra 4 bytes past for vlan tagged frames */
        max_frame += VLAN_HLEN;

        /*
         * Allocate buffer sizes that fit well into 32K and
         * take into account max frame size of 9.5K
         */
        if ((hw->mac.type == ixgbe_mac_X540_vf) &&
            (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
                rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
        else if (max_frame <= IXGBEVF_RXBUFFER_2K)
                rx_buf_len = IXGBEVF_RXBUFFER_2K;
        else if (max_frame <= IXGBEVF_RXBUFFER_4K)
                rx_buf_len = IXGBEVF_RXBUFFER_4K;
        else if (max_frame <= IXGBEVF_RXBUFFER_8K)
                rx_buf_len = IXGBEVF_RXBUFFER_8K;
        else
                rx_buf_len = IXGBEVF_RXBUFFER_10K;

        for (i = 0; i < adapter->num_rx_queues; i++)
                adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
}

#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
                                     struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        if (IXGBE_REMOVED(hw->hw_addr))
                return;
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;

        /* write value back with RXDCTL.ENABLE bit cleared */
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

        /* the hardware may take up to 100us to really disable the rx queue */
        do {
                udelay(10);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

        if (!wait_loop)
                pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
                       reg_idx);
}

static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
                                         struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        if (IXGBE_REMOVED(hw->hw_addr))
                return;
        do {
                usleep_range(1000, 2000);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

        if (!wait_loop)
                pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
                       reg_idx);
}

static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
                                      struct ixgbevf_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        /* disable queue to avoid issues while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
        ixgbevf_disable_rx_queue(adapter, ring);

        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
        IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_rx_desc));

        /* enable relaxed ordering */
        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
                        IXGBE_DCA_RXCTRL_DESC_RRO_EN);

        /* reset head and tail pointers */
        IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
        ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

        /* reset ntu and ntc to place SW in sync with hardware */
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        ixgbevf_configure_srrctl(adapter, reg_idx);

        /* prevent DMA from exceeding buffer space available */
        rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
        rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
        rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

        ixgbevf_rx_desc_queue_enable(adapter, ring);
        ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
        int i;

        ixgbevf_setup_psrtype(adapter);

        /* set_rx_buffer_len must be called before ring initialization */
        ixgbevf_set_rx_buffer_len(adapter);

        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
         */
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
                                   __be16 proto, u16 vid)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int err;

        spin_lock_bh(&adapter->mbx_lock);

        /* add VID to filter table */
        err = hw->mac.ops.set_vfta(hw, vid, 0, true);

        spin_unlock_bh(&adapter->mbx_lock);

        /* translate error return types so error makes sense */
        if (err == IXGBE_ERR_MBX)
                return -EIO;

        if (err == IXGBE_ERR_INVALID_ARGUMENT)
                return -EACCES;

        set_bit(vid, adapter->active_vlans);

        return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
                                    __be16 proto, u16 vid)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int err = -EOPNOTSUPP;

        spin_lock_bh(&adapter->mbx_lock);

        /* remove VID from filter table */
        err = hw->mac.ops.set_vfta(hw, vid, 0, false);

        spin_unlock_bh(&adapter->mbx_lock);

        clear_bit(vid, adapter->active_vlans);

        return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
        u16 vid;

        for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
                ixgbevf_vlan_rx_add_vid(adapter->netdev,
                                        htons(ETH_P_8021Q), vid);
}

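/* The PF exposes a limited pool of unicast MAC filters to each VF;
 * ixgbevf_write_uc_addr_list() below refuses lists longer than 10 entries
 * and programs the rest one mailbox message at a time.
 */
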
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int count = 0;

        if ((netdev_uc_count(netdev)) > 10) {
                pr_err("Too many unicast filters - No Space\n");
                return -ENOSPC;
        }

        if (!netdev_uc_empty(netdev)) {
                struct netdev_hw_addr *ha;

                netdev_for_each_uc_addr(ha, netdev) {
                        hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
                        udelay(200);
                }
        } else {
                /*
                 * If the list is empty then send message to PF driver to
                 * clear all macvlans on this VF.
                 */
                hw->mac.ops.set_uc_addr(hw, 0, NULL);
        }

        return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        spin_lock_bh(&adapter->mbx_lock);

        /* reprogram multicast list */
        hw->mac.ops.update_mc_addr_list(hw, netdev);

        ixgbevf_write_uc_addr_list(netdev);

        spin_unlock_bh(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
        int q_idx;
        struct ixgbevf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
                ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
                napi_enable(&q_vector->napi);
        }
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
        int q_idx;
        struct ixgbevf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
                while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
                        pr_info("QV %d locked\n", q_idx);
                        usleep_range(1000, 20000);
                }
#endif /* CONFIG_NET_RX_BUSY_POLL */
        }
}

static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        unsigned int def_q = 0;
        unsigned int num_tcs = 0;
        unsigned int num_rx_queues = 1;
        int err;

        spin_lock_bh(&adapter->mbx_lock);

        /* fetch queue configuration from the PF */
        err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

        spin_unlock_bh(&adapter->mbx_lock);

        if (err)
                return err;

        if (num_tcs > 1) {
                /* update default Tx ring register index */
                adapter->tx_ring[0]->reg_idx = def_q;

                /* we need as many queues as traffic classes */
                num_rx_queues = num_tcs;
        }

        /* if we have a bad config abort request queue reset */
        if (adapter->num_rx_queues != num_rx_queues) {
                /* force mailbox timeout to prevent further messages */
                hw->mbx.timeout = 0;

                /* wait for watchdog to come around and bail us out */
                adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
        }

        return 0;
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
        ixgbevf_configure_dcb(adapter);

        ixgbevf_set_rx_mode(adapter->netdev);

        ixgbevf_restore_vlan(adapter);

        ixgbevf_configure_tx(adapter);
        ixgbevf_configure_rx(adapter);
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
        /* Only save pre-reset stats if there are some */
        if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
                adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
                        adapter->stats.base_vfgprc;
                adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
                        adapter->stats.base_vfgptc;
                adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
                        adapter->stats.base_vfgorc;
                adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
                        adapter->stats.base_vfgotc;
                adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
                        adapter->stats.base_vfmprc;
        }
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
        adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
        adapter->stats.last_vfgorc |=
                (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
        adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
        adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
        adapter->stats.last_vfgotc |=
                (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
        adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

        adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
        adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
        adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
        adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
        adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int api[] = { ixgbe_mbox_api_11,
                      ixgbe_mbox_api_10,
                      ixgbe_mbox_api_unknown };
        int err = 0, idx = 0;

        spin_lock_bh(&adapter->mbx_lock);

        while (api[idx] != ixgbe_mbox_api_unknown) {
                err = ixgbevf_negotiate_api_version(hw, api[idx]);
                if (!err)
                        break;
                idx++;
        }

        spin_unlock_bh(&adapter->mbx_lock);
}

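/* Mailbox API note: ixgbevf_negotiate_api() above walks the api[] list from
 * the newest version down; ixgbe_mbox_api_11 is presumably what
 * ixgbevf_get_queues() needs for per-TC queue information, with api_10 as
 * the fallback.
 */
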
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;

        ixgbevf_configure_msix(adapter);

        spin_lock_bh(&adapter->mbx_lock);

        if (is_valid_ether_addr(hw->mac.addr))
                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
        else
                hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

        spin_unlock_bh(&adapter->mbx_lock);

        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DOWN, &adapter->state);
        ixgbevf_napi_enable_all(adapter);

        /* enable transmits */
        netif_tx_start_all_queues(netdev);

        ixgbevf_save_reset_stats(adapter);
        ixgbevf_init_last_counter_stats(adapter);

        hw->mac.get_link_status = 1;
        mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        ixgbevf_configure(adapter);

        ixgbevf_up_complete(adapter);

        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_VTEICR);

        ixgbevf_irq_enable(adapter);
}

/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
        unsigned long size;
        unsigned int i;

        if (!rx_ring->rx_buffer_info)
                return;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                struct ixgbevf_rx_buffer *rx_buffer_info;

                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->dma) {
                        dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_buffer_info->dma = 0;
                }
                if (rx_buffer_info->skb) {
                        struct sk_buff *skb = rx_buffer_info->skb;

                        rx_buffer_info->skb = NULL;
                        do {
                                struct sk_buff *this = skb;

                                skb = IXGBE_CB(skb)->prev;
                                dev_kfree_skb(this);
                        } while (skb);
                }
        }

        size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);
}

/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned long size;
        unsigned int i;

        if (!tx_ring->tx_buffer_info)
                return;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
                ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }

        size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_buffer_info, 0, size);

        memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}

void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        /* signal that we are down to the interrupt handler */
        if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
                return; /* do nothing if already down */

        /* disable all enabled rx queues */
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

        netif_tx_disable(netdev);

        msleep(10);

        netif_tx_stop_all_queues(netdev);

        ixgbevf_irq_disable(adapter);

        ixgbevf_napi_disable_all(adapter);

        del_timer_sync(&adapter->watchdog_timer);
        /* can't call flush scheduled work here because it can deadlock
         * if linkwatch_event tries to acquire the rtnl_lock which we are
         * holding
         */
        while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
                msleep(1);

        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                u8 reg_idx = adapter->tx_ring[i]->reg_idx;

                IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
                                IXGBE_TXDCTL_SWFLSH);
        }

        netif_carrier_off(netdev);

        if (!pci_channel_offline(adapter->pdev))
                ixgbevf_reset(adapter);

        ixgbevf_clean_all_tx_rings(adapter);
        ixgbevf_clean_all_rx_rings(adapter);
}

void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
        WARN_ON(in_interrupt());

        while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
                msleep(1);

        ixgbevf_down(adapter);
        ixgbevf_up(adapter);

        clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}

void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;

        if (hw->mac.ops.reset_hw(hw)) {
                hw_dbg(hw, "PF still resetting\n");
        } else {
                hw->mac.ops.init_hw(hw);
                ixgbevf_negotiate_api(adapter);
        }

        if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
                       netdev->addr_len);
                memcpy(netdev->perm_addr, adapter->hw.mac.addr,
                       netdev->addr_len);
        }
}

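/* pci_enable_msix_range() returns the number of vectors actually allocated
 * when it can satisfy at least the minimum (MIN_MSIX_COUNT here), or a
 * negative errno; ixgbevf_acquire_msix_vectors() below treats anything
 * negative as fatal for the VF.
 */
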
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
                                        int vectors)
{
        int vector_threshold;

        /* We'll want at least 2 (vector_threshold):
         * 1) TxQ[0] + RxQ[0] handler
         * 2) Other (Link Status Change, etc.)
         */
        vector_threshold = MIN_MSIX_COUNT;

        /* The more we get, the more we will assign to Tx/Rx Cleanup
         * for the separate queues...where Rx Cleanup >= Tx Cleanup.
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
        vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                        vector_threshold, vectors);

        if (vectors < 0) {
                dev_err(&adapter->pdev->dev,
                        "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
                return vectors;
        }

        /* Adjust for only the vectors we'll use, which is minimum
         * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
         * vectors we were allocated.
         */
        adapter->num_msix_vectors = vectors;

        return 0;
}

/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        unsigned int def_q = 0;
        unsigned int num_tcs = 0;
        int err;

        /* Start with base case */
        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;

        spin_lock_bh(&adapter->mbx_lock);

        /* fetch queue configuration from the PF */
        err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

        spin_unlock_bh(&adapter->mbx_lock);

        if (err)
                return;

        /* we need as many queues as traffic classes */
        if (num_tcs > 1)
                adapter->num_rx_queues = num_tcs;
}

/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
        struct ixgbevf_ring *ring;
        int rx = 0, tx = 0;

        for (; tx < adapter->num_tx_queues; tx++) {
                ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
                        goto err_allocation;

                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;
                ring->count = adapter->tx_ring_count;
                ring->queue_index = tx;
                ring->reg_idx = tx;

                adapter->tx_ring[tx] = ring;
        }

        for (; rx < adapter->num_rx_queues; rx++) {
                ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
                        goto err_allocation;

                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;

                ring->count = adapter->rx_ring_count;
                ring->queue_index = rx;
                ring->reg_idx = rx;

                adapter->rx_ring[rx] = ring;
        }

        return 0;

err_allocation:
        while (tx) {
                kfree(adapter->tx_ring[--tx]);
                adapter->tx_ring[tx] = NULL;
        }

        while (rx) {
                kfree(adapter->rx_ring[--rx]);
                adapter->rx_ring[rx] = NULL;
        }
        return -ENOMEM;
}

/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err = 0;
        int vector, v_budget;

        /*
         * It's easy to be greedy for MSI-X vectors, but it really
         * doesn't do us much good if we have a lot more vectors
         * than CPU's.  So let's be conservative and only ask for
         * (roughly) the same number of vectors as there are CPU's.
         * The default is to use pairs of vectors.
         */
        v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
        v_budget = min_t(int, v_budget, num_online_cpus());
        v_budget += NON_Q_VECTORS;

        /* A failure in MSI-X entry allocation isn't fatal, but it does
         * mean we disable MSI-X capabilities of the adapter.
         */
        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
        if (!adapter->msix_entries) {
                err = -ENOMEM;
                goto out;
        }

        for (vector = 0; vector < v_budget; vector++)
                adapter->msix_entries[vector].entry = vector;

        err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
        if (err)
                goto out;

        err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
        if (err)
                goto out;

        err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
        return err;
}

/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
        int q_idx, num_q_vectors;
        struct ixgbevf_q_vector *q_vector;

        num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
                if (!q_vector)
                        goto err_out;
                q_vector->adapter = adapter;
                q_vector->v_idx = q_idx;
                netif_napi_add(adapter->netdev, &q_vector->napi,
                               ixgbevf_poll, 64);
#ifdef CONFIG_NET_RX_BUSY_POLL
                napi_hash_add(&q_vector->napi);
#endif
                adapter->q_vector[q_idx] = q_vector;
        }

        return 0;

err_out:
        while (q_idx) {
                q_idx--;
                q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
                napi_hash_del(&q_vector->napi);
#endif
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
                adapter->q_vector[q_idx] = NULL;
        }
        return -ENOMEM;
}

/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
        int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

                adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
                napi_hash_del(&q_vector->napi);
#endif
                netif_napi_del(&q_vector->napi);
                kfree(q_vector);
        }
}

/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
}

/**
 * ixgbevf_init_interrupt_scheme - Determine if MSI-X is supported and init
 * @adapter: board private structure to initialize
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	       adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

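/* Note (added commentary, not from the original source): the error labels
 * above release resources in reverse acquisition order, so a failure at any
 * stage frees only what was already set up; ixgbevf_clear_interrupt_scheme()
 * below performs the equivalent teardown for the fully initialized case.
 */
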
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state. Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}

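/* Illustrative rollover trace for UPDATE_VF_COUNTER_32bit (made-up values,
 * added commentary): suppose the software counter holds 0x0FFFFFFF0 (so
 * last_counter = 0xFFFFFFF0) and the register now reads 0x00000010. Since
 * current_counter < last_counter, the 32-bit register has wrapped, so the
 * 64-bit counter first gains 0x100000000 and then has its low 32 bits
 * replaced by the mask-and-or, ending at 0x100000010: the 0x20 events that
 * spanned the wrap are counted exactly once.
 */
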
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!adapter->link_up)
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
	}
}

/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/* Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];

		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}

/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}

	ixgbevf_queue_reset_subtask(adapter);

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/* Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;

			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	/* no adapter pointer is in scope here, so report through the ring's
	 * device like the Rx path does
	 */
	dev_err(tx_ring->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSI-X vector resources may have reset the number
	 * of msix vectors variable to zero. The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSI-X vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the VF can't start.
		 */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/* Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:
	return err;
}

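/* Note (added commentary, not from the original source): the error labels
 * above unwind in reverse order of setup, first bringing the interface back
 * down, then freeing Rx rings, then Tx rings, so a failure at any stage
 * releases exactly what was already allocated and nothing more.
 */
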
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

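/* Note (added commentary): a context descriptor occupies a regular slot in
 * the Tx ring, which is why next_to_use advances here just as it does for
 * data descriptors; the "+ 1 desc for context descriptor" term in the
 * transmit path's descriptor budgeting accounts for this slot.
 */
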
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}

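/* Illustrative packing for the context descriptor built above (example
 * values, not from the original source): for a TCP segment with a 20-byte
 * TCP header and an MSS of 1448, mss_l4len_idx carries l4len = 20 in the
 * IXGBE_ADVTXD_L4LEN field, mss = 1448 in the IXGBE_ADVTXD_MSS field, and
 * context index 1; vlan_macip_lens likewise packs the IP header length,
 * the L2 header offset, and any VLAN tag into a single 32-bit word.
 */
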
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);
}

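/* Note (added commentary): the IPPROTO_UDP case above sets no L4T bits
 * because UDP is the descriptor's default (all-zero) L4 packet type
 * encoding; only the L4 header length has to be supplied for UDP
 * checksum insertion.
 */
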
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

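/* Why stop-then-recheck (added commentary): between the caller's first
 * ixgbevf_desc_unused() test and netif_stop_subqueue() the Tx cleanup path
 * may have freed descriptors. The smp_mb() orders the queue-state write
 * against the re-read of the ring indices, so either this CPU sees the new
 * room and restarts the queue, or the cleanup path sees the stopped queue
 * and wakes it; without the barrier both sides could miss each other and
 * the queue would stall.
 */
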
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

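/* Worked descriptor budget (illustrative numbers, added commentary): for a
 * linear 1500-byte skb with no frags, count = TXD_USE_COUNT(1500) = 1, and
 * the ixgbevf_maybe_stop_tx() check above reserves count + 3 = 4 free
 * descriptors: one for the head data, one for a possible context
 * descriptor, and a two-descriptor gap so the tail never touches the head.
 */
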
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

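/* Example MTU check (illustrative numbers, added commentary): with a PF
 * negotiated to mailbox API 1.1 and new_mtu = 9000, max_frame =
 * 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 9018 bytes, which must not
 * exceed the jumbo limit IXGBE_MAX_JUMBO_FRAME_SIZE; a legacy 82599 VF is
 * instead held to MAXIMUM_ETHERNET_VLAN_SIZE.
 */
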
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}

static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}

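/* Note (added commentary): the u64_stats_fetch_begin_irq()/retry_irq()
 * loops above simply re-read a ring's bytes/packets pair if the hot path
 * updated it mid-read, yielding tear-free 64-bit counters on 32-bit hosts
 * without taking a lock in the fast path.
 */
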
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
#endif
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
	set_bit(__IXGBEVF_WORK_INIT, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	return err;
}

/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	free_netdev(netdev);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = ixgbevf_suspend,
	.resume   = ixgbevf_resume,
#endif
	.shutdown = ixgbevf_shutdown,
	.err_handler = &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

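/* Usage note (added commentary, not from the original source): the PCI core
 * normally loads this module automatically by matching ixgbevf_pci_tbl, but
 * it can also be loaded by hand with extra logging, e.g.:
 *
 *   modprobe ixgbevf debug=16
 *
 * where "debug" is this driver's message-level module parameter.
 */
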
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */