/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),  board_X540_vf },
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;

	if (!hw->hw_addr)
		return;
	hw->hw_addr = NULL;
	dev_err(&adapter->pdev->dev, "Adapter removed\n");
	if (test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		schedule_work(&adapter->watchdog_task);
}
static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
{
	u32 value;

	/* The following check not only optimizes a bit by not
	 * performing a read on the status register when the
	 * register just read was a status register read that
	 * returned IXGBE_FAILED_READ_REG. It also blocks any
	 * potential recursion.
	 */
	if (reg == IXGBE_VFSTATUS) {
		ixgbevf_remove_adapter(hw);
		return;
	}
	value = ixgbevf_read_reg(hw, IXGBE_VFSTATUS);
	if (value == IXGBE_FAILED_READ_REG)
		ixgbevf_remove_adapter(hw);
}
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
{
	u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
	u32 value;

	if (IXGBE_REMOVED(reg_addr))
		return IXGBE_FAILED_READ_REG;
	value = readl(reg_addr + reg);
	if (unlikely(value == IXGBE_FAILED_READ_REG))
		ixgbevf_check_remove(hw, reg);
	return value;
}
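/* Background note (informational, not from the original sources): a PCIe
 * read that targets a surprise-removed device completes with all ones, which
 * is why IXGBE_FAILED_READ_REG is the all-ones pattern and why an all-ones
 * readl() result above is cross-checked against the VF status register
 * before the adapter is declared gone.
 */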
static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	rx_ring->next_to_use = val;

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	ixgbevf_write_tail(rx_ring, val);
}
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
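/* Informational sketch of the IVAR indexing above: each VTIVAR register
 * packs four 8-bit entries, two queues per register with one Rx byte and
 * one Tx byte each.  For example, Rx queue 3 (direction = 0) selects
 * register VTIVAR(3 >> 1) = VTIVAR(1) and index = 16 * (3 & 1) + 8 * 0 = 16,
 * so its vector number (with IXGBE_IVAR_ALLOC_VAL set) lands in bits 23:16.
 */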
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(tx_ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
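/* Worked example of the worst-case descriptor math (informational): one
 * descriptor can carry at most IXGBE_MAX_DATA_PER_TXD = 2^14 = 16384 bytes,
 * so a single 60000-byte buffer costs TXD_USE_COUNT(60000) =
 * DIV_ROUND_UP(60000, 16384) = 4 descriptors.  DESC_NEEDED then budgets one
 * descriptor per possible page fragment plus slack (the exact breakdown of
 * the "+ 4" - head, context descriptor and split cases - is an assumption
 * here, not spelled out by the original comment).
 */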
static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = tx_ring->count / 2;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buffer->skb = NULL;
		dma_unmap_len_set(tx_buffer, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}
/**
 * ixgbevf_rx_skb - Helper function to determine proper Rx method
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
			   struct sk_buff *skb, u8 status,
			   union ixgbe_adv_rx_desc *rx_desc)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	skb_mark_napi_id(skb, &q_vector->napi);

	if (ixgbevf_qv_busy_polling(q_vector)) {
		netif_receive_skb(skb);
		/* exit early if we busy polled */
		return;
	}
#endif /* CONFIG_NET_RX_BUSY_POLL */

	ixgbevf_receive_skb(q_vector, skb, status, rx_desc);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @ring: pointer to Rx descriptor ring structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		ring->rx_stats.csum_err++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		ring->rx_stats.csum_err++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	unsigned int i = rx_ring->next_to_use;

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->skb) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb)
				goto no_buffers;

			bi->skb = skb;

			bi->dma = dma_map_single(rx_ring->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
				dev_kfree_skb(skb);
				bi->skb = NULL;
				dev_err(rx_ring->dev, "Rx DMA map failed\n");
				break;
			}
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

no_buffers:
	rx_ring->rx_stats.alloc_rx_buff_failed++;
	if (rx_ring->next_to_use != i)
		ixgbevf_release_rx_desc(rx_ring, i);
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				struct ixgbevf_ring *rx_ring,
				int budget)
{
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			rx_ring->rx_stats.non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		/* Workaround hardware that can't do proper VEPA multicast
		 * source pruning.
		 */
		if ((skb->pkt_type == PACKET_BROADCAST ||
		     skb->pkt_type == PACKET_MULTICAST) &&
		    ether_addr_equal(rx_ring->netdev->dev_addr,
				     eth_hdr(skb)->h_source)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = ixgbevf_desc_unused(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return total_rx_packets;
}
/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean one or more rings associated with a q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (!ixgbevf_qv_lock_napi(q_vector))
		return budget;
#endif

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling
	 */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= (ixgbevf_clean_rx_irq(q_vector, ring,
							per_ring_budget)
				   < per_ring_budget);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;

#ifdef CONFIG_NET_RX_BUSY_POLL
	ixgbevf_qv_unlock_napi(q_vector);
#endif

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int found = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return LL_FLUSH_FAILED;

	if (!ixgbevf_qv_lock_poll(q_vector))
		return LL_FLUSH_BUSY;

	ixgbevf_for_each_ring(ring, q_vector->rx) {
		found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
#ifdef BP_EXTENDED_STATS
		if (found)
			ring->stats.cleaned += found;
		else
			ring->stats.misses++;
#endif
		if (found)
			break;
	}

	ixgbevf_qv_unlock_poll(q_vector);

	return found;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}
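/* Informational example of the classification above (assuming the usual
 * IXGBE_20K_ITR encoding of 200): timepassed_us = 200 >> 2 = 50 usec between
 * interrupts, so 2000 bytes received in one slice gives bytes_perint = 40,
 * which is above the threshold of 20 and promotes a low_latency ring to
 * bulk_latency; sustained light traffic below 10 bytes/usec demotes it back.
 */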
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}
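/* The smoothing above moves the programmed value only part of the way toward
 * the new target each pass.  Informational example: stepping from
 * q_vector->itr = 200 (20K ints/s) toward new_itr = 500 (8K ints/s) yields
 * (10 * 500 * 200) / (9 * 500 + 200) = 1000000 / 4700 = 212, so the rate
 * converges over several interrupts instead of jumping at once.
 */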
static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx]->next = q_vector->rx.ring;
	q_vector->rx.ring = a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx]->next = q_vector->tx.ring;
	q_vector->tx.ring = a->tx_ring[t_idx];
	q_vector->tx.count++;
}
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
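/* Informational example of the grouped mapping above: with 4 Rx queues and
 * 2 q_vectors, vector 0 gets rqpv = DIV_ROUND_UP(4, 2) = 2 queues (0-1) and
 * vector 1 gets rqpv = DIV_ROUND_UP(2, 1) = 2 queues (2-3); with 3 queues,
 * vector 0 would take DIV_ROUND_UP(3, 2) = 2 and vector 1 the remaining 1,
 * so the remainder always spreads across the later vectors.
 */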
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt Error: %d\n",
			       err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	/* This failure is non-recoverable - it indicates the system is
	 * out of MSIX vector resources and the VF driver cannot run
	 * without them.  Set the number of msix vectors to zero
	 * indicating that not enough can be allocated.  The error
	 * will be returned to the user indicating device open failed.
	 * Any further attempts to force the driver to open will also
	 * fail.  The only way to recover is to unload the driver and
	 * reload it again.  If the system has recovered some MSIX
	 * vectors then it may succeed.
	 */
	adapter->num_msix_vectors = 0;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 tdba = ring->dma;
	int wait_loop = 10;
	u32 txdctl = IXGBE_TXDCTL_ENABLE;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(reg_idx), tdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(reg_idx), tdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_tx_desc));

	/* disable head writeback */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(reg_idx), 0);

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(reg_idx),
			(IXGBE_DCA_TXCTRL_DESC_RRO_EN |
			 IXGBE_DCA_TXCTRL_DATA_RRO_EN));

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* In order to avoid issues WTHRESH + PTHRESH should always be equal
	 * to or less than the number of on chip descriptors, which is
	 * currently 40.
	 */
	txdctl |= (8 << 16);    /* WTHRESH = 8 */

	/* Setting PTHRESH to 32 improves performance */
	txdctl |= (1 << 8) |    /* HTHRESH = 1 */
		   32;          /* PTHRESH = 32 */

	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	do {
		usleep_range(1000, 2000);
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(reg_idx));
	} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
	if (!wait_loop)
		pr_err("Could not enable Tx Queue %d\n", reg_idx);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u32 i;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* PSRTYPE must be initialized in 82599 */
	u32 psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		      IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
		      IXGBE_PSRTYPE_L2HDR;

	if (adapter->num_rx_queues > 1)
		psrtype |= 1 << 29;

	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Allocate buffer sizes that fit well into 32K and
	 * take into account max frame size of 9.5K
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_2K)
		rx_buf_len = IXGBEVF_RXBUFFER_2K;
	else if (max_frame <= IXGBEVF_RXBUFFER_4K)
		rx_buf_len = IXGBEVF_RXBUFFER_4K;
	else if (max_frame <= IXGBEVF_RXBUFFER_8K)
		rx_buf_len = IXGBEVF_RXBUFFER_8K;
	else
		rx_buf_len = IXGBEVF_RXBUFFER_10K;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->rx_buf_len = rx_buf_len;
}
#define IXGBEVF_MAX_RX_DESC_POLL 10
static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	rxdctl &= ~IXGBE_RXDCTL_ENABLE;

	/* write value back with RXDCTL.ENABLE bit cleared */
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	/* the hardware may take up to 100us to really disable the rx queue */
	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);
}
static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
					 struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	if (IXGBE_REMOVED(hw->hw_addr))
		return;
	do {
		usleep_range(1000, 2000);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not set while polling\n",
		       reg_idx);
}
static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
				      struct ixgbevf_ring *ring)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 rdba = ring->dma;
	u32 rxdctl;
	u8 reg_idx = ring->reg_idx;

	/* disable queue to avoid issues while updating state */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	ixgbevf_disable_rx_queue(adapter, ring);

	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(reg_idx), rdba & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(reg_idx), rdba >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
			ring->count * sizeof(union ixgbe_adv_rx_desc));

	/* enable relaxed ordering */
	IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
			IXGBE_DCA_RXCTRL_DESC_RRO_EN);

	/* reset head and tail pointers */
	IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
	ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);

	/* reset ntu and ntc to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	ixgbevf_configure_srrctl(adapter, reg_idx);

	/* prevent DMA from exceeding buffer space available */
	rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
	rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN;
	rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);

	ixgbevf_rx_desc_queue_enable(adapter, ring);
	ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	int i;

	ixgbevf_setup_psrtype(adapter);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock_bh(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock_bh(&adapter->mbx_lock);

	/* remove VID from filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock_bh(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev,
					htons(ETH_P_8021Q), vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast and unicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list, unicast address list or the network interface flags are updated.
 * This routine is responsible for configuring the hardware for proper
 * multicast mode and configuring requested unicast filters.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock_bh(&adapter->mbx_lock);

	/* reprogram multicast list */
	hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
#endif
		napi_enable(&q_vector->napi);
	}
}
static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
#ifdef CONFIG_NET_RX_BUSY_POLL
		while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
			pr_info("QV %d locked\n", q_idx);
			usleep_range(1000, 20000);
		}
#endif /* CONFIG_NET_RX_BUSY_POLL */
	}
}
static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	unsigned int num_rx_queues = 1;
	int err;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return err;

	if (num_tcs > 1) {
		/* update default Tx ring register index */
		adapter->tx_ring[0]->reg_idx = def_q;

		/* we need as many queues as traffic classes */
		num_rx_queues = num_tcs;
	}

	/* if we have a bad config abort request queue reset */
	if (adapter->num_rx_queues != num_rx_queues) {
		/* force mailbox timeout to prevent further messages */
		hw->mbx.timeout = 0;

		/* wait for watchdog to come around and bail us out */
		adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
	}

	return 0;
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	ixgbevf_configure_dcb(adapter);

	ixgbevf_set_rx_mode(adapter->netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_11,
		      ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock_bh(&adapter->mbx_lock);

	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure_msix(adapter);

	spin_lock_bh(&adapter->mbx_lock);

	if (is_valid_ether_addr(hw->mac.addr))
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
	else
		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
{
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	/* signal that we are down to the interrupt handler */
	if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
		return; /* do nothing if already down */

	/* disable all enabled rx queues */
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		u8 reg_idx = adapter->tx_ring[i]->reg_idx;

		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx),
				IXGBE_TXDCTL_SWFLSH);
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	ixgbevf_down(adapter);
	ixgbevf_up(adapter);

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw)) {
		hw_dbg(hw, "PF still resetting\n");
	} else {
		hw->mac.ops.init_hw(hw);
		ixgbevf_negotiate_api(adapter);
	}

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					int vectors)
{
	int vector_threshold;

	/* We'll want at least 2 (vector_threshold):
	 * 1) TxQ[0] + RxQ[0] handler
	 * 2) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return vectors;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = vectors;

	return 0;
}
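/* Note on the call above (background, not from the original comments):
 * pci_enable_msix_range() either allocates some count of vectors within
 * [vector_threshold, vectors] and returns that count, or fails with a
 * negative errno; unlike the older pci_enable_msix(), it never asks the
 * caller to retry with a smaller request.
 */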
/**
 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	unsigned int def_q = 0;
	unsigned int num_tcs = 0;
	int err;

	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;

	spin_lock_bh(&adapter->mbx_lock);

	/* fetch queue configuration from the PF */
	err = ixgbevf_get_queues(hw, &num_tcs, &def_q);

	spin_unlock_bh(&adapter->mbx_lock);

	if (err)
		return;

	/* we need as many queues as traffic classes */
	if (num_tcs > 1)
		adapter->num_rx_queues = num_tcs;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ring *ring;
	int rx = 0, tx = 0;

	for (; tx < adapter->num_tx_queues; tx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = tx;
		ring->reg_idx = tx;

		adapter->tx_ring[tx] = ring;
	}

	for (; rx < adapter->num_rx_queues; rx++) {
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_allocation;

		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		ring->count = adapter->rx_ring_count;
		ring->queue_index = rx;
		ring->reg_idx = rx;

		adapter->rx_ring[rx] = ring;
	}

	return 0;

err_allocation:
	while (tx) {
		kfree(adapter->tx_ring[--tx]);
		adapter->tx_ring[tx] = NULL;
	}

	while (rx) {
		kfree(adapter->rx_ring[--rx]);
		adapter->rx_ring[rx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;
	int vector, v_budget;

	/*
	 * It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's.  So let's be conservative and only ask for
	 * (roughly) the same number of vectors as there are CPU's.
	 * The default is to use pairs of vectors.
	 */
	v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
	v_budget = min_t(int, v_budget, num_online_cpus());
	v_budget += NON_Q_VECTORS;

	/* A failure in MSI-X entry allocation isn't fatal, but it does
	 * mean we disable MSI-X capabilities of the adapter.
	 */
	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
	if (err)
		goto out;

	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto out;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);

out:
	return err;
}
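/* Informational example of the vector budget above, assuming the usual
 * NON_Q_VECTORS value of one (the mailbox/link vector): with 4 Rx queues,
 * 1 Tx queue and 8 online CPUs, v_budget = min(max(4, 1), 8) + 1 = 5
 * MSI-X entries requested.
 */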
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       ixgbevf_poll, 64);
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_add(&q_vector->napi);
#endif
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_del(&q_vector->napi);
#endif
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

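/* The error labels above unwind in reverse order of setup: queues are
 * freed before the q_vectors, and the q_vectors before the MSI-X table,
 * so a failure at any stage releases only what was actually acquired.
 */
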
/**
 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbevf_free_q_vectors(adapter);
	ixgbevf_reset_interrupt_capability(adapter);
}

/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int err;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);

	/* assume legacy case in which PF would only give VF 2 queues */
	hw->mac.max_tx_queues = 2;
	hw->mac.max_rx_queues = 2;

	/* lock to protect mailbox accesses */
	spin_lock_init(&adapter->mbx_lock);

	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state.  Is the PF interface up?\n");
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		ixgbevf_negotiate_api(adapter);
		err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		if (err)
			dev_info(&pdev->dev, "Error reading MAC address\n");
		else if (is_zero_ether_addr(adapter->hw.mac.addr))
			dev_info(&pdev->dev,
				 "MAC address not assigned by administrator.\n");
		memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	}

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_info(&pdev->dev, "Assigning random MAC address\n");
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->rx_itr_setting = 1;
	adapter->tx_itr_setting = 1;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	set_bit(__IXGBEVF_DOWN, &adapter->state);
	return 0;

out:
	return err;
}

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}

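/* Rollover example for the 32-bit macro above: if the last hardware read
 * was 0xFFFFFFF0 and the register now reads 0x00000010, the new value is
 * smaller, so 0x100000000 is added to the software counter's upper bits
 * before the low 32 bits are replaced - the count stays monotonic across
 * the wrap.  The 36-bit variant does the same with a 0x1000000000 epoch
 * for the split LSB/MSB octet counters.
 */
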
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!adapter->link_up)
		return;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->hw_csum_rx_error +=
			adapter->rx_ring[i]->hw_csum_rx_error;
		adapter->rx_ring[i]->hw_csum_rx_error = 0;
	}
}

/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rx.ring || qv->tx.ring)
			eics |= 1 << i;
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}

/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}

/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	s32 need_reset;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			rtnl_lock();
			ixgbevf_down(adapter);
			rtnl_unlock();
		}
		return;
	}
	ixgbevf_queue_reset_subtask(adapter);

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	spin_lock_bh(&adapter->mbx_lock);

	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	spin_unlock_bh(&adapter->mbx_lock);

	if (need_reset) {
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		schedule_work(&adapter->reset_task);
		goto pf_has_reset;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			char *link_speed_string;
			switch (link_speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				link_speed_string = "10 Gbps";
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				link_speed_string = "1 Gbps";
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				link_speed_string = "100 Mbps";
				break;
			default:
				link_speed_string = "unknown speed";
				break;
			}
			dev_info(&adapter->pdev->dev,
				 "NIC Link is Up, %s\n", link_speed_string);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
	    !test_bit(__IXGBEVF_REMOVING, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_ring *tx_ring)
{
	ixgbevf_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i]->desc)
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(tx_ring->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}

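/* Sizing note: union ixgbe_adv_tx_desc is 16 bytes, so a default ring of
 * IXGBEVF_DEFAULT_TXD entries (e.g. 1024) needs count * 16 bytes of
 * descriptor memory; ALIGN(..., 4096) rounds that up to whole pages so
 * the coherent DMA allocation stays page aligned.
 */
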
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *rx_ring)
{
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_ring *rx_ring)
{
	ixgbevf_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i]->desc)
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* A previous failure to open the device because of a lack of
	 * available MSIX vector resources may have reset the number
	 * of msix vectors variable to zero.  The only way to recover
	 * is to unload/reload the driver and hope that the system has
	 * been able to recover some MSIX vector resources.
	 */
	if (!adapter->num_msix_vectors)
		return -ENOMEM;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:
	return err;
}

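/* The ordering above mirrors the teardown in ixgbevf_close(): resources
 * are allocated before the interface is brought up, and the error paths
 * release them in reverse (IRQs, then Rx rings, then Tx rings), so a
 * half-initialized interface is never left visible to the stack.
 */
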
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}

static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;

	if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
		return;

	adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;

	/* if interface is down do nothing */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	/* Hardware has to reinitialize queues and interrupts to
	 * match packet buffer alignment. Unfortunately, the
	 * hardware is not flexible enough to do this dynamically.
	 */
	if (netif_running(dev))
		ixgbevf_close(dev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_init_interrupt_scheme(adapter);

	if (netif_running(dev))
		ixgbevf_open(dev);
}

static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
				u32 vlan_macip_lens, u32 type_tucmd,
				u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = 0;
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

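/* A context descriptor occupies a ring slot but carries no packet data;
 * it tells the hardware how to interpret the data descriptors that
 * follow (header lengths, VLAN tag, and the TSO MSS/index fields).
 */
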
static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
		       struct ixgbevf_tx_buffer *first,
		       u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens, type_tucmd;
	u32 mss_l4len_idx, l4len;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP, 0);
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM |
				   IXGBE_TX_FLAGS_IPV4;
	} else if (skb_is_gso_v6(skb)) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     0, IPPROTO_TCP, 0);
		first->tx_flags |= IXGBE_TX_FLAGS_TSO |
				   IXGBE_TX_FLAGS_CSUM;
	}

	/* compute header lengths */
	l4len = tcp_hdrlen(skb);
	*hdr_len = skb_transport_offset(skb) + l4len;

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* mss_l4len_id: use 1 as index for TSO */
	mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_network_header_len(skb);
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);

	return 1;
}

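/* Worked example for the fields above, assuming a standard 1500-byte MTU
 * TCP/IPv4 flow: a 20-byte TCP header gives l4len = 20, gso_size (the
 * MSS) is typically 1460 (or 1448 with TCP timestamps), so mss_l4len_idx
 * packs 20 << IXGBE_ADVTXD_L4LEN_SHIFT, the MSS at IXGBE_ADVTXD_MSS_SHIFT
 * and context index 1; *hdr_len becomes ETH + IP + TCP = 14 + 20 + 20 =
 * 54 bytes of headers replicated per segment.
 */
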
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
			    struct ixgbevf_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 mss_l4len_idx = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		u8 l4_hdr = 0;
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			vlan_macip_lens |= skb_network_header_len(skb);
			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
			l4_hdr = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			vlan_macip_lens |= skb_network_header_len(skb);
			l4_hdr = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but proto=%x!\n",
					 first->protocol);
			}
			break;
		}

		switch (l4_hdr) {
		case IPPROTO_TCP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
			mss_l4len_idx = tcp_hdrlen(skb) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_SCTP:
			type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
			mss_l4len_idx = sizeof(struct sctphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		case IPPROTO_UDP:
			mss_l4len_idx = sizeof(struct udphdr) <<
					IXGBE_ADVTXD_L4LEN_SHIFT;
			break;
		default:
			if (unlikely(net_ratelimit())) {
				dev_warn(tx_ring->dev,
					 "partial checksum but l4 proto=%x!\n",
					 l4_hdr);
			}
			break;
		}

		/* update TX checksum flag */
		first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	/* vlan_macip_lens: MACLEN, VLAN tag */
	vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
			    type_tucmd, mss_l4len_idx);
}

static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	__le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA |
				      IXGBE_ADVTXD_DCMD_IFCS |
				      IXGBE_ADVTXD_DCMD_DEXT);

	/* set HW vlan bit if vlan is present */
	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);

	/* set segmentation enable bits for TSO/FSO */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE);

	return cmd_type;
}

static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
				     u32 tx_flags, unsigned int paylen)
{
	__le32 olinfo_status = cpu_to_le32(paylen << IXGBE_ADVTXD_PAYLEN_SHIFT);

	/* enable L4 checksum for TSO and TX checksum offload */
	if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_TXSM);

	/* enable IPv4 checksum for TSO */
	if (tx_flags & IXGBE_TX_FLAGS_IPV4)
		olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM);

	/* use index 1 context for TSO/FSO/FCOE */
	if (tx_flags & IXGBE_TX_FLAGS_TSO)
		olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* Check Context must be set if Tx switch is enabled, which it
	 * always is for case where virtual functions are running
	 */
	olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_CC);

	tx_desc->read.olinfo_status = olinfo_status;
}

static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
			   struct ixgbevf_tx_buffer *first,
			   const u8 hdr_len)
{
	dma_addr_t dma;
	struct sk_buff *skb = first->skb;
	struct ixgbevf_tx_buffer *tx_buffer;
	union ixgbe_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	unsigned int paylen = skb->len - hdr_len;
	u32 tx_flags = first->tx_flags;
	__le32 cmd_type;
	u16 i = tx_ring->next_to_use;

	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
	cmd_type = ixgbevf_tx_cmd_type(tx_flags);

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_ring->dev, dma))
		goto dma_error;

	/* record length, and DMA address */
	dma_unmap_len_set(first, len, size);
	dma_unmap_addr_set(first, dma, dma);

	tx_desc->read.buffer_addr = cpu_to_le64(dma);

	for (;;) {
		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += IXGBE_MAX_DATA_PER_TXD;
			size -= IXGBE_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
			tx_desc->read.olinfo_status = 0;
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		tx_buffer = &tx_ring->tx_buffer_info[i];
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);
		tx_desc->read.olinfo_status = 0;

		frag++;
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= cpu_to_le32(size) | cpu_to_le32(IXGBE_TXD_CMD);
	tx_desc->read.cmd_type_len = cmd_type;

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier (wmb) to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* notify HW of packet */
	ixgbevf_write_tail(tx_ring, i);

	return;
dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_buffer_info map */
	for (;;) {
		tx_buffer = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
		if (tx_buffer == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

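/* Mapping sketch, assuming IXGBE_MAX_DATA_PER_TXD is 16 KB (1 << 14): a
 * 40 KB linear skb head is split into two full 16 KB data descriptors
 * plus an 8 KB remainder, then each page fragment is mapped the same
 * way; only the very last descriptor of the frame carries the RS/EOP
 * bits set via IXGBE_TXD_CMD.
 */
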
static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}

static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(ixgbevf_desc_unused(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(tx_ring, size);
}

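/* The stop/restart dance above closes a race with the completion path:
 * the subqueue is stopped first, smp_mb() makes the stop visible before
 * the free-descriptor count is re-read, and if Tx cleanup freed enough
 * entries in the meantime the queue is restarted immediately instead of
 * waiting for the next Tx interrupt.
 */
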
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_tx_buffer *first;
	struct ixgbevf_ring *tx_ring;
	int tso;
	u32 tx_flags = 0;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	unsigned short f;
#endif
	u8 hdr_len = 0;
	u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);

	if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_ring = adapter->tx_ring[skb->queue_mapping];

	/*
	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
#else
	count += skb_shinfo(skb)->nr_frags;
#endif
	if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = vlan_get_protocol(skb);

	tso = ixgbevf_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		ixgbevf_tx_csum(tx_ring, first);

	ixgbevf_tx_map(tx_ring, first, hdr_len);

	ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}

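/* Descriptor budget example for the count check above: a frame with a
 * linear header region and 4 page fragments needs 1 descriptor for the
 * head plus 4 for the fragments, so count = 5; the "+ 3" then covers the
 * context descriptor and the 2-descriptor gap called out in the comment,
 * hence ixgbevf_maybe_stop_tx(tx_ring, count + 3).
 */
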
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	spin_lock_bh(&adapter->mbx_lock);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	spin_unlock_bh(&adapter->mbx_lock);

	return 0;
}

/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;

	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_11:
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	default:
		if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
			max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
		break;
	}

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}

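/* Frame size example: an MTU of 1500 gives max_frame = 1500 + ETH_HLEN
 * (14) + ETH_FCS_LEN (4) = 1518 bytes, which fits under the default
 * MAXIMUM_ETHERNET_VLAN_SIZE limit; jumbo MTUs are only accepted when
 * the PF speaks mailbox API 1.1 or the device is an X540 VF.
 */
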
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
		rtnl_unlock();
	}

	ixgbevf_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbevf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_restore_state(pdev);
	/*
	 * pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	rtnl_lock();
	err = ixgbevf_init_interrupt_scheme(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot initialize interrupts\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = ixgbevf_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}

#endif /* CONFIG_PM */
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	ixgbevf_suspend(pdev, PMSG_SUSPEND);
}

static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			bytes = ring->stats.bytes;
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}

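/* The do/while loops above use the u64_stats seqcount: if a writer
 * updates ring->stats between fetch_begin and fetch_retry, the read is
 * simply retried, so the 64-bit byte/packet counts stay consistent on
 * 32-bit hosts without taking a lock in the hot path.
 */
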
static const struct net_device_ops ixgbevf_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= ixgbevf_busy_poll_recv,
#endif
};

static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbevf_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}

/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "No usable DMA "
				"configuration, aborting\n");
			goto err_dma;
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	adapter->io_addr = hw->hw_addr;
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	if (IXGBE_REMOVED(hw->hw_addr)) {
		err = -EIO;
		goto err_sw_init;
	}
	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
	set_bit(__IXGBEVF_WORK_INIT, &adapter->state);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
	ixgbevf_clear_interrupt_scheme(adapter);
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	return err;
}

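/* The error labels above unwind strictly in reverse order of
 * acquisition (unregister is skipped because it never succeeded, then
 * interrupt scheme, ioremap, netdev allocation, PCI regions, and
 * finally the device itself), the usual pattern for kernel probe
 * routines.
 */
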
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_REMOVING, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbevf_clear_interrupt_scheme(adapter);
	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->io_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	free_netdev(netdev);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
}

/**
 * ixgbevf_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBEVF_WORK_INIT, &adapter->state))
		return PCI_ERS_RESULT_DISCONNECT;

	rtnl_lock();
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		ixgbevf_down(adapter);

	if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
		pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the ixgbevf_resume routine.
 **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	smp_mb__before_atomic();
	clear_bit(__IXGBEVF_DISABLED, &adapter->state);
	pci_set_master(pdev);

	ixgbevf_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbevf_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the ixgbevf_resume routine.
 **/
static void ixgbevf_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_up(adapter);

	netif_device_attach(netdev);
}

/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected = ixgbevf_io_error_detected,
	.slot_reset = ixgbevf_io_slot_reset,
	.resume = ixgbevf_io_resume,
};

static struct pci_driver ixgbevf_driver = {
	.name		= ixgbevf_driver_name,
	.id_table	= ixgbevf_pci_tbl,
	.probe		= ixgbevf_probe,
	.remove		= ixgbevf_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend	= ixgbevf_suspend,
	.resume		= ixgbevf_resume,
#endif
	.shutdown	= ixgbevf_shutdown,
	.err_handler	= &ixgbevf_err_handler
};

/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);

/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}

#ifdef DEBUG
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbevf_exit_module);

/* ixgbevf_main.c */