i40e/i40evf: Drop useless "IN_NETPOLL" flag
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / intel / i40e / i40e_main.c
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2015 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 /* Local includes */
28 #include "i40e.h"
29 #include "i40e_diag.h"
30 #ifdef CONFIG_I40E_VXLAN
31 #include <net/vxlan.h>
32 #endif
33
34 const char i40e_driver_name[] = "i40e";
35 static const char i40e_driver_string[] =
36                         "Intel(R) Ethernet Connection XL710 Network Driver";
37
38 #define DRV_KERN "-k"
39
40 #define DRV_VERSION_MAJOR 1
41 #define DRV_VERSION_MINOR 3
42 #define DRV_VERSION_BUILD 28
43 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
44              __stringify(DRV_VERSION_MINOR) "." \
45              __stringify(DRV_VERSION_BUILD)    DRV_KERN
46 const char i40e_driver_version_str[] = DRV_VERSION;
47 static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48
49 /* a bit of forward declarations */
50 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
51 static void i40e_handle_reset_warning(struct i40e_pf *pf);
52 static int i40e_add_vsi(struct i40e_vsi *vsi);
53 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
54 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55 static int i40e_setup_misc_vector(struct i40e_pf *pf);
56 static void i40e_determine_queue_usage(struct i40e_pf *pf);
57 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
59 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60
61 /* i40e_pci_tbl - PCI Device ID Table
62  *
63  * Last entry must be all 0s
64  *
65  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66  *   Class, Class Mask, private data (not used) }
67  */
68 static const struct pci_device_id i40e_pci_tbl[] = {
69         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
77         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
78         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
79         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
80         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
81         {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
82         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
83         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
84         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
85         /* required last entry */
86         {0, }
87 };
88 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
89
/* upper bound on VFs the PF will ever expose */
#define I40E_MAX_VF_COUNT 128
/* debug level for the driver; -1 leaves the hardware default in place */
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
99
100 /**
101  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
102  * @hw:   pointer to the HW structure
103  * @mem:  ptr to mem struct to fill out
104  * @size: size of memory requested
105  * @alignment: what to align the allocation to
106  **/
107 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
108                             u64 size, u32 alignment)
109 {
110         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
111
112         mem->size = ALIGN(size, alignment);
113         mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
114                                       &mem->pa, GFP_KERNEL);
115         if (!mem->va)
116                 return -ENOMEM;
117
118         return 0;
119 }
120
121 /**
122  * i40e_free_dma_mem_d - OS specific memory free for shared code
123  * @hw:   pointer to the HW structure
124  * @mem:  ptr to mem struct to free
125  **/
126 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
127 {
128         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
129
130         dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
131         mem->va = NULL;
132         mem->pa = 0;
133         mem->size = 0;
134
135         return 0;
136 }
137
138 /**
139  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
140  * @hw:   pointer to the HW structure
141  * @mem:  ptr to mem struct to fill out
142  * @size: size of memory requested
143  **/
144 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
145                              u32 size)
146 {
147         mem->size = size;
148         mem->va = kzalloc(size, GFP_KERNEL);
149
150         if (!mem->va)
151                 return -ENOMEM;
152
153         return 0;
154 }
155
156 /**
157  * i40e_free_virt_mem_d - OS specific memory free for shared code
158  * @hw:   pointer to the HW structure
159  * @mem:  ptr to mem struct to free
160  **/
161 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
162 {
163         /* it's ok to kfree a NULL pointer */
164         kfree(mem->va);
165         mem->va = NULL;
166         mem->size = 0;
167
168         return 0;
169 }
170
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 * (-EINVAL on bad parameters, -ENOMEM when no contiguous run is free).
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 *
 * Each assigned entry is tagged with @id plus I40E_PILE_VALID_BIT so
 * i40e_put_lump() can later identify and release exactly this lump.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	/* id must not collide with the VALID tag bit */
	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			/* next search can start right after this lump */
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
227
228 /**
229  * i40e_put_lump - return a lump of generic resource
230  * @pile: the pile of resource to search
231  * @index: the base item index
232  * @id: the owner id of the items assigned
233  *
234  * Returns the count of items in the lump
235  **/
236 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
237 {
238         int valid_id = (id | I40E_PILE_VALID_BIT);
239         int count = 0;
240         int i;
241
242         if (!pile || index >= pile->num_entries)
243                 return -EINVAL;
244
245         for (i = index;
246              i < pile->num_entries && pile->list[i] == valid_id;
247              i++) {
248                 pile->list[i] = 0;
249                 count++;
250         }
251
252         if (count && index < pile->search_hint)
253                 pile->search_hint = index;
254
255         return count;
256 }
257
258 /**
259  * i40e_find_vsi_from_id - searches for the vsi with the given id
260  * @pf - the pf structure to search for the vsi
261  * @id - id of the vsi it is searching for
262  **/
263 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
264 {
265         int i;
266
267         for (i = 0; i < pf->num_alloc_vsi; i++)
268                 if (pf->vsi[i] && (pf->vsi[i]->id == id))
269                         return pf->vsi[i];
270
271         return NULL;
272 }
273
274 /**
275  * i40e_service_event_schedule - Schedule the service task to wake up
276  * @pf: board private structure
277  *
278  * If not already scheduled, this puts the task into the work queue
279  **/
280 static void i40e_service_event_schedule(struct i40e_pf *pf)
281 {
282         if (!test_bit(__I40E_DOWN, &pf->state) &&
283             !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
284             !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
285                 schedule_work(&pf->service_task);
286 }
287
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 *
 * Escalates through PF -> CORE -> GLOBAL reset on repeated timeouts,
 * backing off between attempts via tx_timeout_last_recovery.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		/* per-queue trans_start may be unset; fall back to netdev's */
		trans_start = q->trans_start ? : netdev->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	/* quiet for 20s: restart escalation from the mildest reset */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	/* escalating reset severity; the service task performs the reset */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
385
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 *
 * Publishes freshly-filled Rx descriptors to the hardware by moving
 * the ring's tail register.
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	/* MMIO write of the tail; hardware may start fetching immediately */
	writel(val, rx_ring->tail);
}
403
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	/* stats live inside the VSI itself; caller must not free this */
	return &vsi->net_stats;
}
415
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: caller-provided structure to fill with per-queue totals
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 *
 * Per-queue packet/byte counts are summed here under RCU; the
 * error/drop style counters are copied from the VSI stats that the
 * watchdog subtask maintains.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		/* ring pointer may be swapped out under us; snapshot once */
		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		/* seqcount retry loop gives a consistent packets/bytes pair */
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes   = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes   += bytes;
		/* assumes the rx ring is allocated immediately after its tx
		 * partner in one array — NOTE(review): verify against the
		 * ring allocation code if that layout ever changes
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
486
487 /**
488  * i40e_vsi_reset_stats - Resets all stats of the given vsi
489  * @vsi: the VSI to have its stats reset
490  **/
491 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
492 {
493         struct rtnl_link_stats64 *ns;
494         int i;
495
496         if (!vsi)
497                 return;
498
499         ns = i40e_get_vsi_stats_struct(vsi);
500         memset(ns, 0, sizeof(*ns));
501         memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
502         memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
503         memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
504         if (vsi->rx_rings && vsi->rx_rings[0]) {
505                 for (i = 0; i < vsi->num_queue_pairs; i++) {
506                         memset(&vsi->rx_rings[i]->stats, 0,
507                                sizeof(vsi->rx_rings[i]->stats));
508                         memset(&vsi->rx_rings[i]->rx_stats, 0,
509                                sizeof(vsi->rx_rings[i]->rx_stats));
510                         memset(&vsi->tx_rings[i]->stats, 0,
511                                sizeof(vsi->tx_rings[i]->stats));
512                         memset(&vsi->tx_rings[i]->tx_stats, 0,
513                                sizeof(vsi->tx_rings[i]->tx_stats));
514                 }
515         }
516         vsi->stat_offsets_loaded = false;
517 }
518
519 /**
520  * i40e_pf_reset_stats - Reset all of the stats for the given PF
521  * @pf: the PF to be reset
522  **/
523 void i40e_pf_reset_stats(struct i40e_pf *pf)
524 {
525         int i;
526
527         memset(&pf->stats, 0, sizeof(pf->stats));
528         memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
529         pf->stat_offsets_loaded = false;
530
531         for (i = 0; i < I40E_MAX_VEB; i++) {
532                 if (pf->veb[i]) {
533                         memset(&pf->veb[i]->stats, 0,
534                                sizeof(pf->veb[i]->stats));
535                         memset(&pf->veb[i]->stats_offsets, 0,
536                                sizeof(pf->veb[i]->stats_offsets));
537                         pf->veb[i]->stat_offsets_loaded = false;
538                 }
539         }
540 }
541
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		/* QEMU's emulation lacks 64-bit register reads; combine two
		 * 32-bit reads, keeping only the 16 valid high bits
		 */
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		/* counter wrapped around the 48-bit boundary */
		*stat = (new_data + BIT_ULL(48)) - *offset;
	/* clamp the result back to 48 bits */
	*stat &= 0xFFFFFFFFFFFFULL;
}
576
577 /**
578  * i40e_stat_update32 - read and update a 32 bit stat from the chip
579  * @hw: ptr to the hardware info
580  * @reg: the hw reg to read
581  * @offset_loaded: has the initial offset been loaded yet
582  * @offset: ptr to current offset value
583  * @stat: ptr to the stat
584  **/
585 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
586                                bool offset_loaded, u64 *offset, u64 *stat)
587 {
588         u32 new_data;
589
590         new_data = rd32(hw, reg);
591         if (!offset_loaded)
592                 *offset = new_data;
593         if (likely(new_data >= *offset))
594                 *stat = (u32)(new_data - *offset);
595         else
596                 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
597 }
598
599 /**
600  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
601  * @vsi: the VSI to be updated
602  **/
603 void i40e_update_eth_stats(struct i40e_vsi *vsi)
604 {
605         int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
606         struct i40e_pf *pf = vsi->back;
607         struct i40e_hw *hw = &pf->hw;
608         struct i40e_eth_stats *oes;
609         struct i40e_eth_stats *es;     /* device's eth stats */
610
611         es = &vsi->eth_stats;
612         oes = &vsi->eth_stats_offsets;
613
614         /* Gather up the stats that the hw collects */
615         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
616                            vsi->stat_offsets_loaded,
617                            &oes->tx_errors, &es->tx_errors);
618         i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
619                            vsi->stat_offsets_loaded,
620                            &oes->rx_discards, &es->rx_discards);
621         i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
622                            vsi->stat_offsets_loaded,
623                            &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
624         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
625                            vsi->stat_offsets_loaded,
626                            &oes->tx_errors, &es->tx_errors);
627
628         i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
629                            I40E_GLV_GORCL(stat_idx),
630                            vsi->stat_offsets_loaded,
631                            &oes->rx_bytes, &es->rx_bytes);
632         i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
633                            I40E_GLV_UPRCL(stat_idx),
634                            vsi->stat_offsets_loaded,
635                            &oes->rx_unicast, &es->rx_unicast);
636         i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
637                            I40E_GLV_MPRCL(stat_idx),
638                            vsi->stat_offsets_loaded,
639                            &oes->rx_multicast, &es->rx_multicast);
640         i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
641                            I40E_GLV_BPRCL(stat_idx),
642                            vsi->stat_offsets_loaded,
643                            &oes->rx_broadcast, &es->rx_broadcast);
644
645         i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
646                            I40E_GLV_GOTCL(stat_idx),
647                            vsi->stat_offsets_loaded,
648                            &oes->tx_bytes, &es->tx_bytes);
649         i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
650                            I40E_GLV_UPTCL(stat_idx),
651                            vsi->stat_offsets_loaded,
652                            &oes->tx_unicast, &es->tx_unicast);
653         i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
654                            I40E_GLV_MPTCL(stat_idx),
655                            vsi->stat_offsets_loaded,
656                            &oes->tx_multicast, &es->tx_multicast);
657         i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
658                            I40E_GLV_BPTCL(stat_idx),
659                            vsi->stat_offsets_loaded,
660                            &oes->tx_broadcast, &es->tx_broadcast);
661         vsi->stat_offsets_loaded = true;
662 }
663
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 *
 * Snapshots the per-VEB switch counters (byte/packet counts, discards)
 * and the per-traffic-class counters into veb->stats / veb->tc_stats,
 * using the offset-from-first-read scheme, then marks offsets loaded.
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	/* RUPP register only exists on B0 and later silicon */
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* per-traffic-class rx/tx packet and byte counters */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
742
743 #ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 *
 * No-op unless @vsi is the FCoE VSI. Snapshots the FCoE hardware
 * counters into vsi->fcoe_stats via the offset-from-first-read scheme,
 * then marks the FCoE offsets as loaded.
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	/* FCoE stat registers are indexed per PF, offset into the FCoE bank */
	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}
790
791 #endif
792 /**
793  * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
794  * @pf: the corresponding PF
795  *
796  * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
797  **/
798 static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
799 {
800         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
801         struct i40e_hw_port_stats *nsd = &pf->stats;
802         struct i40e_hw *hw = &pf->hw;
803         u64 xoff = 0;
804
805         if ((hw->fc.current_mode != I40E_FC_FULL) &&
806             (hw->fc.current_mode != I40E_FC_RX_PAUSE))
807                 return;
808
809         xoff = nsd->link_xoff_rx;
810         i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
811                            pf->stat_offsets_loaded,
812                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
813
814         /* No new LFC xoff rx */
815         if (!(nsd->link_xoff_rx - xoff))
816                 return;
817
818 }
819
820 /**
821  * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
822  * @pf: the corresponding PF
823  *
824  * Update the Rx XOFF counter (PAUSE frames) in PFC mode
825  **/
826 static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
827 {
828         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
829         struct i40e_hw_port_stats *nsd = &pf->stats;
830         bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
831         struct i40e_dcbx_config *dcb_cfg;
832         struct i40e_hw *hw = &pf->hw;
833         u16 i;
834         u8 tc;
835
836         dcb_cfg = &hw->local_dcbx_config;
837
838         /* Collect Link XOFF stats when PFC is disabled */
839         if (!dcb_cfg->pfc.pfcenable) {
840                 i40e_update_link_xoff_rx(pf);
841                 return;
842         }
843
844         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
845                 u64 prio_xoff = nsd->priority_xoff_rx[i];
846
847                 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
848                                    pf->stat_offsets_loaded,
849                                    &osd->priority_xoff_rx[i],
850                                    &nsd->priority_xoff_rx[i]);
851
852                 /* No new PFC xoff rx */
853                 if (!(nsd->priority_xoff_rx[i] - prio_xoff))
854                         continue;
855                 /* Get the TC for given priority */
856                 tc = dcb_cfg->etscfg.prioritytable[i];
857                 xoff[tc] = true;
858         }
859 }
860
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 *
 * Per-queue software counters are read under RCU with the u64_stats
 * seqcount retry loop so a concurrent writer on another CPU cannot be
 * observed mid-update.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;	/* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	/* rings may be torn down while the VSI is down or reconfiguring */
	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring; ACCESS_ONCE pairs with ring replacement
		 * under RCU
		 */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		/* retry loop: re-read if a writer updated syncp meanwhile */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
963
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 *
 * Walks the port-level hardware statistics registers and accumulates the
 * delta against the saved offsets into pf->stats.  48-bit counters are read
 * via their H/L register pair; 32-bit counters via a single register.
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* port byte/packet counters, per destination type */
	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	/* link error counters */
	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	/* flow control (XON/XOFF) counters */
	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	/* per-priority PFC counters; 8 presumably equals
	 * I40E_MAX_USER_PRIORITY - TODO confirm and use the constant
	 */
	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	/* Rx size-bucket histograms */
	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	/* Tx size-bucket histograms */
	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	/* Rx framing error counters */
	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	/* EEE LPI (low power idle) status and event counts */
	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	/* flow director feature status: enabled and not auto-disabled */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	/* first pass only captures offsets; deltas accumulate from now on */
	pf->stat_offsets_loaded = true;
}
1190
1191 /**
1192  * i40e_update_stats - Update the various statistics counters.
1193  * @vsi: the VSI to be updated
1194  *
1195  * Update the various stats for this VSI and its related entities.
1196  **/
1197 void i40e_update_stats(struct i40e_vsi *vsi)
1198 {
1199         struct i40e_pf *pf = vsi->back;
1200
1201         if (vsi == pf->vsi[pf->lan_vsi])
1202                 i40e_update_pf_stats(pf);
1203
1204         i40e_update_vsi_stats(vsi);
1205 #ifdef I40E_FCOE
1206         i40e_update_fcoe_stats(vsi);
1207 #endif
1208 }
1209
1210 /**
1211  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1212  * @vsi: the VSI to be searched
1213  * @macaddr: the MAC address
1214  * @vlan: the vlan
1215  * @is_vf: make sure its a VF filter, else doesn't matter
1216  * @is_netdev: make sure its a netdev filter, else doesn't matter
1217  *
1218  * Returns ptr to the filter object or NULL
1219  **/
1220 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1221                                                 u8 *macaddr, s16 vlan,
1222                                                 bool is_vf, bool is_netdev)
1223 {
1224         struct i40e_mac_filter *f;
1225
1226         if (!vsi || !macaddr)
1227                 return NULL;
1228
1229         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1230                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1231                     (vlan == f->vlan)    &&
1232                     (!is_vf || f->is_vf) &&
1233                     (!is_netdev || f->is_netdev))
1234                         return f;
1235         }
1236         return NULL;
1237 }
1238
1239 /**
1240  * i40e_find_mac - Find a mac addr in the macvlan filters list
1241  * @vsi: the VSI to be searched
1242  * @macaddr: the MAC address we are searching for
1243  * @is_vf: make sure its a VF filter, else doesn't matter
1244  * @is_netdev: make sure its a netdev filter, else doesn't matter
1245  *
1246  * Returns the first filter with the provided MAC address or NULL if
1247  * MAC address was not found
1248  **/
1249 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1250                                       bool is_vf, bool is_netdev)
1251 {
1252         struct i40e_mac_filter *f;
1253
1254         if (!vsi || !macaddr)
1255                 return NULL;
1256
1257         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1258                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1259                     (!is_vf || f->is_vf) &&
1260                     (!is_netdev || f->is_netdev))
1261                         return f;
1262         }
1263         return NULL;
1264 }
1265
1266 /**
1267  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1268  * @vsi: the VSI to be searched
1269  *
1270  * Returns true if VSI is in vlan mode or false otherwise
1271  **/
1272 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1273 {
1274         struct i40e_mac_filter *f;
1275
1276         /* Only -1 for all the filters denotes not in vlan mode
1277          * so we have to go through all the list in order to make sure
1278          */
1279         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1280                 if (f->vlan >= 0 || vsi->info.pvid)
1281                         return true;
1282         }
1283
1284         return false;
1285 }
1286
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		/* NOTE(review): when a port VLAN is set this rewrites every
		 * existing filter's vlan in place before cloning @macaddr
		 * onto it -- looks intentional (pvid forces a single vlan)
		 * but confirm against callers
		 */
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		/* add @macaddr on this filter's vlan if not already there */
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;	/* filter allocation failed */
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}
1318
1319 /**
1320  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1321  * @vsi: the PF Main VSI - inappropriate for any other VSI
1322  * @macaddr: the MAC address
1323  *
1324  * Some older firmware configurations set up a default promiscuous VLAN
1325  * filter that needs to be removed.
1326  **/
1327 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1328 {
1329         struct i40e_aqc_remove_macvlan_element_data element;
1330         struct i40e_pf *pf = vsi->back;
1331         i40e_status ret;
1332
1333         /* Only appropriate for the PF main VSI */
1334         if (vsi->type != I40E_VSI_MAIN)
1335                 return -EINVAL;
1336
1337         memset(&element, 0, sizeof(element));
1338         ether_addr_copy(element.mac_addr, macaddr);
1339         element.vlan_tag = 0;
1340         element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1341                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1342         ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1343         if (ret)
1344                 return -ENOENT;
1345
1346         return 0;
1347 }
1348
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Looks up an existing filter for the mac/vlan pair and reference-counts
 * it, allocating a new list entry when none exists.  The VF/netdev flags
 * each contribute at most one reference.
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		/* GFP_ATOMIC: may be called while holding a spinlock */
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->changed = true;	/* not yet pushed to firmware */

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		/* plain reference, counted unconditionally */
		f->counter++;
	}

	/* changed tells sync_filters_subtask to
	 * push the filter down to the firmware
	 */
	if (f->changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
1408
1409 /**
1410  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1411  * @vsi: the VSI to be searched
1412  * @macaddr: the MAC address
1413  * @vlan: the vlan
1414  * @is_vf: make sure it's a VF filter, else doesn't matter
1415  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1416  **/
1417 void i40e_del_filter(struct i40e_vsi *vsi,
1418                      u8 *macaddr, s16 vlan,
1419                      bool is_vf, bool is_netdev)
1420 {
1421         struct i40e_mac_filter *f;
1422
1423         if (!vsi || !macaddr)
1424                 return;
1425
1426         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1427         if (!f || f->counter == 0)
1428                 return;
1429
1430         if (is_vf) {
1431                 if (f->is_vf) {
1432                         f->is_vf = false;
1433                         f->counter--;
1434                 }
1435         } else if (is_netdev) {
1436                 if (f->is_netdev) {
1437                         f->is_netdev = false;
1438                         f->counter--;
1439                 }
1440         } else {
1441                 /* make sure we don't remove a filter in use by VF or netdev */
1442                 int min_f = 0;
1443
1444                 min_f += (f->is_vf ? 1 : 0);
1445                 min_f += (f->is_netdev ? 1 : 0);
1446
1447                 if (f->counter > min_f)
1448                         f->counter--;
1449         }
1450
1451         /* counter == 0 tells sync_filters_subtask to
1452          * remove the filter from the firmware's list
1453          */
1454         if (f->counter == 0) {
1455                 f->changed = true;
1456                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1457                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1458         }
1459 }
1460
1461 /**
1462  * i40e_set_mac - NDO callback to set mac address
1463  * @netdev: network interface device structure
1464  * @p: pointer to an address structure
1465  *
1466  * Returns 0 on success, negative on failure
1467  **/
1468 #ifdef I40E_FCOE
1469 int i40e_set_mac(struct net_device *netdev, void *p)
1470 #else
1471 static int i40e_set_mac(struct net_device *netdev, void *p)
1472 #endif
1473 {
1474         struct i40e_netdev_priv *np = netdev_priv(netdev);
1475         struct i40e_vsi *vsi = np->vsi;
1476         struct i40e_pf *pf = vsi->back;
1477         struct i40e_hw *hw = &pf->hw;
1478         struct sockaddr *addr = p;
1479         struct i40e_mac_filter *f;
1480
1481         if (!is_valid_ether_addr(addr->sa_data))
1482                 return -EADDRNOTAVAIL;
1483
1484         if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1485                 netdev_info(netdev, "already using mac address %pM\n",
1486                             addr->sa_data);
1487                 return 0;
1488         }
1489
1490         if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1491             test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1492                 return -EADDRNOTAVAIL;
1493
1494         if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1495                 netdev_info(netdev, "returning to hw mac address %pM\n",
1496                             hw->mac.addr);
1497         else
1498                 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1499
1500         if (vsi->type == I40E_VSI_MAIN) {
1501                 i40e_status ret;
1502
1503                 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1504                                                 I40E_AQC_WRITE_TYPE_LAA_WOL,
1505                                                 addr->sa_data, NULL);
1506                 if (ret) {
1507                         netdev_info(netdev,
1508                                     "Addr change for Main VSI failed: %d\n",
1509                                     ret);
1510                         return -EADDRNOTAVAIL;
1511                 }
1512         }
1513
1514         if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1515                 struct i40e_aqc_remove_macvlan_element_data element;
1516
1517                 memset(&element, 0, sizeof(element));
1518                 ether_addr_copy(element.mac_addr, netdev->dev_addr);
1519                 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1520                 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1521         } else {
1522                 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1523                                 false, false);
1524         }
1525
1526         if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1527                 struct i40e_aqc_add_macvlan_element_data element;
1528
1529                 memset(&element, 0, sizeof(element));
1530                 ether_addr_copy(element.mac_addr, hw->mac.addr);
1531                 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1532                 i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1533         } else {
1534                 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1535                                     false, false);
1536                 if (f)
1537                         f->is_laa = true;
1538         }
1539
1540         i40e_sync_vsi_filters(vsi, false);
1541         ether_addr_copy(netdev->dev_addr, addr->sa_data);
1542
1543         return 0;
1544 }
1545
1546 /**
1547  * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1548  * @vsi: the VSI being setup
1549  * @ctxt: VSI context structure
1550  * @enabled_tc: Enabled TCs bitmap
1551  * @is_add: True if called before Add VSI
1552  *
1553  * Setup VSI queue mapping for enabled traffic classes.
1554  **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                              struct i40e_vsi_context *ctxt,
                              u8 enabled_tc,
                              bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     struct i40e_vsi_context *ctxt,
                                     u8 enabled_tc,
                                     bool is_add)
#endif
{
        struct i40e_pf *pf = vsi->back;
        u16 sections = 0;       /* valid_sections bits accumulated below */
        u8 netdev_tc = 0;       /* next netdev traffic class to hand out */
        u16 numtc = 0;          /* number of enabled traffic classes */
        u16 qcount;
        u8 offset;              /* running queue offset across TCs */
        u16 qmap;
        int i;
        u16 num_tc_qps = 0;     /* queue pairs available per enabled TC */

        sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
        offset = 0;

        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                        if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
                                numtc++;
                }
                if (!numtc) {
                        dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
                        numtc = 1;
                }
        } else {
                /* At least TC0 is enabled in case of non-DCB case */
                numtc = 1;
        }

        vsi->tc_config.numtc = numtc;
        vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
        /* Number of queues per enabled TC */
        /* In MFP case we can have a much lower count of MSIx
         * vectors available and so we need to lower the used
         * q count.
         */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
        else
                qcount = vsi->alloc_queue_pairs;
        num_tc_qps = qcount / numtc;
        num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
                if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
                        /* TC is enabled */
                        int pow, num_qps;

                        /* the per-TC queue count depends on the VSI type */
                        switch (vsi->type) {
                        case I40E_VSI_MAIN:
                                qcount = min_t(int, pf->rss_size, num_tc_qps);
                                break;
#ifdef I40E_FCOE
                        case I40E_VSI_FCOE:
                                qcount = num_tc_qps;
                                break;
#endif
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
                        default:
                                /* these VSI types only ever use TC0 */
                                qcount = num_tc_qps;
                                WARN_ON(i != 0);
                                break;
                        }
                        vsi->tc_config.tc_info[i].qoffset = offset;
                        vsi->tc_config.tc_info[i].qcount = qcount;

                        /* find the next higher power-of-2 of num queue pairs */
                        num_qps = qcount;
                        pow = 0;
                        while (num_qps && (BIT_ULL(pow) < qcount)) {
                                pow++;
                                num_qps >>= 1;
                        }

                        vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
                        /* pack offset and power-of-2 queue count into the
                         * per-TC mapping word expected by the firmware
                         */
                        qmap =
                            (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
                            (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

                        offset += qcount;
                } else {
                        /* TC is not enabled so set the offset to
                         * default queue and allocate one queue
                         * for the given TC.
                         */
                        vsi->tc_config.tc_info[i].qoffset = 0;
                        vsi->tc_config.tc_info[i].qcount = 1;
                        vsi->tc_config.tc_info[i].netdev_tc = 0;

                        qmap = 0;
                }
                ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
        }

        /* Set actual Tx/Rx queue pairs */
        vsi->num_queue_pairs = offset;
        if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
                if (vsi->req_queue_pairs > 0)
                        vsi->num_queue_pairs = vsi->req_queue_pairs;
                else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                        vsi->num_queue_pairs = pf->num_lan_msix;
        }

        /* Scheduler section valid can only be set for ADD VSI */
        if (is_add) {
                sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

                ctxt->info.up_enable_bits = enabled_tc;
        }
        if (vsi->type == I40E_VSI_SRIOV) {
                /* SR-IOV VSIs get an explicit, possibly non-contiguous,
                 * per-queue mapping
                 */
                ctxt->info.mapping_flags |=
                                     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
                for (i = 0; i < vsi->num_queue_pairs; i++)
                        ctxt->info.queue_mapping[i] =
                                               cpu_to_le16(vsi->base_queue + i);
        } else {
                /* everyone else uses a contiguous range from base_queue */
                ctxt->info.mapping_flags |=
                                        cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
                ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
        }
        ctxt->info.valid_sections |= cpu_to_le16(sections);
}
1692
1693 /**
1694  * i40e_set_rx_mode - NDO callback to set the netdev filters
1695  * @netdev: network interface device structure
1696  **/
1697 #ifdef I40E_FCOE
1698 void i40e_set_rx_mode(struct net_device *netdev)
1699 #else
1700 static void i40e_set_rx_mode(struct net_device *netdev)
1701 #endif
1702 {
1703         struct i40e_netdev_priv *np = netdev_priv(netdev);
1704         struct i40e_mac_filter *f, *ftmp;
1705         struct i40e_vsi *vsi = np->vsi;
1706         struct netdev_hw_addr *uca;
1707         struct netdev_hw_addr *mca;
1708         struct netdev_hw_addr *ha;
1709
1710         /* add addr if not already in the filter list */
1711         netdev_for_each_uc_addr(uca, netdev) {
1712                 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1713                         if (i40e_is_vsi_in_vlan(vsi))
1714                                 i40e_put_mac_in_vlan(vsi, uca->addr,
1715                                                      false, true);
1716                         else
1717                                 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1718                                                 false, true);
1719                 }
1720         }
1721
1722         netdev_for_each_mc_addr(mca, netdev) {
1723                 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1724                         if (i40e_is_vsi_in_vlan(vsi))
1725                                 i40e_put_mac_in_vlan(vsi, mca->addr,
1726                                                      false, true);
1727                         else
1728                                 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1729                                                 false, true);
1730                 }
1731         }
1732
1733         /* remove filter if not in netdev list */
1734         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1735
1736                 if (!f->is_netdev)
1737                         continue;
1738
1739                 netdev_for_each_mc_addr(mca, netdev)
1740                         if (ether_addr_equal(mca->addr, f->macaddr))
1741                                 goto bottom_of_search_loop;
1742
1743                 netdev_for_each_uc_addr(uca, netdev)
1744                         if (ether_addr_equal(uca->addr, f->macaddr))
1745                                 goto bottom_of_search_loop;
1746
1747                 for_each_dev_addr(netdev, ha)
1748                         if (ether_addr_equal(ha->addr, f->macaddr))
1749                                 goto bottom_of_search_loop;
1750
1751                 /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
1752                 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1753
1754 bottom_of_search_loop:
1755                 continue;
1756         }
1757
1758         /* check for other flag changes */
1759         if (vsi->current_netdev_flags != vsi->netdev->flags) {
1760                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1761                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1762         }
1763 }
1764
1765 /**
1766  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1767  * @vsi: ptr to the VSI
1768  * @grab_rtnl: whether RTNL needs to be grabbed
1769  *
1770  * Push any outstanding VSI filter changes through the AdminQ.
1771  *
1772  * Returns 0 or error value
1773  **/
1774 int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
1775 {
1776         struct i40e_mac_filter *f, *ftmp;
1777         bool promisc_forced_on = false;
1778         bool add_happened = false;
1779         int filter_list_len = 0;
1780         u32 changed_flags = 0;
1781         i40e_status ret = 0;
1782         struct i40e_pf *pf;
1783         int num_add = 0;
1784         int num_del = 0;
1785         int aq_err = 0;
1786         u16 cmd_flags;
1787
1788         /* empty array typed pointers, kcalloc later */
1789         struct i40e_aqc_add_macvlan_element_data *add_list;
1790         struct i40e_aqc_remove_macvlan_element_data *del_list;
1791
1792         while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1793                 usleep_range(1000, 2000);
1794         pf = vsi->back;
1795
1796         if (vsi->netdev) {
1797                 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1798                 vsi->current_netdev_flags = vsi->netdev->flags;
1799         }
1800
1801         if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1802                 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1803
1804                 filter_list_len = pf->hw.aq.asq_buf_size /
1805                             sizeof(struct i40e_aqc_remove_macvlan_element_data);
1806                 del_list = kcalloc(filter_list_len,
1807                             sizeof(struct i40e_aqc_remove_macvlan_element_data),
1808                             GFP_KERNEL);
1809                 if (!del_list)
1810                         return -ENOMEM;
1811
1812                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1813                         if (!f->changed)
1814                                 continue;
1815
1816                         if (f->counter != 0)
1817                                 continue;
1818                         f->changed = false;
1819                         cmd_flags = 0;
1820
1821                         /* add to delete list */
1822                         ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1823                         del_list[num_del].vlan_tag =
1824                                 cpu_to_le16((u16)(f->vlan ==
1825                                             I40E_VLAN_ANY ? 0 : f->vlan));
1826
1827                         cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1828                         del_list[num_del].flags = cmd_flags;
1829                         num_del++;
1830
1831                         /* unlink from filter list */
1832                         list_del(&f->list);
1833                         kfree(f);
1834
1835                         /* flush a full buffer */
1836                         if (num_del == filter_list_len) {
1837                                 ret = i40e_aq_remove_macvlan(&pf->hw,
1838                                                   vsi->seid, del_list, num_del,
1839                                                   NULL);
1840                                 aq_err = pf->hw.aq.asq_last_status;
1841                                 num_del = 0;
1842                                 memset(del_list, 0, sizeof(*del_list));
1843
1844                                 if (ret && aq_err != I40E_AQ_RC_ENOENT)
1845                                         dev_info(&pf->pdev->dev,
1846                                                  "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
1847                                                  i40e_stat_str(&pf->hw, ret),
1848                                                  i40e_aq_str(&pf->hw, aq_err));
1849                         }
1850                 }
1851                 if (num_del) {
1852                         ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
1853                                                      del_list, num_del, NULL);
1854                         aq_err = pf->hw.aq.asq_last_status;
1855                         num_del = 0;
1856
1857                         if (ret && aq_err != I40E_AQ_RC_ENOENT)
1858                                 dev_info(&pf->pdev->dev,
1859                                          "ignoring delete macvlan error, err %s aq_err %s\n",
1860                                          i40e_stat_str(&pf->hw, ret),
1861                                          i40e_aq_str(&pf->hw, aq_err));
1862                 }
1863
1864                 kfree(del_list);
1865                 del_list = NULL;
1866
1867                 /* do all the adds now */
1868                 filter_list_len = pf->hw.aq.asq_buf_size /
1869                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1870                 add_list = kcalloc(filter_list_len,
1871                                sizeof(struct i40e_aqc_add_macvlan_element_data),
1872                                GFP_KERNEL);
1873                 if (!add_list)
1874                         return -ENOMEM;
1875
1876                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1877                         if (!f->changed)
1878                                 continue;
1879
1880                         if (f->counter == 0)
1881                                 continue;
1882                         f->changed = false;
1883                         add_happened = true;
1884                         cmd_flags = 0;
1885
1886                         /* add to add array */
1887                         ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
1888                         add_list[num_add].vlan_tag =
1889                                 cpu_to_le16(
1890                                  (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
1891                         add_list[num_add].queue_number = 0;
1892
1893                         cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1894                         add_list[num_add].flags = cpu_to_le16(cmd_flags);
1895                         num_add++;
1896
1897                         /* flush a full buffer */
1898                         if (num_add == filter_list_len) {
1899                                 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1900                                                           add_list, num_add,
1901                                                           NULL);
1902                                 aq_err = pf->hw.aq.asq_last_status;
1903                                 num_add = 0;
1904
1905                                 if (ret)
1906                                         break;
1907                                 memset(add_list, 0, sizeof(*add_list));
1908                         }
1909                 }
1910                 if (num_add) {
1911                         ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
1912                                                   add_list, num_add, NULL);
1913                         aq_err = pf->hw.aq.asq_last_status;
1914                         num_add = 0;
1915                 }
1916                 kfree(add_list);
1917                 add_list = NULL;
1918
1919                 if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
1920                         dev_info(&pf->pdev->dev,
1921                                  "add filter failed, err %s aq_err %s\n",
1922                                  i40e_stat_str(&pf->hw, ret),
1923                                  i40e_aq_str(&pf->hw, aq_err));
1924                         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
1925                             !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1926                                       &vsi->state)) {
1927                                 promisc_forced_on = true;
1928                                 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1929                                         &vsi->state);
1930                                 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
1931                         }
1932                 }
1933         }
1934
1935         /* check for changes in promiscuous modes */
1936         if (changed_flags & IFF_ALLMULTI) {
1937                 bool cur_multipromisc;
1938
1939                 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
1940                 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
1941                                                             vsi->seid,
1942                                                             cur_multipromisc,
1943                                                             NULL);
1944                 if (ret)
1945                         dev_info(&pf->pdev->dev,
1946                                  "set multi promisc failed, err %s aq_err %s\n",
1947                                  i40e_stat_str(&pf->hw, ret),
1948                                  i40e_aq_str(&pf->hw,
1949                                              pf->hw.aq.asq_last_status));
1950         }
1951         if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
1952                 bool cur_promisc;
1953
1954                 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
1955                                test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
1956                                         &vsi->state));
1957                 if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
1958                         /* set defport ON for Main VSI instead of true promisc
1959                          * this way we will get all unicast/multicast and VLAN
1960                          * promisc behavior but will not get VF or VMDq traffic
1961                          * replicated on the Main VSI.
1962                          */
1963                         if (pf->cur_promisc != cur_promisc) {
1964                                 pf->cur_promisc = cur_promisc;
1965                                 if (grab_rtnl)
1966                                         i40e_do_reset_safe(pf,
1967                                                 BIT(__I40E_PF_RESET_REQUESTED));
1968                                 else
1969                                         i40e_do_reset(pf,
1970                                                 BIT(__I40E_PF_RESET_REQUESTED));
1971                         }
1972                 } else {
1973                         ret = i40e_aq_set_vsi_unicast_promiscuous(
1974                                                           &vsi->back->hw,
1975                                                           vsi->seid,
1976                                                           cur_promisc, NULL);
1977                         if (ret)
1978                                 dev_info(&pf->pdev->dev,
1979                                          "set unicast promisc failed, err %d, aq_err %d\n",
1980                                          ret, pf->hw.aq.asq_last_status);
1981                         ret = i40e_aq_set_vsi_multicast_promiscuous(
1982                                                           &vsi->back->hw,
1983                                                           vsi->seid,
1984                                                           cur_promisc, NULL);
1985                         if (ret)
1986                                 dev_info(&pf->pdev->dev,
1987                                          "set multicast promisc failed, err %d, aq_err %d\n",
1988                                          ret, pf->hw.aq.asq_last_status);
1989                 }
1990                 ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
1991                                                 vsi->seid,
1992                                                 cur_promisc, NULL);
1993                 if (ret)
1994                         dev_info(&pf->pdev->dev,
1995                                  "set brdcast promisc failed, err %s, aq_err %s\n",
1996                                  i40e_stat_str(&pf->hw, ret),
1997                                  i40e_aq_str(&pf->hw,
1998                                              pf->hw.aq.asq_last_status));
1999         }
2000
2001         clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
2002         return 0;
2003 }
2004
2005 /**
2006  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2007  * @pf: board private structure
2008  **/
2009 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2010 {
2011         int v;
2012
2013         if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2014                 return;
2015         pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2016
2017         for (v = 0; v < pf->num_alloc_vsi; v++) {
2018                 if (pf->vsi[v] &&
2019                     (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
2020                         i40e_sync_vsi_filters(pf->vsi[v], true);
2021         }
2022 }
2023
2024 /**
2025  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2026  * @netdev: network interface device structure
2027  * @new_mtu: new value for maximum frame size
2028  *
2029  * Returns 0 on success, negative on failure
2030  **/
2031 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2032 {
2033         struct i40e_netdev_priv *np = netdev_priv(netdev);
2034         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2035         struct i40e_vsi *vsi = np->vsi;
2036
2037         /* MTU < 68 is an error and causes problems on some kernels */
2038         if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
2039                 return -EINVAL;
2040
2041         netdev_info(netdev, "changing MTU from %d to %d\n",
2042                     netdev->mtu, new_mtu);
2043         netdev->mtu = new_mtu;
2044         if (netif_running(netdev))
2045                 i40e_vsi_reinit_locked(vsi);
2046
2047         return 0;
2048 }
2049
2050 /**
2051  * i40e_ioctl - Access the hwtstamp interface
2052  * @netdev: network interface device structure
2053  * @ifr: interface request data
2054  * @cmd: ioctl command
2055  **/
2056 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2057 {
2058         struct i40e_netdev_priv *np = netdev_priv(netdev);
2059         struct i40e_pf *pf = np->vsi->back;
2060
2061         switch (cmd) {
2062         case SIOCGHWTSTAMP:
2063                 return i40e_ptp_get_ts_config(pf, ifr);
2064         case SIOCSHWTSTAMP:
2065                 return i40e_ptp_set_ts_config(pf, ifr);
2066         default:
2067                 return -EOPNOTSUPP;
2068         }
2069 }
2070
2071 /**
2072  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2073  * @vsi: the vsi being adjusted
2074  **/
2075 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2076 {
2077         struct i40e_vsi_context ctxt;
2078         i40e_status ret;
2079
2080         if ((vsi->info.valid_sections &
2081              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2082             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2083                 return;  /* already enabled */
2084
2085         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2086         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2087                                     I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2088
2089         ctxt.seid = vsi->seid;
2090         ctxt.info = vsi->info;
2091         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2092         if (ret) {
2093                 dev_info(&vsi->back->pdev->dev,
2094                          "update vlan stripping failed, err %s aq_err %s\n",
2095                          i40e_stat_str(&vsi->back->hw, ret),
2096                          i40e_aq_str(&vsi->back->hw,
2097                                      vsi->back->hw.aq.asq_last_status));
2098         }
2099 }
2100
2101 /**
2102  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2103  * @vsi: the vsi being adjusted
2104  **/
2105 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2106 {
2107         struct i40e_vsi_context ctxt;
2108         i40e_status ret;
2109
2110         if ((vsi->info.valid_sections &
2111              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2112             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2113              I40E_AQ_VSI_PVLAN_EMOD_MASK))
2114                 return;  /* already disabled */
2115
2116         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2117         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2118                                     I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2119
2120         ctxt.seid = vsi->seid;
2121         ctxt.info = vsi->info;
2122         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2123         if (ret) {
2124                 dev_info(&vsi->back->pdev->dev,
2125                          "update vlan stripping failed, err %s aq_err %s\n",
2126                          i40e_stat_str(&vsi->back->hw, ret),
2127                          i40e_aq_str(&vsi->back->hw,
2128                                      vsi->back->hw.aq.asq_last_status));
2129         }
2130 }
2131
2132 /**
2133  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2134  * @netdev: network interface to be adjusted
2135  * @features: netdev features to test if VLAN offload is enabled or not
2136  **/
2137 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2138 {
2139         struct i40e_netdev_priv *np = netdev_priv(netdev);
2140         struct i40e_vsi *vsi = np->vsi;
2141
2142         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2143                 i40e_vlan_stripping_enable(vsi);
2144         else
2145                 i40e_vlan_stripping_disable(vsi);
2146 }
2147
2148 /**
2149  * i40e_vsi_add_vlan - Add vsi membership for given vlan
2150  * @vsi: the vsi being configured
2151  * @vid: vlan id to be added (0 = untagged only , -1 = any)
2152  **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
        struct i40e_mac_filter *f, *add_f;
        bool is_netdev, is_vf;

        is_vf = (vsi->type == I40E_VSI_SRIOV);
        is_netdev = !!(vsi->netdev);

        /* add a filter for the netdev's own MAC on this VLAN */
        if (is_netdev) {
                add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
                                        is_vf, is_netdev);
                if (!add_f) {
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, vsi->netdev->dev_addr);
                        return -ENOMEM;
                }
        }

        /* replicate every existing MAC filter onto the new VLAN */
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
                add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
                if (!add_f) {
                        dev_info(&vsi->back->pdev->dev,
                                 "Could not add vlan filter %d for %pM\n",
                                 vid, f->macaddr);
                        return -ENOMEM;
                }
        }

        /* Now if we add a vlan tag, make sure to check if it is the first
         * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
         * with 0, so we now accept untagged and specified tagged traffic
         * (and not any tagged and untagged)
         */
        if (vid > 0) {
                if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
                                                  I40E_VLAN_ANY,
                                                  is_vf, is_netdev)) {
                        /* swap the netdev MAC's ANY filter for VLAN 0 */
                        i40e_del_filter(vsi, vsi->netdev->dev_addr,
                                        I40E_VLAN_ANY, is_vf, is_netdev);
                        add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
                                                is_vf, is_netdev);
                        if (!add_f) {
                                dev_info(&vsi->back->pdev->dev,
                                         "Could not add filter 0 for %pM\n",
                                         vsi->netdev->dev_addr);
                                return -ENOMEM;
                        }
                }
        }

        /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
        if (vid > 0 && !vsi->info.pvid) {
                /* same ANY -> VLAN 0 swap for all other MAC filters */
                list_for_each_entry(f, &vsi->mac_filter_list, list) {
                        if (i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
                                             is_vf, is_netdev)) {
                                i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
                                                is_vf, is_netdev);
                                add_f = i40e_add_filter(vsi, f->macaddr,
                                                        0, is_vf, is_netdev);
                                if (!add_f) {
                                        dev_info(&vsi->back->pdev->dev,
                                                 "Could not add filter 0 for %pM\n",
                                                 f->macaddr);
                                        return -ENOMEM;
                                }
                        }
                }
        }

        /* skip the hardware sync while down or resetting; it will be
         * done when the device comes back up
         */
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return 0;

        return i40e_sync_vsi_filters(vsi, false);
}
2229
2230 /**
2231  * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
2232  * @vsi: the vsi being configured
2233  * @vid: vlan id to be removed (0 = untagged only , -1 = any)
2234  *
2235  * Return: 0 on success or negative otherwise
2236  **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	/* drop the vid filter for the netdev MAC and for every known MAC */
	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (is_netdev) {
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		}

		/* NOTE(review): a tagged filter on netdev->dev_addr is
		 * counted a second time here; this looks harmless since
		 * only zero/non-zero filter_count matters below — confirm.
		 */
		if (f->vlan)
			filter_count++;
	}

	/* no tagged filters left: restore "accept any tag" for dev_addr */
	if (!filter_count && is_netdev) {
		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				    is_vf, is_netdev);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			return -ENOMEM;
		}
	}

	/* likewise swap each remaining MAC's vid-0 filter for VLAN_ANY */
	if (!filter_count) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					    is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				return -ENOMEM;
			}
		}
	}

	/* don't push to HW while the PF is down or a reset is pending */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return 0;

	return i40e_sync_vsi_filters(vsi, false);
}
2301
2302 /**
2303  * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
2304  * @netdev: network interface to be adjusted
2305  * @vid: vlan id to be added
2306  *
2307  * net_device_ops implementation for adding vlan ids
2308  **/
2309 #ifdef I40E_FCOE
2310 int i40e_vlan_rx_add_vid(struct net_device *netdev,
2311                          __always_unused __be16 proto, u16 vid)
2312 #else
2313 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
2314                                 __always_unused __be16 proto, u16 vid)
2315 #endif
2316 {
2317         struct i40e_netdev_priv *np = netdev_priv(netdev);
2318         struct i40e_vsi *vsi = np->vsi;
2319         int ret = 0;
2320
2321         if (vid > 4095)
2322                 return -EINVAL;
2323
2324         netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
2325
2326         /* If the network stack called us with vid = 0 then
2327          * it is asking to receive priority tagged packets with
2328          * vlan id 0.  Our HW receives them by default when configured
2329          * to receive untagged packets so there is no need to add an
2330          * extra filter for vlan 0 tagged packets.
2331          */
2332         if (vid)
2333                 ret = i40e_vsi_add_vlan(vsi, vid);
2334
2335         if (!ret && (vid < VLAN_N_VID))
2336                 set_bit(vid, vsi->active_vlans);
2337
2338         return ret;
2339 }
2340
2341 /**
2342  * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
2343  * @netdev: network interface to be adjusted
2344  * @vid: vlan id to be removed
2345  *
2346  * net_device_ops implementation for removing vlan ids
2347  **/
2348 #ifdef I40E_FCOE
2349 int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2350                           __always_unused __be16 proto, u16 vid)
2351 #else
2352 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
2353                                  __always_unused __be16 proto, u16 vid)
2354 #endif
2355 {
2356         struct i40e_netdev_priv *np = netdev_priv(netdev);
2357         struct i40e_vsi *vsi = np->vsi;
2358
2359         netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
2360
2361         /* return code is ignored as there is nothing a user
2362          * can do about failure to remove and a log message was
2363          * already printed from the other function
2364          */
2365         i40e_vsi_kill_vlan(vsi, vid);
2366
2367         clear_bit(vid, vsi->active_vlans);
2368
2369         return 0;
2370 }
2371
2372 /**
2373  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2374  * @vsi: the vsi being brought back up
2375  **/
2376 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2377 {
2378         u16 vid;
2379
2380         if (!vsi->netdev)
2381                 return;
2382
2383         i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2384
2385         for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2386                 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2387                                      vid);
2388 }
2389
2390 /**
2391  * i40e_vsi_add_pvid - Add pvid for the VSI
2392  * @vsi: the vsi being adjusted
2393  * @vid: the vlan id to set as a PVID
2394  **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	/* record the new port VLAN settings in the cached VSI info */
	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	/* NOTE(review): ctxt is not zeroed; only seid and info are set
	 * before the AQ call — presumably the remaining fields are
	 * ignored by i40e_aq_update_vsi_params(), confirm.
	 */
	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}
2420
2421 /**
2422  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2423  * @vsi: the vsi being adjusted
2424  *
 * Disable VLAN stripping and clear the cached port VLAN id
2426  **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	/* stop stripping the port VLAN tag on receive */
	i40e_vlan_stripping_disable(vsi);

	/* a zero pvid marks the VSI as having no port VLAN configured */
	vsi->info.pvid = 0;
}
2433
2434 /**
2435  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2436  * @vsi: ptr to the VSI
2437  *
2438  * If this function returns with an error, then it's possible one or
2439  * more of the rings is populated (while the rest are not).  It is the
2440  * callers duty to clean those orphaned rings.
2441  *
2442  * Return 0 on success, negative on failure
2443  **/
2444 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2445 {
2446         int i, err = 0;
2447
2448         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2449                 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2450
2451         return err;
2452 }
2453
2454 /**
2455  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2456  * @vsi: ptr to the VSI
2457  *
2458  * Free VSI's transmit software resources
2459  **/
2460 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2461 {
2462         int i;
2463
2464         if (!vsi->tx_rings)
2465                 return;
2466
2467         for (i = 0; i < vsi->num_queue_pairs; i++)
2468                 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2469                         i40e_free_tx_resources(vsi->tx_rings[i]);
2470 }
2471
2472 /**
2473  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2474  * @vsi: ptr to the VSI
2475  *
2476  * If this function returns with an error, then it's possible one or
2477  * more of the rings is populated (while the rest are not).  It is the
2478  * callers duty to clean those orphaned rings.
2479  *
2480  * Return 0 on success, negative on failure
2481  **/
2482 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2483 {
2484         int i, err = 0;
2485
2486         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2487                 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2488 #ifdef I40E_FCOE
2489         i40e_fcoe_setup_ddp_resources(vsi);
2490 #endif
2491         return err;
2492 }
2493
2494 /**
2495  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2496  * @vsi: ptr to the VSI
2497  *
2498  * Free all receive software resources
2499  **/
2500 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2501 {
2502         int i;
2503
2504         if (!vsi->rx_rings)
2505                 return;
2506
2507         for (i = 0; i < vsi->num_queue_pairs; i++)
2508                 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2509                         i40e_free_rx_resources(vsi->rx_rings[i]);
2510 #ifdef I40E_FCOE
2511         i40e_fcoe_free_ddp_resources(vsi);
2512 #endif
2513 }
2514
2515 /**
2516  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2517  * @ring: The Tx ring to configure
2518  *
2519  * This enables/disables XPS for a given Tx descriptor ring
2520  * based on the TCs enabled for the VSI that ring belongs to.
2521  **/
2522 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2523 {
2524         struct i40e_vsi *vsi = ring->vsi;
2525         cpumask_var_t mask;
2526
2527         if (!ring->q_vector || !ring->netdev)
2528                 return;
2529
2530         /* Single TC mode enable XPS */
2531         if (vsi->tc_config.numtc <= 1) {
2532                 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2533                         netif_set_xps_queue(ring->netdev,
2534                                             &ring->q_vector->affinity_mask,
2535                                             ring->queue_index);
2536         } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2537                 /* Disable XPS to allow selection based on TC */
2538                 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2539                 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2540                 free_cpumask_var(mask);
2541         }
2542 }
2543
2544 /**
2545  * i40e_configure_tx_ring - Configure a transmit ring context and rest
2546  * @ring: The Tx ring to configure
2547  *
2548  * Configure the Tx descriptor ring in the HMC context.
2549  **/
2550 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2551 {
2552         struct i40e_vsi *vsi = ring->vsi;
2553         u16 pf_q = vsi->base_queue + ring->queue_index;
2554         struct i40e_hw *hw = &vsi->back->hw;
2555         struct i40e_hmc_obj_txq tx_ctx;
2556         i40e_status err = 0;
2557         u32 qtx_ctl = 0;
2558
2559         /* some ATR related tx ring init */
2560         if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2561                 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2562                 ring->atr_count = 0;
2563         } else {
2564                 ring->atr_sample_rate = 0;
2565         }
2566
2567         /* configure XPS */
2568         i40e_config_xps_tx_ring(ring);
2569
2570         /* clear the context structure first */
2571         memset(&tx_ctx, 0, sizeof(tx_ctx));
2572
2573         tx_ctx.new_context = 1;
2574         tx_ctx.base = (ring->dma / 128);
2575         tx_ctx.qlen = ring->count;
2576         tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2577                                                I40E_FLAG_FD_ATR_ENABLED));
2578 #ifdef I40E_FCOE
2579         tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2580 #endif
2581         tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2582         /* FDIR VSI tx ring can still use RS bit and writebacks */
2583         if (vsi->type != I40E_VSI_FDIR)
2584                 tx_ctx.head_wb_ena = 1;
2585         tx_ctx.head_wb_addr = ring->dma +
2586                               (ring->count * sizeof(struct i40e_tx_desc));
2587
2588         /* As part of VSI creation/update, FW allocates certain
2589          * Tx arbitration queue sets for each TC enabled for
2590          * the VSI. The FW returns the handles to these queue
2591          * sets as part of the response buffer to Add VSI,
2592          * Update VSI, etc. AQ commands. It is expected that
2593          * these queue set handles be associated with the Tx
2594          * queues by the driver as part of the TX queue context
2595          * initialization. This has to be done regardless of
2596          * DCB as by default everything is mapped to TC0.
2597          */
2598         tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2599         tx_ctx.rdylist_act = 0;
2600
2601         /* clear the context in the HMC */
2602         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2603         if (err) {
2604                 dev_info(&vsi->back->pdev->dev,
2605                          "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2606                          ring->queue_index, pf_q, err);
2607                 return -ENOMEM;
2608         }
2609
2610         /* set the context in the HMC */
2611         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2612         if (err) {
2613                 dev_info(&vsi->back->pdev->dev,
2614                          "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2615                          ring->queue_index, pf_q, err);
2616                 return -ENOMEM;
2617         }
2618
2619         /* Now associate this queue with this PCI function */
2620         if (vsi->type == I40E_VSI_VMDQ2) {
2621                 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2622                 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2623                            I40E_QTX_CTL_VFVM_INDX_MASK;
2624         } else {
2625                 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2626         }
2627
2628         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2629                     I40E_QTX_CTL_PF_INDX_MASK);
2630         wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2631         i40e_flush(hw);
2632
2633         /* cache tail off for easier writes later */
2634         ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2635
2636         return 0;
2637 }
2638
2639 /**
2640  * i40e_configure_rx_ring - Configure a receive ring context
2641  * @ring: The Rx ring to configure
2642  *
2643  * Configure the Rx descriptor ring in the HMC context.
2644  **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	/* reset all per-ring state flags before (re)configuring */
	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	/* pull the VSI-wide buffer sizing into this ring */
	ring->rx_buf_len = vsi->rx_buf_len;
	ring->rx_hdr_len = vsi->rx_hdr_len;

	/* buffer sizes are programmed in HW units (shifted down) */
	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;

	/* descriptor ring base is programmed in 128-byte units */
	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
		set_ring_16byte_desc_enabled(ring);
		rx_ctx.dsize = 0;
	} else {
		rx_ctx.dsize = 1;
	}

	/* a non-zero dtype means some form of header split is in use */
	rx_ctx.dtype = vsi->dtype;
	if (vsi->dtype) {
		set_ring_ps_enabled(ring);
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
	} else {
		rx_ctx.hsplit_0 = 0;
	}

	/* cap rxmax by what a full buffer chain can actually hold */
	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
				  (chain_len * ring->rx_buf_len));
	/* NOTE(review): revision 0 presumably requires lrxqthresh of 0
	 * as an early-silicon quirk — confirm against the datasheet
	 */
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
#ifdef I40E_FCOE
	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	/* pre-fill the ring with receive buffers (plus headers if split) */
	if (ring_is_ps_enabled(ring)) {
		i40e_alloc_rx_headers(ring);
		i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
	} else {
		i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
	}

	return 0;
}
2733
2734 /**
2735  * i40e_vsi_configure_tx - Configure the VSI for Tx
2736  * @vsi: VSI structure describing this set of rings and resources
2737  *
2738  * Configure the Tx VSI for operation.
2739  **/
2740 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2741 {
2742         int err = 0;
2743         u16 i;
2744
2745         for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2746                 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2747
2748         return err;
2749 }
2750
2751 /**
2752  * i40e_vsi_configure_rx - Configure the VSI for Rx
2753  * @vsi: the VSI being configured
2754  *
2755  * Configure the Rx VSI for operation.
2756  **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	/* MTU above the default needs room for L2 header, FCS and a tag */
	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
			       + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = I40E_RXBUFFER_2048;

	/* figure out correct receive buffer length */
	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
				    I40E_FLAG_RX_PS_ENABLED)) {
	case I40E_FLAG_RX_1BUF_ENABLED:
		/* single-buffer mode: no header split, full-frame buffers */
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = vsi->max_frame;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
		break;
	case I40E_FLAG_RX_PS_ENABLED:
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
		break;
	default:
		/* neither (or both) flags set: fall back to always-split */
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
		break;
	}

#ifdef I40E_FCOE
	/* setup rx buffer for FCoE */
	if ((vsi->type == I40E_VSI_FCOE) &&
	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = I40E_RXBUFFER_3072;
		vsi->max_frame = I40E_RXBUFFER_3072;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
	}

#endif /* I40E_FCOE */
	/* round up for the chip's needs */
	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
				BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	/* set up individual rings; stop at the first failure */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
2811
2812 /**
2813  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
2814  * @vsi: ptr to the VSI
2815  **/
2816 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2817 {
2818         struct i40e_ring *tx_ring, *rx_ring;
2819         u16 qoffset, qcount;
2820         int i, n;
2821
2822         if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2823                 /* Reset the TC information */
2824                 for (i = 0; i < vsi->num_queue_pairs; i++) {
2825                         rx_ring = vsi->rx_rings[i];
2826                         tx_ring = vsi->tx_rings[i];
2827                         rx_ring->dcb_tc = 0;
2828                         tx_ring->dcb_tc = 0;
2829                 }
2830         }
2831
2832         for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2833                 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
2834                         continue;
2835
2836                 qoffset = vsi->tc_config.tc_info[n].qoffset;
2837                 qcount = vsi->tc_config.tc_info[n].qcount;
2838                 for (i = qoffset; i < (qoffset + qcount); i++) {
2839                         rx_ring = vsi->rx_rings[i];
2840                         tx_ring = vsi->tx_rings[i];
2841                         rx_ring->dcb_tc = n;
2842                         tx_ring->dcb_tc = n;
2843                 }
2844         }
2845 }
2846
2847 /**
2848  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
2849  * @vsi: ptr to the VSI
2850  **/
2851 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2852 {
2853         if (vsi->netdev)
2854                 i40e_set_rx_mode(vsi->netdev);
2855 }
2856
2857 /**
2858  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2859  * @vsi: Pointer to the targeted VSI
2860  *
2861  * This function replays the hlist on the hw where all the SB Flow Director
2862  * filters were saved.
2863  **/
2864 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2865 {
2866         struct i40e_fdir_filter *filter;
2867         struct i40e_pf *pf = vsi->back;
2868         struct hlist_node *node;
2869
2870         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2871                 return;
2872
2873         hlist_for_each_entry_safe(filter, node,
2874                                   &pf->fdir_filter_list, fdir_node) {
2875                 i40e_add_del_fdir(vsi, filter, true);
2876         }
2877 }
2878
2879 /**
2880  * i40e_vsi_configure - Set up the VSI for action
2881  * @vsi: the VSI being configured
2882  **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	/* software-side state first: rx mode, VLANs, TC-to-ring mapping */
	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	/* then program the hardware contexts, Tx before Rx */
	err = i40e_vsi_configure_tx(vsi);
	if (err)
		return err;

	return i40e_vsi_configure_rx(vsi);
}
2896
2897 /**
2898  * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
2899  * @vsi: the VSI being configured
2900  **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_q_vector *q_vector;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 val;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		/* program this vector's Rx and Tx interrupt throttle rates */
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			/* each Rx queue chains to its partner Tx queue... */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			/* ...and each Tx queue chains to the next Rx queue */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
2959
2960 /**
2961  * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: pointer to the PF structure
2963  **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	/* baseline set of non-queue causes that are always enabled */
	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	/* iWARP critical-error cause only when RDMA is enabled */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	/* PTP timesync cause only when PTP is active */
	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
2997
2998 /**
2999  * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
3000  * @vsi: the VSI being configured
3001  **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	/* non-queue interrupt causes share the single vector too */
	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	/* NOTE(review): this RQCTL value uses the TQCTL NEXTQ_TYPE shift
	 * macro; presumably the RQCTL and TQCTL fields share the same bit
	 * position — confirm against the register definitions.
	 */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	/* Tx queue 0 terminates the linked list */
	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}
3036
3037 /**
3038  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3039  * @pf: board private structure
3040  **/
3041 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3042 {
3043         struct i40e_hw *hw = &pf->hw;
3044
3045         wr32(hw, I40E_PFINT_DYN_CTL0,
3046              I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3047         i40e_flush(hw);
3048 }
3049
3050 /**
3051  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3052  * @pf: board private structure
3053  **/
3054 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3055 {
3056         struct i40e_hw *hw = &pf->hw;
3057         u32 val;
3058
3059         val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3060               I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3061               (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3062
3063         wr32(hw, I40E_PFINT_DYN_CTL0, val);
3064         i40e_flush(hw);
3065 }
3066
3067 /**
3068  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
3069  * @vsi: pointer to a vsi
3070  * @vector: disable a particular Hw Interrupt vector
3071  **/
3072 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
3073 {
3074         struct i40e_pf *pf = vsi->back;
3075         struct i40e_hw *hw = &pf->hw;
3076         u32 val;
3077
3078         val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3079         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3080         i40e_flush(hw);
3081 }
3082
3083 /**
3084  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3085  * @irq: interrupt number
3086  * @data: pointer to a q_vector
3087  **/
3088 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3089 {
3090         struct i40e_q_vector *q_vector = data;
3091
3092         if (!q_vector->tx.ring && !q_vector->rx.ring)
3093                 return IRQ_HANDLED;
3094
3095         napi_schedule(&q_vector->napi);
3096
3097         return IRQ_HANDLED;
3098 }
3099
3100 /**
3101  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3102  * @vsi: the VSI being configured
3103  * @basename: name for the vector
3104  *
3105  * Allocates MSI-X vectors and requests interrupts from the kernel.
3106  **/
3107 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3108 {
3109         int q_vectors = vsi->num_q_vectors;
3110         struct i40e_pf *pf = vsi->back;
3111         int base = vsi->base_vector;
3112         int rx_int_idx = 0;
3113         int tx_int_idx = 0;
3114         int vector, err;
3115
3116         for (vector = 0; vector < q_vectors; vector++) {
3117                 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3118
3119                 if (q_vector->tx.ring && q_vector->rx.ring) {
3120                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3121                                  "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3122                         tx_int_idx++;
3123                 } else if (q_vector->rx.ring) {
3124                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3125                                  "%s-%s-%d", basename, "rx", rx_int_idx++);
3126                 } else if (q_vector->tx.ring) {
3127                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3128                                  "%s-%s-%d", basename, "tx", tx_int_idx++);
3129                 } else {
3130                         /* skip this unused q_vector */
3131                         continue;
3132                 }
3133                 err = request_irq(pf->msix_entries[base + vector].vector,
3134                                   vsi->irq_handler,
3135                                   0,
3136                                   q_vector->name,
3137                                   q_vector);
3138                 if (err) {
3139                         dev_info(&pf->pdev->dev,
3140                                  "MSIX request_irq failed, error: %d\n", err);
3141                         goto free_queue_irqs;
3142                 }
3143                 /* assign the mask for this irq */
3144                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3145                                       &q_vector->affinity_mask);
3146         }
3147
3148         vsi->irqs_ready = true;
3149         return 0;
3150
3151 free_queue_irqs:
3152         while (vector) {
3153                 vector--;
3154                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3155                                       NULL);
3156                 free_irq(pf->msix_entries[base + vector].vector,
3157                          &(vsi->q_vectors[vector]));
3158         }
3159         return err;
3160 }
3161
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 *
 * Zeroes the per-queue interrupt cause registers, then the per-vector
 * dynamic control registers, and finally waits for any in-flight
 * handlers to complete before returning.
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        int base = vsi->base_vector;
        int i;

        /* kill each queue pair's Tx and Rx interrupt causes */
        for (i = 0; i < vsi->num_queue_pairs; i++) {
                wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
                wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
        }

        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                /* "- 1" because DYN_CTLN is indexed from the first queue
                 * vector; presumably vector 0 is the misc vector and is
                 * addressed via DYN_CTL0 -- confirm against register spec
                 */
                for (i = vsi->base_vector;
                     i < (vsi->num_q_vectors + vsi->base_vector); i++)
                        wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

                i40e_flush(hw);
                /* wait out any handler already running on each vector */
                for (i = 0; i < vsi->num_q_vectors; i++)
                        synchronize_irq(pf->msix_entries[i + base].vector);
        } else {
                /* Legacy and MSI mode - this stops all interrupt handling */
                wr32(hw, I40E_PFINT_ICR0_ENA, 0);
                wr32(hw, I40E_PFINT_DYN_CTL0, 0);
                i40e_flush(hw);
                synchronize_irq(pf->pdev->irq);
        }
}
3194
3195 /**
3196  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3197  * @vsi: the VSI being configured
3198  **/
3199 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3200 {
3201         struct i40e_pf *pf = vsi->back;
3202         int i;
3203
3204         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3205                 for (i = 0; i < vsi->num_q_vectors; i++)
3206                         i40e_irq_dynamic_enable(vsi, i);
3207         } else {
3208                 i40e_irq_dynamic_enable_icr0(pf);
3209         }
3210
3211         i40e_flush(&pf->hw);
3212         return 0;
3213 }
3214
3215 /**
3216  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3217  * @pf: board private structure
3218  **/
3219 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3220 {
3221         /* Disable ICR 0 */
3222         wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3223         i40e_flush(&pf->hw);
3224 }
3225
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure (the dev_id registered for this irq)
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
        struct i40e_pf *pf = (struct i40e_pf *)data;
        struct i40e_hw *hw = &pf->hw;
        irqreturn_t ret = IRQ_NONE;
        u32 icr0, icr0_remaining;
        u32 val, ena_mask;

        /* reading ICR0 latches-and-clears the pending causes; ena_mask is
         * edited below to mask off causes deferred to the service task
         */
        icr0 = rd32(hw, I40E_PFINT_ICR0);
        ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

        /* if sharing a legacy IRQ, we might get called w/o an intr pending */
        if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
                goto enable_intr;

        /* if interrupt but no bits showing, must be SWINT */
        if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
            (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
                pf->sw_int_count++;

        /* iWARP critical error: just log and drop the cause */
        if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
            (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
                ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
                icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
                dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
        }

        /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
        if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {

                /* temporarily disable queue cause for NAPI processing */
                u32 qval = rd32(hw, I40E_QINT_RQCTL(0));

                qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
                wr32(hw, I40E_QINT_RQCTL(0), qval);

                qval = rd32(hw, I40E_QINT_TQCTL(0));
                qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
                wr32(hw, I40E_QINT_TQCTL(0), qval);

                if (!test_bit(__I40E_DOWN, &pf->state))
                        napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
        }

        /* the events below are handed off to the service task via state
         * bits; the cause is masked until the task re-enables it
         */
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
                ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
                set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
        }

        if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
                ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
                set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
        }

        if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
                ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
                set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
        }

        /* reset interrupt: record which kind of reset the HW reports */
        if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
                if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
                        set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
                ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
                val = rd32(hw, I40E_GLGEN_RSTAT);
                val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
                       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
                if (val == I40E_RESET_CORER) {
                        pf->corer_count++;
                } else if (val == I40E_RESET_GLOBR) {
                        pf->globr_count++;
                } else if (val == I40E_RESET_EMPR) {
                        pf->empr_count++;
                        set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
                }
        }

        /* HMC errors are log-only; the cause stays enabled */
        if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
                icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
                dev_info(&pf->pdev->dev, "HMC error interrupt\n");
                dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
                         rd32(hw, I40E_PFHMC_ERRORINFO),
                         rd32(hw, I40E_PFHMC_ERRORDATA));
        }

        /* PTP Tx timestamp ready: collect it now */
        if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
                u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

                if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
                        icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
                        i40e_ptp_tx_hwtstamp(pf);
                }
        }

        /* If a critical error is pending we have no choice but to reset the
         * device.
         * Report and mask out any remaining unexpected interrupts.
         */
        icr0_remaining = icr0 & ena_mask;
        if (icr0_remaining) {
                dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
                         icr0_remaining);
                if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
                    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
                    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
                        dev_info(&pf->pdev->dev, "device will be reset\n");
                        set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                        i40e_service_event_schedule(pf);
                }
                ena_mask &= ~icr0_remaining;
        }
        ret = IRQ_HANDLED;

enable_intr:
        /* re-enable interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
        if (!test_bit(__I40E_DOWN, &pf->state)) {
                i40e_service_event_schedule(pf);
                i40e_irq_dynamic_enable_icr0(pf);
        }

        return ret;
}
3357
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Walks the Flow Director SB Tx ring reclaiming completed descriptor
 * pairs (filter desc followed by data desc) up to @budget entries.
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
        struct i40e_vsi *vsi = tx_ring->vsi;
        u16 i = tx_ring->next_to_clean;
        struct i40e_tx_buffer *tx_buf;
        struct i40e_tx_desc *tx_desc;

        tx_buf = &tx_ring->tx_bi[i];
        tx_desc = I40E_TX_DESC(tx_ring, i);
        /* bias the index negative so ring wrap is a cheap !i test below */
        i -= tx_ring->count;

        do {
                struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                read_barrier_depends();

                /* if the descriptor isn't done, no work yet to do */
                if (!(eop_desc->cmd_type_offset_bsz &
                      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buf->next_to_watch = NULL;

                tx_desc->buffer_addr = 0;
                tx_desc->cmd_type_offset_bsz = 0;
                /* move past filter desc */
                tx_buf++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buf = tx_ring->tx_bi;
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                }
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buf, dma),
                                 dma_unmap_len(tx_buf, len),
                                 DMA_TO_DEVICE);
                /* SB (sideband) filters carry an allocated raw buffer */
                if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
                        kfree(tx_buf->raw_buf);

                tx_buf->raw_buf = NULL;
                tx_buf->tx_flags = 0;
                tx_buf->next_to_watch = NULL;
                dma_unmap_len_set(tx_buf, len, 0);
                tx_desc->buffer_addr = 0;
                tx_desc->cmd_type_offset_bsz = 0;

                /* move us past the eop_desc for start of next FD desc */
                tx_buf++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buf = tx_ring->tx_bi;
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                }

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        /* undo the negative bias to recover a real ring index */
        i += tx_ring->count;
        tx_ring->next_to_clean = i;

        /* re-arm the vector now that the ring has been drained */
        if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
                i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

        return budget > 0;
}
3442
3443 /**
3444  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3445  * @irq: interrupt number
3446  * @data: pointer to a q_vector
3447  **/
3448 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3449 {
3450         struct i40e_q_vector *q_vector = data;
3451         struct i40e_vsi *vsi;
3452
3453         if (!q_vector->tx.ring)
3454                 return IRQ_HANDLED;
3455
3456         vsi = q_vector->tx.ring->vsi;
3457         i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3458
3459         return IRQ_HANDLED;
3460 }
3461
3462 /**
3463  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3464  * @vsi: the VSI being configured
3465  * @v_idx: vector index
3466  * @qp_idx: queue pair index
3467  **/
3468 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3469 {
3470         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3471         struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3472         struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3473
3474         tx_ring->q_vector = q_vector;
3475         tx_ring->next = q_vector->tx.ring;
3476         q_vector->tx.ring = tx_ring;
3477         q_vector->tx.count++;
3478
3479         rx_ring->q_vector = q_vector;
3480         rx_ring->next = q_vector->rx.ring;
3481         q_vector->rx.ring = rx_ring;
3482         q_vector->rx.count++;
3483 }
3484
3485 /**
3486  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3487  * @vsi: the VSI being configured
3488  *
3489  * This function maps descriptor rings to the queue-specific vectors
3490  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3491  * one vector per queue pair, but on a constrained vector budget, we
3492  * group the queue pairs as "efficiently" as possible.
3493  **/
3494 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3495 {
3496         int qp_remaining = vsi->num_queue_pairs;
3497         int q_vectors = vsi->num_q_vectors;
3498         int num_ringpairs;
3499         int v_start = 0;
3500         int qp_idx = 0;
3501
3502         /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3503          * group them so there are multiple queues per vector.
3504          * It is also important to go through all the vectors available to be
3505          * sure that if we don't use all the vectors, that the remaining vectors
3506          * are cleared. This is especially important when decreasing the
3507          * number of queues in use.
3508          */
3509         for (; v_start < q_vectors; v_start++) {
3510                 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3511
3512                 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3513
3514                 q_vector->num_ringpairs = num_ringpairs;
3515
3516                 q_vector->rx.count = 0;
3517                 q_vector->tx.count = 0;
3518                 q_vector->rx.ring = NULL;
3519                 q_vector->tx.ring = NULL;
3520
3521                 while (num_ringpairs--) {
3522                         i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3523                         qp_idx++;
3524                         qp_remaining--;
3525                 }
3526         }
3527 }
3528
3529 /**
3530  * i40e_vsi_request_irq - Request IRQ from the OS
3531  * @vsi: the VSI being configured
3532  * @basename: name for the vector
3533  **/
3534 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3535 {
3536         struct i40e_pf *pf = vsi->back;
3537         int err;
3538
3539         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3540                 err = i40e_vsi_request_irq_msix(vsi, basename);
3541         else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3542                 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3543                                   pf->int_name, pf);
3544         else
3545                 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3546                                   pf->int_name, pf);
3547
3548         if (err)
3549                 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3550
3551         return err;
3552 }
3553
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        int i;

        /* if interface is down do nothing */
        if (test_bit(__I40E_DOWN, &vsi->state))
                return;

        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                for (i = 0; i < vsi->num_q_vectors; i++)
                        i40e_msix_clean_rings(0, vsi->q_vectors[i]);
        } else {
                /* i40e_intr() casts its data argument to struct i40e_pf *
                 * (it is registered with pf as dev_id), so pass pf here,
                 * not netdev.
                 */
                i40e_intr(pf->pdev->irq, pf);
        }
}
#endif
3585
3586 /**
3587  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3588  * @pf: the PF being configured
3589  * @pf_q: the PF queue
3590  * @enable: enable or disable state of the queue
3591  *
3592  * This routine will wait for the given Tx queue of the PF to reach the
3593  * enabled or disabled state.
3594  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3595  * multiple retries; else will return 0 in case of success.
3596  **/
3597 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3598 {
3599         int i;
3600         u32 tx_reg;
3601
3602         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3603                 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3604                 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3605                         break;
3606
3607                 usleep_range(10, 20);
3608         }
3609         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3610                 return -ETIMEDOUT;
3611
3612         return 0;
3613 }
3614
/**
 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Walks every queue pair of the VSI, requesting the new enable state in
 * the QTX_ENA register and waiting for the hardware to confirm it.
 * Returns 0 on success, or -ETIMEDOUT if a queue did not change state.
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        int i, j, pf_q, ret = 0;
        u32 tx_reg;

        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {

                /* warn the TX unit of coming changes */
                i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
                if (!enable)
                        usleep_range(10, 20);

                /* wait until the REQ and STAT bits agree, i.e. any previous
                 * request has completed, before issuing a new one
                 */
                for (j = 0; j < 50; j++) {
                        tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
                        if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
                            ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
                                break;
                        usleep_range(1000, 2000);
                }
                /* Skip if the queue is already in the requested state */
                if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
                        continue;

                /* turn on/off the queue */
                if (enable) {
                        wr32(hw, I40E_QTX_HEAD(pf_q), 0);
                        tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
                } else {
                        tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
                }

                wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
                /* No waiting for the Tx queue to disable */
                if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
                        continue;

                /* wait for the change to finish */
                ret = i40e_pf_txq_wait(pf, pf_q, enable);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "VSI seid %d Tx ring %d %sable timeout\n",
                                 vsi->seid, pf_q, (enable ? "en" : "dis"));
                        break;
                }
        }

        /* NOTE(review): rev-0 silicon appears to need extra settle time
         * here -- confirm against the hardware errata
         */
        if (hw->revision_id == 0)
                mdelay(50);
        return ret;
}
3673
3674 /**
3675  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3676  * @pf: the PF being configured
3677  * @pf_q: the PF queue
3678  * @enable: enable or disable state of the queue
3679  *
3680  * This routine will wait for the given Rx queue of the PF to reach the
3681  * enabled or disabled state.
3682  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3683  * multiple retries; else will return 0 in case of success.
3684  **/
3685 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3686 {
3687         int i;
3688         u32 rx_reg;
3689
3690         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3691                 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3692                 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3693                         break;
3694
3695                 usleep_range(10, 20);
3696         }
3697         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3698                 return -ETIMEDOUT;
3699
3700         return 0;
3701 }
3702
/**
 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Walks every queue pair of the VSI, requesting the new enable state in
 * the QRX_ENA register and waiting for the hardware to confirm it.
 * Returns 0 on success, or -ETIMEDOUT if a queue did not change state.
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        int i, j, pf_q, ret = 0;
        u32 rx_reg;

        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
                /* wait until the REQ and STAT bits agree, i.e. any previous
                 * request has completed, before issuing a new one
                 */
                for (j = 0; j < 50; j++) {
                        rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
                        if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
                            ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
                                break;
                        usleep_range(1000, 2000);
                }

                /* Skip if the queue is already in the requested state */
                if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
                        continue;

                /* turn on/off the queue */
                if (enable)
                        rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
                else
                        rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
                wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

                /* wait for the change to finish */
                ret = i40e_pf_rxq_wait(pf, pf_q, enable);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "VSI seid %d Rx ring %d %sable timeout\n",
                                 vsi->seid, pf_q, (enable ? "en" : "dis"));
                        break;
                }
        }

        return ret;
}
3748
/**
 * i40e_vsi_control_rings - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @request: true to start (enable) the rings, false to stop them
 *
 * Rx is brought up before Tx on enable (a failure aborts early); on
 * disable, Tx is stopped first and return codes are ignored so we shut
 * down as much as possible.
 **/
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
        int ret = 0;

        /* do rx first for enable and last for disable */
        if (request) {
                ret = i40e_vsi_control_rx(vsi, request);
                if (ret)
                        return ret;
                ret = i40e_vsi_control_tx(vsi, request);
        } else {
                /* Ignore return value, we need to shutdown whatever we can */
                i40e_vsi_control_tx(vsi, request);
                i40e_vsi_control_rx(vsi, request);
        }

        return ret;
}
3772
3773 /**
3774  * i40e_vsi_free_irq - Free the irq association with the OS
3775  * @vsi: the VSI being configured
3776  **/
3777 static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
3778 {
3779         struct i40e_pf *pf = vsi->back;
3780         struct i40e_hw *hw = &pf->hw;
3781         int base = vsi->base_vector;
3782         u32 val, qp;
3783         int i;
3784
3785         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3786                 if (!vsi->q_vectors)
3787                         return;
3788
3789                 if (!vsi->irqs_ready)
3790                         return;
3791
3792                 vsi->irqs_ready = false;
3793                 for (i = 0; i < vsi->num_q_vectors; i++) {
3794                         u16 vector = i + base;
3795
3796                         /* free only the irqs that were actually requested */
3797                         if (!vsi->q_vectors[i] ||
3798                             !vsi->q_vectors[i]->num_ringpairs)
3799                                 continue;
3800
3801                         /* clear the affinity_mask in the IRQ descriptor */
3802                         irq_set_affinity_hint(pf->msix_entries[vector].vector,
3803                                               NULL);
3804                         free_irq(pf->msix_entries[vector].vector,
3805                                  vsi->q_vectors[i]);
3806
3807                         /* Tear down the interrupt queue link list
3808                          *
3809                          * We know that they come in pairs and always
3810                          * the Rx first, then the Tx.  To clear the
3811                          * link list, stick the EOL value into the
3812                          * next_q field of the registers.
3813                          */
3814                         val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
3815                         qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3816                                 >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3817                         val |= I40E_QUEUE_END_OF_LIST
3818                                 << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3819                         wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);
3820
3821                         while (qp != I40E_QUEUE_END_OF_LIST) {
3822                                 u32 next;
3823
3824                                 val = rd32(hw, I40E_QINT_RQCTL(qp));
3825
3826                                 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3827                                          I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3828                                          I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3829                                          I40E_QINT_RQCTL_INTEVENT_MASK);
3830
3831                                 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3832                                          I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3833
3834                                 wr32(hw, I40E_QINT_RQCTL(qp), val);
3835
3836                                 val = rd32(hw, I40E_QINT_TQCTL(qp));
3837
3838                                 next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
3839                                         >> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;
3840
3841                                 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3842                                          I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3843                                          I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3844                                          I40E_QINT_TQCTL_INTEVENT_MASK);
3845
3846                                 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3847                                          I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3848
3849                                 wr32(hw, I40E_QINT_TQCTL(qp), val);
3850                                 qp = next;
3851                         }
3852                 }
3853         } else {
3854                 free_irq(pf->pdev->irq, pf);
3855
3856                 val = rd32(hw, I40E_PFINT_LNKLST0);
3857                 qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
3858                         >> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
3859                 val |= I40E_QUEUE_END_OF_LIST
3860                         << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
3861                 wr32(hw, I40E_PFINT_LNKLST0, val);
3862
3863                 val = rd32(hw, I40E_QINT_RQCTL(qp));
3864                 val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
3865                          I40E_QINT_RQCTL_MSIX0_INDX_MASK |
3866                          I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
3867                          I40E_QINT_RQCTL_INTEVENT_MASK);
3868
3869                 val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
3870                         I40E_QINT_RQCTL_NEXTQ_INDX_MASK);
3871
3872                 wr32(hw, I40E_QINT_RQCTL(qp), val);
3873
3874                 val = rd32(hw, I40E_QINT_TQCTL(qp));
3875
3876                 val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
3877                          I40E_QINT_TQCTL_MSIX0_INDX_MASK |
3878                          I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
3879                          I40E_QINT_TQCTL_INTEVENT_MASK);
3880
3881                 val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
3882                         I40E_QINT_TQCTL_NEXTQ_INDX_MASK);
3883
3884                 wr32(hw, I40E_QINT_TQCTL(qp), val);
3885         }
3886 }
3887
3888 /**
3889  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
3890  * @vsi: the VSI being configured
3891  * @v_idx: Index of vector to be freed
3892  *
3893  * This function frees the memory allocated to the q_vector.  In addition if
3894  * NAPI is enabled it will delete any references to the NAPI struct prior
3895  * to freeing the q_vector.
3896  **/
3897 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
3898 {
3899         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3900         struct i40e_ring *ring;
3901
3902         if (!q_vector)
3903                 return;
3904
3905         /* disassociate q_vector from rings */
3906         i40e_for_each_ring(ring, q_vector->tx)
3907                 ring->q_vector = NULL;
3908
3909         i40e_for_each_ring(ring, q_vector->rx)
3910                 ring->q_vector = NULL;
3911
3912         /* only VSI w/ an associated netdev is set up w/ NAPI */
3913         if (vsi->netdev)
3914                 netif_napi_del(&q_vector->napi);
3915
3916         vsi->q_vectors[v_idx] = NULL;
3917
3918         kfree_rcu(q_vector, rcu);
3919 }
3920
3921 /**
3922  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
3923  * @vsi: the VSI being un-configured
3924  *
3925  * This frees the memory allocated to the q_vectors and
3926  * deletes references to the NAPI struct.
3927  **/
3928 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
3929 {
3930         int v_idx;
3931
3932         for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
3933                 i40e_free_q_vector(vsi, v_idx);
3934 }
3935
3936 /**
3937  * i40e_reset_interrupt_capability - Disable interrupt setup in OS
3938  * @pf: board private structure
3939  **/
3940 static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
3941 {
3942         /* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
3943         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3944                 pci_disable_msix(pf->pdev);
3945                 kfree(pf->msix_entries);
3946                 pf->msix_entries = NULL;
3947                 kfree(pf->irq_pile);
3948                 pf->irq_pile = NULL;
3949         } else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
3950                 pci_disable_msi(pf->pdev);
3951         }
3952         pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
3953 }
3954
3955 /**
3956  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
3957  * @pf: board private structure
3958  *
3959  * We go through and clear interrupt specific resources and reset the structure
3960  * to pre-load conditions
3961  **/
3962 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3963 {
3964         int i;
3965
3966         i40e_stop_misc_vector(pf);
3967         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3968                 synchronize_irq(pf->msix_entries[0].vector);
3969                 free_irq(pf->msix_entries[0].vector, pf);
3970         }
3971
3972         i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3973         for (i = 0; i < pf->num_alloc_vsi; i++)
3974                 if (pf->vsi[i])
3975                         i40e_vsi_free_q_vectors(pf->vsi[i]);
3976         i40e_reset_interrupt_capability(pf);
3977 }
3978
3979 /**
3980  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
3981  * @vsi: the VSI being configured
3982  **/
3983 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
3984 {
3985         int q_idx;
3986
3987         if (!vsi->netdev)
3988                 return;
3989
3990         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
3991                 napi_enable(&vsi->q_vectors[q_idx]->napi);
3992 }
3993
3994 /**
3995  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
3996  * @vsi: the VSI being configured
3997  **/
3998 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
3999 {
4000         int q_idx;
4001
4002         if (!vsi->netdev)
4003                 return;
4004
4005         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
4006                 napi_disable(&vsi->q_vectors[q_idx]->napi);
4007 }
4008
4009 /**
4010  * i40e_vsi_close - Shut down a VSI
4011  * @vsi: the vsi to be quelled
4012  **/
4013 static void i40e_vsi_close(struct i40e_vsi *vsi)
4014 {
4015         if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
4016                 i40e_down(vsi);
4017         i40e_vsi_free_irq(vsi);
4018         i40e_vsi_free_tx_resources(vsi);
4019         i40e_vsi_free_rx_resources(vsi);
4020         vsi->current_netdev_flags = 0;
4021 }
4022
4023 /**
4024  * i40e_quiesce_vsi - Pause a given VSI
4025  * @vsi: the VSI being paused
4026  **/
4027 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4028 {
4029         if (test_bit(__I40E_DOWN, &vsi->state))
4030                 return;
4031
4032         /* No need to disable FCoE VSI when Tx suspended */
4033         if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4034             vsi->type == I40E_VSI_FCOE) {
4035                 dev_dbg(&vsi->back->pdev->dev,
4036                          "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
4037                 return;
4038         }
4039
4040         set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4041         if (vsi->netdev && netif_running(vsi->netdev))
4042                 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4043         else
4044                 i40e_vsi_close(vsi);
4045 }
4046
4047 /**
4048  * i40e_unquiesce_vsi - Resume a given VSI
4049  * @vsi: the VSI being resumed
4050  **/
4051 static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
4052 {
4053         if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
4054                 return;
4055
4056         clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
4057         if (vsi->netdev && netif_running(vsi->netdev))
4058                 vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
4059         else
4060                 i40e_vsi_open(vsi);   /* this clears the DOWN bit */
4061 }
4062
4063 /**
4064  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4065  * @pf: the PF
4066  **/
4067 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4068 {
4069         int v;
4070
4071         for (v = 0; v < pf->num_alloc_vsi; v++) {
4072                 if (pf->vsi[v])
4073                         i40e_quiesce_vsi(pf->vsi[v]);
4074         }
4075 }
4076
4077 /**
4078  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4079  * @pf: the PF
4080  **/
4081 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4082 {
4083         int v;
4084
4085         for (v = 0; v < pf->num_alloc_vsi; v++) {
4086                 if (pf->vsi[v])
4087                         i40e_unquiesce_vsi(pf->vsi[v]);
4088         }
4089 }
4090
4091 #ifdef CONFIG_I40E_DCB
4092 /**
4093  * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
4094  * @vsi: the VSI being configured
4095  *
4096  * This function waits for the given VSI's Tx queues to be disabled.
4097  **/
4098 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
4099 {
4100         struct i40e_pf *pf = vsi->back;
4101         int i, pf_q, ret;
4102
4103         pf_q = vsi->base_queue;
4104         for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4105                 /* Check and wait for the disable status of the queue */
4106                 ret = i40e_pf_txq_wait(pf, pf_q, false);
4107                 if (ret) {
4108                         dev_info(&pf->pdev->dev,
4109                                  "VSI seid %d Tx ring %d disable timeout\n",
4110                                  vsi->seid, pf_q);
4111                         return ret;
4112                 }
4113         }
4114
4115         return 0;
4116 }
4117
4118 /**
4119  * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4120  * @pf: the PF
4121  *
4122  * This function waits for the Tx queues to be in disabled state for all the
4123  * VSIs that are managed by this PF.
4124  **/
4125 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4126 {
4127         int v, ret = 0;
4128
4129         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4130                 /* No need to wait for FCoE VSI queues */
4131                 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4132                         ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4133                         if (ret)
4134                                 break;
4135                 }
4136         }
4137
4138         return ret;
4139 }
4140
4141 #endif
4142
4143 /**
4144  * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
4145  * @q_idx: TX queue number
4146  * @vsi: Pointer to VSI struct
4147  *
4148  * This function checks specified queue for given VSI. Detects hung condition.
4149  * Sets hung bit since it is two step process. Before next run of service task
4150  * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
4151  * hung condition remain unchanged and during subsequent run, this function
4152  * issues SW interrupt to recover from hung condition.
4153  **/
4154 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
4155 {
4156         struct i40e_ring *tx_ring = NULL;
4157         struct i40e_pf  *pf;
4158         u32 head, val, tx_pending;
4159         int i;
4160
4161         pf = vsi->back;
4162
4163         /* now that we have an index, find the tx_ring struct */
4164         for (i = 0; i < vsi->num_queue_pairs; i++) {
4165                 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
4166                         if (q_idx == vsi->tx_rings[i]->queue_index) {
4167                                 tx_ring = vsi->tx_rings[i];
4168                                 break;
4169                         }
4170                 }
4171         }
4172
4173         if (!tx_ring)
4174                 return;
4175
4176         /* Read interrupt register */
4177         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
4178                 val = rd32(&pf->hw,
4179                            I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
4180                                                tx_ring->vsi->base_vector - 1));
4181         else
4182                 val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
4183
4184         head = i40e_get_head(tx_ring);
4185
4186         tx_pending = i40e_get_tx_pending(tx_ring);
4187
4188         /* Interrupts are disabled and TX pending is non-zero,
4189          * trigger the SW interrupt (don't wait). Worst case
4190          * there will be one extra interrupt which may result
4191          * into not cleaning any queues because queues are cleaned.
4192          */
4193         if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
4194                 i40e_force_wb(vsi, tx_ring->q_vector);
4195 }
4196
4197 /**
4198  * i40e_detect_recover_hung - Function to detect and recover hung_queues
4199  * @pf:  pointer to PF struct
4200  *
4201  * LAN VSI has netdev and netdev has TX queues. This function is to check
4202  * each of those TX queues if they are hung, trigger recovery by issuing
4203  * SW interrupt.
4204  **/
4205 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4206 {
4207         struct net_device *netdev;
4208         struct i40e_vsi *vsi;
4209         int i;
4210
4211         /* Only for LAN VSI */
4212         vsi = pf->vsi[pf->lan_vsi];
4213
4214         if (!vsi)
4215                 return;
4216
4217         /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4218         if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4219             test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4220                 return;
4221
4222         /* Make sure type is MAIN VSI */
4223         if (vsi->type != I40E_VSI_MAIN)
4224                 return;
4225
4226         netdev = vsi->netdev;
4227         if (!netdev)
4228                 return;
4229
4230         /* Bail out if netif_carrier is not OK */
4231         if (!netif_carrier_ok(netdev))
4232                 return;
4233
4234         /* Go thru' TX queues for netdev */
4235         for (i = 0; i < netdev->num_tx_queues; i++) {
4236                 struct netdev_queue *q;
4237
4238                 q = netdev_get_tx_queue(netdev, i);
4239                 if (q)
4240                         i40e_detect_recover_hung_queue(i, vsi);
4241         }
4242 }
4243
4244 /**
4245  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4246  * @pf: pointer to PF
4247  *
4248  * Get TC map for ISCSI PF type that will include iSCSI TC
4249  * and LAN TC.
4250  **/
4251 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4252 {
4253         struct i40e_dcb_app_priority_table app;
4254         struct i40e_hw *hw = &pf->hw;
4255         u8 enabled_tc = 1; /* TC0 is always enabled */
4256         u8 tc, i;
4257         /* Get the iSCSI APP TLV */
4258         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4259
4260         for (i = 0; i < dcbcfg->numapps; i++) {
4261                 app = dcbcfg->app[i];
4262                 if (app.selector == I40E_APP_SEL_TCPIP &&
4263                     app.protocolid == I40E_APP_PROTOID_ISCSI) {
4264                         tc = dcbcfg->etscfg.prioritytable[app.priority];
4265                         enabled_tc |= BIT_ULL(tc);
4266                         break;
4267                 }
4268         }
4269
4270         return enabled_tc;
4271 }
4272
4273 /**
4274  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4275  * @dcbcfg: the corresponding DCBx configuration structure
4276  *
4277  * Return the number of TCs from given DCBx configuration
4278  **/
4279 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4280 {
4281         u8 num_tc = 0;
4282         int i;
4283
4284         /* Scan the ETS Config Priority Table to find
4285          * traffic class enabled for a given priority
4286          * and use the traffic class index to get the
4287          * number of traffic classes enabled
4288          */
4289         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4290                 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4291                         num_tc = dcbcfg->etscfg.prioritytable[i];
4292         }
4293
4294         /* Traffic class index starts from zero so
4295          * increment to return the actual count
4296          */
4297         return num_tc + 1;
4298 }
4299
4300 /**
4301  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4302  * @dcbcfg: the corresponding DCBx configuration structure
4303  *
4304  * Query the current DCB configuration and return the number of
4305  * traffic classes enabled from the given DCBX config
4306  **/
4307 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4308 {
4309         u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4310         u8 enabled_tc = 1;
4311         u8 i;
4312
4313         for (i = 0; i < num_tc; i++)
4314                 enabled_tc |= BIT(i);
4315
4316         return enabled_tc;
4317 }
4318
4319 /**
4320  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4321  * @pf: PF being queried
4322  *
4323  * Return number of traffic classes enabled for the given PF
4324  **/
4325 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4326 {
4327         struct i40e_hw *hw = &pf->hw;
4328         u8 i, enabled_tc;
4329         u8 num_tc = 0;
4330         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4331
4332         /* If DCB is not enabled then always in single TC */
4333         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4334                 return 1;
4335
4336         /* SFP mode will be enabled for all TCs on port */
4337         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4338                 return i40e_dcb_get_num_tc(dcbcfg);
4339
4340         /* MFP mode return count of enabled TCs for this PF */
4341         if (pf->hw.func_caps.iscsi)
4342                 enabled_tc =  i40e_get_iscsi_tc_map(pf);
4343         else
4344                 return 1; /* Only TC0 */
4345
4346         /* At least have TC0 */
4347         enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4348         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4349                 if (enabled_tc & BIT_ULL(i))
4350                         num_tc++;
4351         }
4352         return num_tc;
4353 }
4354
4355 /**
4356  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4357  * @pf: PF being queried
4358  *
4359  * Return a bitmap for first enabled traffic class for this PF.
4360  **/
4361 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4362 {
4363         u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4364         u8 i = 0;
4365
4366         if (!enabled_tc)
4367                 return 0x1; /* TC0 */
4368
4369         /* Find the first enabled TC */
4370         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4371                 if (enabled_tc & BIT_ULL(i))
4372                         break;
4373         }
4374
4375         return BIT(i);
4376 }
4377
4378 /**
4379  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4380  * @pf: PF being queried
4381  *
4382  * Return a bitmap for enabled traffic classes for this PF.
4383  **/
4384 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4385 {
4386         /* If DCB is not enabled for this PF then just return default TC */
4387         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4388                 return i40e_pf_get_default_tc(pf);
4389
4390         /* SFP mode we want PF to be enabled for all TCs */
4391         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4392                 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4393
4394         /* MFP enabled and iSCSI PF type */
4395         if (pf->hw.func_caps.iscsi)
4396                 return i40e_get_iscsi_tc_map(pf);
4397         else
4398                 return i40e_pf_get_default_tc(pf);
4399 }
4400
4401 /**
4402  * i40e_vsi_get_bw_info - Query VSI BW Information
4403  * @vsi: the VSI being queried
4404  *
4405  * Returns 0 on success, negative value on failure
4406  **/
4407 static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
4408 {
4409         struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
4410         struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
4411         struct i40e_pf *pf = vsi->back;
4412         struct i40e_hw *hw = &pf->hw;
4413         i40e_status ret;
4414         u32 tc_bw_max;
4415         int i;
4416
4417         /* Get the VSI level BW configuration */
4418         ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
4419         if (ret) {
4420                 dev_info(&pf->pdev->dev,
4421                          "couldn't get PF vsi bw config, err %s aq_err %s\n",
4422                          i40e_stat_str(&pf->hw, ret),
4423                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4424                 return -EINVAL;
4425         }
4426
4427         /* Get the VSI level BW configuration per TC */
4428         ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
4429                                                NULL);
4430         if (ret) {
4431                 dev_info(&pf->pdev->dev,
4432                          "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
4433                          i40e_stat_str(&pf->hw, ret),
4434                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4435                 return -EINVAL;
4436         }
4437
4438         if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
4439                 dev_info(&pf->pdev->dev,
4440                          "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
4441                          bw_config.tc_valid_bits,
4442                          bw_ets_config.tc_valid_bits);
4443                 /* Still continuing */
4444         }
4445
4446         vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
4447         vsi->bw_max_quanta = bw_config.max_bw;
4448         tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
4449                     (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
4450         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4451                 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
4452                 vsi->bw_ets_limit_credits[i] =
4453                                         le16_to_cpu(bw_ets_config.credits[i]);
4454                 /* 3 bits out of 4 for each TC */
4455                 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
4456         }
4457
4458         return 0;
4459 }
4460
4461 /**
4462  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4463  * @vsi: the VSI being configured
4464  * @enabled_tc: TC bitmap
4465  * @bw_credits: BW shared credits per TC
4466  *
4467  * Returns 0 on success, negative value on failure
4468  **/
4469 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4470                                        u8 *bw_share)
4471 {
4472         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4473         i40e_status ret;
4474         int i;
4475
4476         bw_data.tc_valid_bits = enabled_tc;
4477         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4478                 bw_data.tc_bw_credits[i] = bw_share[i];
4479
4480         ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4481                                        NULL);
4482         if (ret) {
4483                 dev_info(&vsi->back->pdev->dev,
4484                          "AQ command Config VSI BW allocation per TC failed = %d\n",
4485                          vsi->back->hw.aq.asq_last_status);
4486                 return -EINVAL;
4487         }
4488
4489         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4490                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4491
4492         return 0;
4493 }
4494
4495 /**
4496  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4497  * @vsi: the VSI being configured
4498  * @enabled_tc: TC map to be enabled
4499  *
4500  **/
4501 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4502 {
4503         struct net_device *netdev = vsi->netdev;
4504         struct i40e_pf *pf = vsi->back;
4505         struct i40e_hw *hw = &pf->hw;
4506         u8 netdev_tc = 0;
4507         int i;
4508         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4509
4510         if (!netdev)
4511                 return;
4512
4513         if (!enabled_tc) {
4514                 netdev_reset_tc(netdev);
4515                 return;
4516         }
4517
4518         /* Set up actual enabled TCs on the VSI */
4519         if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4520                 return;
4521
4522         /* set per TC queues for the VSI */
4523         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4524                 /* Only set TC queues for enabled tcs
4525                  *
4526                  * e.g. For a VSI that has TC0 and TC3 enabled the
4527                  * enabled_tc bitmap would be 0x00001001; the driver
4528                  * will set the numtc for netdev as 2 that will be
4529                  * referenced by the netdev layer as TC 0 and 1.
4530                  */
4531                 if (vsi->tc_config.enabled_tc & BIT_ULL(i))
4532                         netdev_set_tc_queue(netdev,
4533                                         vsi->tc_config.tc_info[i].netdev_tc,
4534                                         vsi->tc_config.tc_info[i].qcount,
4535                                         vsi->tc_config.tc_info[i].qoffset);
4536         }
4537
4538         /* Assign UP2TC map for the VSI */
4539         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4540                 /* Get the actual TC# for the UP */
4541                 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4542                 /* Get the mapped netdev TC# for the UP */
4543                 netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
4544                 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4545         }
4546 }
4547
4548 /**
4549  * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
4550  * @vsi: the VSI being configured
4551  * @ctxt: the ctxt buffer returned from AQ VSI update param command
4552  **/
4553 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4554                                       struct i40e_vsi_context *ctxt)
4555 {
4556         /* copy just the sections touched not the entire info
4557          * since not all sections are valid as returned by
4558          * update vsi params
4559          */
4560         vsi->info.mapping_flags = ctxt->info.mapping_flags;
4561         memcpy(&vsi->info.queue_mapping,
4562                &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4563         memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4564                sizeof(vsi->info.tc_mapping));
4565 }
4566
4567 /**
4568  * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
4569  * @vsi: VSI to be configured
4570  * @enabled_tc: TC bitmap
4571  *
4572  * This configures a particular VSI for TCs that are mapped to the
4573  * given TC bitmap. It uses default bandwidth share for TCs across
4574  * VSIs to configure TC for a particular VSI.
4575  *
4576  * NOTE:
4577  * It is expected that the VSI queues have been quisced before calling
4578  * this function.
4579  **/
4580 static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4581 {
4582         u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
4583         struct i40e_vsi_context ctxt;
4584         int ret = 0;
4585         int i;
4586
4587         /* Check if enabled_tc is same as existing or new TCs */
4588         if (vsi->tc_config.enabled_tc == enabled_tc)
4589                 return ret;
4590
4591         /* Enable ETS TCs with equal BW Share for now across all VSIs */
4592         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4593                 if (enabled_tc & BIT_ULL(i))
4594                         bw_share[i] = 1;
4595         }
4596
4597         ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
4598         if (ret) {
4599                 dev_info(&vsi->back->pdev->dev,
4600                          "Failed configuring TC map %d for VSI %d\n",
4601                          enabled_tc, vsi->seid);
4602                 goto out;
4603         }
4604
4605         /* Update Queue Pairs Mapping for currently enabled UPs */
4606         ctxt.seid = vsi->seid;
4607         ctxt.pf_num = vsi->back->hw.pf_id;
4608         ctxt.vf_num = 0;
4609         ctxt.uplink_seid = vsi->uplink_seid;
4610         ctxt.info = vsi->info;
4611         i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
4612
4613         /* Update the VSI after updating the VSI queue-mapping information */
4614         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
4615         if (ret) {
4616                 dev_info(&vsi->back->pdev->dev,
4617                          "Update vsi tc config failed, err %s aq_err %s\n",
4618                          i40e_stat_str(&vsi->back->hw, ret),
4619                          i40e_aq_str(&vsi->back->hw,
4620                                      vsi->back->hw.aq.asq_last_status));
4621                 goto out;
4622         }
4623         /* update the local VSI info with updated queue map */
4624         i40e_vsi_update_queue_map(vsi, &ctxt);
4625         vsi->info.valid_sections = 0;
4626
4627         /* Update current VSI BW information */
4628         ret = i40e_vsi_get_bw_info(vsi);
4629         if (ret) {
4630                 dev_info(&vsi->back->pdev->dev,
4631                          "Failed updating vsi bw info, err %s aq_err %s\n",
4632                          i40e_stat_str(&vsi->back->hw, ret),
4633                          i40e_aq_str(&vsi->back->hw,
4634                                      vsi->back->hw.aq.asq_last_status));
4635                 goto out;
4636         }
4637
4638         /* Update the netdev TC setup */
4639         i40e_vsi_config_netdev_tc(vsi, enabled_tc);
4640 out:
4641         return ret;
4642 }
4643
4644 /**
4645  * i40e_veb_config_tc - Configure TCs for given VEB
4646  * @veb: given VEB
4647  * @enabled_tc: TC bitmap
4648  *
4649  * Configures given TC bitmap for VEB (switching) element
4650  **/
4651 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4652 {
4653         struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4654         struct i40e_pf *pf = veb->pf;
4655         int ret = 0;
4656         int i;
4657
4658         /* No TCs or already enabled TCs just return */
4659         if (!enabled_tc || veb->enabled_tc == enabled_tc)
4660                 return ret;
4661
4662         bw_data.tc_valid_bits = enabled_tc;
4663         /* bw_data.absolute_credits is not set (relative) */
4664
4665         /* Enable ETS TCs with equal BW Share for now */
4666         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4667                 if (enabled_tc & BIT_ULL(i))
4668                         bw_data.tc_bw_share_credits[i] = 1;
4669         }
4670
4671         ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4672                                                    &bw_data, NULL);
4673         if (ret) {
4674                 dev_info(&pf->pdev->dev,
4675                          "VEB bw config failed, err %s aq_err %s\n",
4676                          i40e_stat_str(&pf->hw, ret),
4677                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4678                 goto out;
4679         }
4680
4681         /* Update the BW information */
4682         ret = i40e_veb_get_bw_info(veb);
4683         if (ret) {
4684                 dev_info(&pf->pdev->dev,
4685                          "Failed getting veb bw config, err %s aq_err %s\n",
4686                          i40e_stat_str(&pf->hw, ret),
4687                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4688         }
4689
4690 out:
4691         return ret;
4692 }
4693
4694 #ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesced all the VSIs before calling
 * this function.  Failures on individual elements are logged
 * but do not abort the loop, so as many components as possible
 * get configured.
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
        u8 tc_map = 0;
        int ret;
        u8 v;

        /* Enable the TCs available on PF to all VEBs */
        tc_map = i40e_pf_get_tc_map(pf);
        for (v = 0; v < I40E_MAX_VEB; v++) {
                if (!pf->veb[v])
                        continue;
                ret = i40e_veb_config_tc(pf->veb[v], tc_map);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "Failed configuring TC for VEB seid=%d\n",
                                 pf->veb[v]->seid);
                        /* Will try to configure as many components */
                }
        }

        /* Update each VSI */
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (!pf->vsi[v])
                        continue;

                /* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
                 * - For FCoE VSI only enable the TC configured
                 *   as per the APP TLV
#endif
                 * - For all others keep them at TC0 for now
                 */
                if (v == pf->lan_vsi)
                        tc_map = i40e_pf_get_tc_map(pf);
                else
                        tc_map = i40e_pf_get_default_tc(pf);
#ifdef I40E_FCOE
                if (pf->vsi[v]->type == I40E_VSI_FCOE)
                        tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

                ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "Failed configuring TC for VSI seid=%d\n",
                                 pf->vsi[v]->seid);
                        /* Will try to configure as many components */
                } else {
                        /* Re-configure VSI vectors based on updated TC map */
                        i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
                        /* push the new DCB settings to the dcbnl layer */
                        if (pf->vsi[v]->netdev)
                                i40e_dcbnl_set_all(pf->vsi[v]);
                }
        }
}
4758
4759 /**
4760  * i40e_resume_port_tx - Resume port Tx
4761  * @pf: PF struct
4762  *
4763  * Resume a port's Tx and issue a PF reset in case of failure to
4764  * resume.
4765  **/
4766 static int i40e_resume_port_tx(struct i40e_pf *pf)
4767 {
4768         struct i40e_hw *hw = &pf->hw;
4769         int ret;
4770
4771         ret = i40e_aq_resume_port_tx(hw, NULL);
4772         if (ret) {
4773                 dev_info(&pf->pdev->dev,
4774                          "Resume Port Tx failed, err %s aq_err %s\n",
4775                           i40e_stat_str(&pf->hw, ret),
4776                           i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4777                 /* Schedule PF reset to recover */
4778                 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
4779                 i40e_service_event_schedule(pf);
4780         }
4781
4782         return ret;
4783 }
4784
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure.  Sets I40E_FLAG_DCB_CAPABLE (and
 * I40E_FLAG_DCB_ENABLED when more than one TC is in use) on success.
 *
 * Returns 0 on success or when DCB init is skipped, otherwise the
 * error from i40e_init_dcb().
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        int err = 0;

        /* Do not enable DCB for SW1 and SW2 images even if the FW is capable
         * (FW versions below 4.33 are skipped here).
         */
        if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
            (pf->hw.aq.fw_maj_ver < 4))
                goto out;

        /* Get the initial DCB configuration */
        err = i40e_init_dcb(hw);
        if (!err) {
                /* Device/Function is not DCBX capable */
                if ((!hw->func_caps.dcb) ||
                    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
                        dev_info(&pf->pdev->dev,
                                 "DCBX offload is not supported or is disabled for this PF.\n");

                        /* in MFP mode leave the capability flags untouched */
                        if (pf->flags & I40E_FLAG_MFP_ENABLED)
                                goto out;

                } else {
                        /* When status is not DISABLED then DCBX in FW */
                        pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
                                       DCB_CAP_DCBX_VER_IEEE;

                        pf->flags |= I40E_FLAG_DCB_CAPABLE;
                        /* Enable DCB tagging only when more than one TC */
                        if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                                pf->flags |= I40E_FLAG_DCB_ENABLED;
                        dev_dbg(&pf->pdev->dev,
                                "DCBX offload is supported for this PF.\n");
                }
        } else {
                dev_info(&pf->pdev->dev,
                         "Query for DCB configuration failed, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, err),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }

out:
        return err;
}
4836 #endif /* CONFIG_I40E_DCB */
4837 #define SPEED_SIZE 14
4838 #define FC_SIZE 8
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if the link is up, false if it is down
 *
 * Logs a link state change on the VSI's netdev.  Duplicate
 * notifications for an unchanged state are suppressed via
 * vsi->current_isup.
 */
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
        char *speed = "Unknown";
        char *fc = "Unknown";

        /* only report a change in state */
        if (vsi->current_isup == isup)
                return;
        vsi->current_isup = isup;
        if (!isup) {
                netdev_info(vsi->netdev, "NIC Link is Down\n");
                return;
        }

        /* Warn user if link speed on NPAR enabled partition is not at
         * least 10GB
         */
        if (vsi->back->hw.func_caps.npar_enable &&
            (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
             vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
                netdev_warn(vsi->netdev,
                            "The partition detected link speed that is less than 10Gbps\n");

        /* translate the HW-reported link speed into a printable string */
        switch (vsi->back->hw.phy.link_info.link_speed) {
        case I40E_LINK_SPEED_40GB:
                speed = "40 G";
                break;
        case I40E_LINK_SPEED_20GB:
                speed = "20 G";
                break;
        case I40E_LINK_SPEED_10GB:
                speed = "10 G";
                break;
        case I40E_LINK_SPEED_1GB:
                speed = "1000 M";
                break;
        case I40E_LINK_SPEED_100MB:
                speed = "100 M";
                break;
        default:
                break;
        }

        /* translate the current flow control mode into a printable string */
        switch (vsi->back->hw.fc.current_mode) {
        case I40E_FC_FULL:
                fc = "RX/TX";
                break;
        case I40E_FC_TX_PAUSE:
                fc = "TX";
                break;
        case I40E_FC_RX_PAUSE:
                fc = "RX";
                break;
        default:
                fc = "None";
                break;
        }

        netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
                    speed, fc);
}
4903
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, or the error from starting the rings.
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        int err;

        /* set up interrupts for whichever mode (MSI-X vs MSI/legacy)
         * this PF is using
         */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                i40e_vsi_configure_msix(vsi);
        else
                i40e_configure_msi_and_legacy(vsi);

        /* start rings */
        err = i40e_vsi_control_rings(vsi, true);
        if (err)
                return err;

        /* rings are live: mark VSI up, then enable NAPI and interrupts */
        clear_bit(__I40E_DOWN, &vsi->state);
        i40e_napi_enable_all(vsi);
        i40e_vsi_enable_irq(vsi);

        if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
            (vsi->netdev)) {
                i40e_print_link_message(vsi, true);
                netif_tx_start_all_queues(vsi->netdev);
                netif_carrier_on(vsi->netdev);
        } else if (vsi->netdev) {
                i40e_print_link_message(vsi, false);
                /* need to check for qualified module here*/
                if ((pf->hw.phy.link_info.link_info &
                        I40E_AQ_MEDIA_AVAILABLE) &&
                    (!(pf->hw.phy.link_info.an_info &
                        I40E_AQ_QUALIFIED_MODULE)))
                        netdev_err(vsi->netdev,
                                   "the driver failed to link because an unqualified module was detected.");
        }

        /* replay FDIR SB filters */
        if (vsi->type == I40E_VSI_FDIR) {
                /* reset fd counters */
                pf->fd_add_err = pf->fd_atr_cnt = 0;
                if (pf->fd_tcp_rule > 0) {
                        /* sideband TCP/IPv4 rules exist: force ATR off */
                        pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
                        pf->fd_tcp_rule = 0;
                }
                i40e_fdir_filter_restore(vsi);
        }
        i40e_service_event_schedule(pf);

        return 0;
}
4959
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.  Takes the PF's __I40E_CONFIG_BUSY
 * bit for the duration of the down/up cycle; may sleep, so must
 * not be called from atomic context (hence the WARN_ON below).
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;

        WARN_ON(in_interrupt());
        /* serialize against other configuration paths on this PF */
        while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
                usleep_range(1000, 2000);
        i40e_down(vsi);

        /* Give a VF some time to respond to the reset.  The
         * two second wait is based upon the watchdog cycle in
         * the VF driver.
         */
        if (vsi->type == I40E_VSI_SRIOV)
                msleep(2000);
        i40e_up(vsi);
        clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
4985
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Re-applies the VSI configuration and then finishes bring-up.
 *
 * Returns 0 on success, negative value on failure.
 **/
int i40e_up(struct i40e_vsi *vsi)
{
        int err;

        err = i40e_vsi_configure(vsi);
        if (err)
                return err;

        return i40e_up_complete(vsi);
}
5000
5001 /**
5002  * i40e_down - Shutdown the connection processing
5003  * @vsi: the VSI being stopped
5004  **/
5005 void i40e_down(struct i40e_vsi *vsi)
5006 {
5007         int i;
5008
5009         /* It is assumed that the caller of this function
5010          * sets the vsi->state __I40E_DOWN bit.
5011          */
5012         if (vsi->netdev) {
5013                 netif_carrier_off(vsi->netdev);
5014                 netif_tx_disable(vsi->netdev);
5015         }
5016         i40e_vsi_disable_irq(vsi);
5017         i40e_vsi_control_rings(vsi, false);
5018         i40e_napi_disable_all(vsi);
5019
5020         for (i = 0; i < vsi->num_queue_pairs; i++) {
5021                 i40e_clean_tx_ring(vsi->tx_rings[i]);
5022                 i40e_clean_rx_ring(vsi->rx_rings[i]);
5023         }
5024 }
5025
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 *
 * Returns 0 on success (or when the requested TC configuration is
 * already active), negative value on failure.
 **/
#ifdef I40E_FCOE
int i40e_setup_tc(struct net_device *netdev, u8 tc)
#else
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
#endif
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        u8 enabled_tc = 0;
        int ret = -EINVAL;
        int i;

        /* Check if DCB enabled to continue */
        if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
                netdev_info(netdev, "DCB is not enabled for adapter\n");
                goto exit;
        }

        /* Check if MFP enabled */
        if (pf->flags & I40E_FLAG_MFP_ENABLED) {
                netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
                goto exit;
        }

        /* Check whether tc count is within enabled limit */
        if (tc > i40e_pf_get_num_tc(pf)) {
                netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
                goto exit;
        }

        /* Generate TC map for number of tc requested */
        for (i = 0; i < tc; i++)
                enabled_tc |= BIT_ULL(i);

        /* Requesting same TC configuration as already enabled */
        if (enabled_tc == vsi->tc_config.enabled_tc)
                return 0;

        /* Quiesce VSI queues */
        i40e_quiesce_vsi(vsi);

        /* Configure VSI for enabled TCs */
        ret = i40e_vsi_config_tc(vsi, enabled_tc);
        if (ret) {
                netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
                            vsi->seid);
                goto exit;
        }

        /* Unquiesce VSI */
        i40e_unquiesce_vsi(vsi);

exit:
        return ret;
}
5087
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
        int err;

        /* disallow open during test or if eeprom is broken */
        if (test_bit(__I40E_TESTING, &pf->state) ||
            test_bit(__I40E_BAD_EEPROM, &pf->state))
                return -EBUSY;

        /* carrier stays off until i40e_up_complete() confirms link */
        netif_carrier_off(netdev);

        err = i40e_vsi_open(vsi);
        if (err)
                return err;

        /* configure global TSO hardware offload settings: per-register
         * TCP flag masks applied during segmentation
         */
        wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
                                                       TCP_FLAG_FIN) >> 16);
        wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
                                                       TCP_FLAG_FIN |
                                                       TCP_FLAG_CWR) >> 16);
        wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

#ifdef CONFIG_I40E_VXLAN
        /* ask the stack to replay existing VXLAN port notifications */
        vxlan_get_rx_port(netdev);
#endif

        return 0;
}
5132
/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI: allocate ring resources,
 * configure the VSI, request IRQs, and complete bring-up.  On any
 * failure everything acquired so far is unwound via the goto chain
 * below; if the failing VSI is the LAN VSI a PF reset is requested.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
        struct i40e_pf *pf = vsi->back;
        char int_name[I40E_INT_NAME_STR_LEN];
        int err;

        /* allocate descriptors */
        err = i40e_vsi_setup_tx_resources(vsi);
        if (err)
                goto err_setup_tx;
        err = i40e_vsi_setup_rx_resources(vsi);
        if (err)
                goto err_setup_rx;

        err = i40e_vsi_configure(vsi);
        if (err)
                goto err_setup_rx;

        if (vsi->netdev) {
                /* name the IRQ after the netdev for easy identification */
                snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
                         dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
                err = i40e_vsi_request_irq(vsi, int_name);
                if (err)
                        goto err_setup_rx;

                /* Notify the stack of the actual queue counts. */
                err = netif_set_real_num_tx_queues(vsi->netdev,
                                                   vsi->num_queue_pairs);
                if (err)
                        goto err_set_queues;

                err = netif_set_real_num_rx_queues(vsi->netdev,
                                                   vsi->num_queue_pairs);
                if (err)
                        goto err_set_queues;

        } else if (vsi->type == I40E_VSI_FDIR) {
                /* FDIR VSI has no netdev; name the IRQ after the device */
                snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
                         dev_driver_string(&pf->pdev->dev),
                         dev_name(&pf->pdev->dev));
                err = i40e_vsi_request_irq(vsi, int_name);

        } else {
                /* no other VSI type is expected to be opened this way */
                err = -EINVAL;
                goto err_setup_rx;
        }

        err = i40e_up_complete(vsi);
        if (err)
                goto err_up_complete;

        return 0;

err_up_complete:
        i40e_down(vsi);
err_set_queues:
        i40e_vsi_free_irq(vsi);
err_setup_rx:
        i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
        i40e_vsi_free_tx_resources(vsi);
        /* the LAN VSI failing to open warrants a full PF reset */
        if (vsi == pf->vsi[pf->lan_vsi])
                i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

        return err;
}
5207
5208 /**
5209  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5210  * @pf: Pointer to PF
5211  *
5212  * This function destroys the hlist where all the Flow Director
5213  * filters were saved.
5214  **/
5215 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5216 {
5217         struct i40e_fdir_filter *filter;
5218         struct hlist_node *node2;
5219
5220         hlist_for_each_entry_safe(filter, node2,
5221                                   &pf->fdir_filter_list, fdir_node) {
5222                 hlist_del(&filter->fdir_node);
5223                 kfree(filter);
5224         }
5225         pf->fdir_pf_active_filters = 0;
5226 }
5227
/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev)
#else
static int i40e_close(struct net_device *netdev)
#endif
{
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;

        /* all teardown is delegated to the VSI-level close helper */
        i40e_vsi_close(vsi);

        return 0;
}
5251
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 * Only the largest reset indicated in @reset_flags is performed;
 * may sleep, so must not be called from atomic context.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
        u32 val;

        WARN_ON(in_interrupt());

        /* notify the VFs only while the AdminQ is still usable */
        if (i40e_check_asq_alive(&pf->hw))
                i40e_vc_notify_reset(pf);

        /* do the biggest reset indicated */
        if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

                /* Request a Global Reset
                 *
                 * This will start the chip's countdown to the actual full
                 * chip reset event, and a warning interrupt to be sent
                 * to all PFs, including the requestor.  Our handler
                 * for the warning interrupt will deal with the shutdown
                 * and recovery of the switch setup.
                 */
                dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
                val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
                val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

        } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

                /* Request a Core Reset
                 *
                 * Same as Global Reset, except does *not* include the MAC/PHY
                 */
                dev_dbg(&pf->pdev->dev, "CoreR requested\n");
                val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
                val |= I40E_GLGEN_RTRIG_CORER_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
                i40e_flush(&pf->hw);

        } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

                /* Request a PF Reset
                 *
                 * Resets only the PF-specific registers
                 *
                 * This goes directly to the tear-down and rebuild of
                 * the switch, since we need to do all the recovery as
                 * for the Core Reset.
                 */
                dev_dbg(&pf->pdev->dev, "PFR requested\n");
                i40e_handle_reset_warning(pf);

        } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;

                /* Find the VSI(s) that requested a re-init */
                dev_info(&pf->pdev->dev,
                         "VSI reinit requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];

                        if (vsi != NULL &&
                            test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
                                i40e_vsi_reinit_locked(pf->vsi[v]);
                                clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
                        }
                }
        } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
                int v;

                /* Find the VSI(s) that needs to be brought down */
                dev_info(&pf->pdev->dev, "VSI down requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];

                        if (vsi != NULL &&
                            test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
                                /* mark the VSI down before taking it down */
                                set_bit(__I40E_DOWN, &vsi->state);
                                i40e_down(vsi);
                                clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                        }
                }
        } else {
                dev_info(&pf->pdev->dev,
                         "bad reset request 0x%08x\n", reset_flags);
        }
}
5346
5347 #ifdef CONFIG_I40E_DCB
5348 /**
5349  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5350  * @pf: board private structure
5351  * @old_cfg: current DCB config
5352  * @new_cfg: new DCB config
5353  **/
5354 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5355                             struct i40e_dcbx_config *old_cfg,
5356                             struct i40e_dcbx_config *new_cfg)
5357 {
5358         bool need_reconfig = false;
5359
5360         /* Check if ETS configuration has changed */
5361         if (memcmp(&new_cfg->etscfg,
5362                    &old_cfg->etscfg,
5363                    sizeof(new_cfg->etscfg))) {
5364                 /* If Priority Table has changed reconfig is needed */
5365                 if (memcmp(&new_cfg->etscfg.prioritytable,
5366                            &old_cfg->etscfg.prioritytable,
5367                            sizeof(new_cfg->etscfg.prioritytable))) {
5368                         need_reconfig = true;
5369                         dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5370                 }
5371
5372                 if (memcmp(&new_cfg->etscfg.tcbwtable,
5373                            &old_cfg->etscfg.tcbwtable,
5374                            sizeof(new_cfg->etscfg.tcbwtable)))
5375                         dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5376
5377                 if (memcmp(&new_cfg->etscfg.tsatable,
5378                            &old_cfg->etscfg.tsatable,
5379                            sizeof(new_cfg->etscfg.tsatable)))
5380                         dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5381         }
5382
5383         /* Check if PFC configuration has changed */
5384         if (memcmp(&new_cfg->pfc,
5385                    &old_cfg->pfc,
5386                    sizeof(new_cfg->pfc))) {
5387                 need_reconfig = true;
5388                 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5389         }
5390
5391         /* Check if APP Table has changed */
5392         if (memcmp(&new_cfg->app,
5393                    &old_cfg->app,
5394                    sizeof(new_cfg->app))) {
5395                 need_reconfig = true;
5396                 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5397         }
5398
5399         dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5400         return need_reconfig;
5401 }
5402
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Filters the event (nearest bridge, local MIB only), refreshes the
 * DCBX configuration from firmware, and if a relevant change is
 * detected quiesces the VSIs, reconfigures DCB across VEB/VSIs and
 * resumes port Tx.  On resume/disable failures a PF reset is
 * scheduled to recover.
 *
 * Returns 0 when nothing needed to be done, otherwise the status of
 * the last firmware/driver call made.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
                                  struct i40e_arq_event_info *e)
{
        struct i40e_aqc_lldp_get_mib *mib =
                (struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_dcbx_config tmp_dcbx_cfg;
        bool need_reconfig = false;
        int ret = 0;
        u8 type;

        /* Not DCB capable or capability disabled */
        if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
                return ret;

        /* Ignore if event is not for Nearest Bridge */
        type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
                & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
        dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
        if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
                return ret;

        /* Check MIB Type and return if event for Remote MIB update */
        type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
        dev_dbg(&pf->pdev->dev,
                "LLDP event mib type %s\n", type ? "remote" : "local");
        if (type == I40E_AQ_LLDP_MIB_REMOTE) {
                /* Update the remote cached instance and return */
                ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
                                I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
                                &hw->remote_dcbx_config);
                goto exit;
        }

        /* Store the old configuration so we can diff against it below */
        tmp_dcbx_cfg = hw->local_dcbx_config;

        /* Reset the old DCBx configuration data */
        memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
        /* Get updated DCBX data from firmware */
        ret = i40e_get_dcb_config(&pf->hw);
        if (ret) {
                dev_info(&pf->pdev->dev,
                         "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto exit;
        }

        /* No change detected in DCBX configs */
        if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
                    sizeof(tmp_dcbx_cfg))) {
                dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
                goto exit;
        }

        need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
                                               &hw->local_dcbx_config);

        i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

        if (!need_reconfig)
                goto exit;

        /* Enable DCB tagging only when more than one TC */
        if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
                pf->flags |= I40E_FLAG_DCB_ENABLED;
        else
                pf->flags &= ~I40E_FLAG_DCB_ENABLED;

        set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
        /* Reconfiguration needed quiesce all VSIs */
        i40e_pf_quiesce_all_vsi(pf);

        /* Changes in configuration update VEB/VSI */
        i40e_dcb_reconfigure(pf);

        ret = i40e_resume_port_tx(pf);

        clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
        /* In case of error no point in resuming VSIs */
        if (ret)
                goto exit;

        /* Wait for the PF's Tx queues to be disabled */
        ret = i40e_pf_wait_txq_disabled(pf);
        if (ret) {
                /* Schedule PF reset to recover */
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                i40e_service_event_schedule(pf);
        } else {
                i40e_pf_unquiesce_all_vsi(pf);
        }

exit:
        return ret;
}
5505 #endif /* CONFIG_I40E_DCB */
5506
5507 /**
5508  * i40e_do_reset_safe - Protected reset path for userland calls.
5509  * @pf: board private structure
5510  * @reset_flags: which reset is requested
5511  *
5512  **/
5513 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
5514 {
5515         rtnl_lock();
5516         i40e_do_reset(pf, reset_flags);
5517         rtnl_unlock();
5518 }
5519
5520 /**
5521  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5522  * @pf: board private structure
5523  * @e: event info posted on ARQ
5524  *
5525  * Handler for LAN Queue Overflow Event generated by the firmware for PF
5526  * and VF queues
5527  **/
5528 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5529                                            struct i40e_arq_event_info *e)
5530 {
5531         struct i40e_aqc_lan_overflow *data =
5532                 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5533         u32 queue = le32_to_cpu(data->prtdcb_rupto);
5534         u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5535         struct i40e_hw *hw = &pf->hw;
5536         struct i40e_vf *vf;
5537         u16 vf_id;
5538
5539         dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5540                 queue, qtx_ctl);
5541
5542         /* Queue belongs to VF, find the VF and issue VF reset */
5543         if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5544             >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5545                 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5546                          >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5547                 vf_id -= hw->func_caps.vf_base_id;
5548                 vf = &pf->vf[vf_id];
5549                 i40e_vc_notify_vf_reset(vf);
5550                 /* Allow VF to process pending reset notification */
5551                 msleep(20);
5552                 i40e_reset_vf(vf, false);
5553         }
5554 }
5555
5556 /**
5557  * i40e_service_event_complete - Finish up the service event
5558  * @pf: board private structure
5559  **/
5560 static void i40e_service_event_complete(struct i40e_pf *pf)
5561 {
5562         BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));
5563
5564         /* flush memory to make sure state is correct before next watchog */
5565         smp_mb__before_atomic();
5566         clear_bit(__I40E_SERVICE_SCHED, &pf->state);
5567 }
5568
5569 /**
5570  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5571  * @pf: board private structure
5572  **/
5573 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5574 {
5575         u32 val, fcnt_prog;
5576
5577         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5578         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5579         return fcnt_prog;
5580 }
5581
5582 /**
5583  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5584  * @pf: board private structure
5585  **/
5586 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5587 {
5588         u32 val, fcnt_prog;
5589
5590         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5591         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5592                     ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5593                       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5594         return fcnt_prog;
5595 }
5596
5597 /**
5598  * i40e_get_global_fd_count - Get total FD filters programmed on device
5599  * @pf: board private structure
5600  **/
5601 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5602 {
5603         u32 val, fcnt_prog;
5604
5605         val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5606         fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5607                     ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5608                      I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5609         return fcnt_prog;
5610 }
5611
5612 /**
5613  * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
5614  * @pf: board private structure
5615  **/
5616 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5617 {
5618         struct i40e_fdir_filter *filter;
5619         u32 fcnt_prog, fcnt_avail;
5620         struct hlist_node *node;
5621
5622         if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5623                 return;
5624
5625         /* Check if, FD SB or ATR was auto disabled and if there is enough room
5626          * to re-enable
5627          */
5628         fcnt_prog = i40e_get_global_fd_count(pf);
5629         fcnt_avail = pf->fdir_pf_filter_count;
5630         if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5631             (pf->fd_add_err == 0) ||
5632             (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5633                 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5634                     (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5635                         pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5636                         if (I40E_DEBUG_FD & pf->hw.debug_mask)
5637                                 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5638                 }
5639         }
5640         /* Wait for some more space to be available to turn on ATR */
5641         if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5642                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5643                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5644                         pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5645                         if (I40E_DEBUG_FD & pf->hw.debug_mask)
5646                                 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5647                 }
5648         }
5649
5650         /* if hw had a problem adding a filter, delete it */
5651         if (pf->fd_inv > 0) {
5652                 hlist_for_each_entry_safe(filter, node,
5653                                           &pf->fdir_filter_list, fdir_node) {
5654                         if (filter->fd_id == pf->fd_inv) {
5655                                 hlist_del(&filter->fdir_node);
5656                                 kfree(filter);
5657                                 pf->fdir_pf_active_filters--;
5658                         }
5659                 }
5660         }
5661 }
5662
/* minimum seconds between two FD table flushes */
#define I40E_MIN_FD_FLUSH_INTERVAL 10
/* seconds after a flush during which re-enabling ATR is considered unsafe */
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 *
 * Clears the entire flow director table in hardware, then re-adds the
 * sideband (ntuple) filters from the driver's list.  ATR is re-enabled
 * afterwards unless flushes are happening too frequently while the table
 * is mostly occupied by sideband rules.
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	/* nothing to do when flow director is entirely off */
	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	/* rate-limit: at most one flush per I40E_MIN_FD_FLUSH_INTERVAL */
	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	/* pause ATR while the table is being cleared; re-enabled below
	 * on successful replay unless disable_atr was set
	 */
	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		/* hardware clears the bit when the flush is done */
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		/* flush timed out (~250-300ms of polling); the
		 * FD_FLUSH_REQUESTED state bit stays set so a later
		 * pass can retry
		 */
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr)
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}

}
5726
5727 /**
5728  * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
5729  * @pf: board private structure
5730  **/
5731 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
5732 {
5733         return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5734 }
5735
/* We can see up to 256 filter programming descriptors in flight if the
 * filters are being applied very quickly, before the first filter miss
 * error shows up on Rx queue 0.  Accumulating enough of these error
 * messages before reacting ensures we don't trigger a flush too often.
 */
5741 #define I40E_MAX_FD_PROGRAM_ERROR 256
5742
5743 /**
5744  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5745  * @pf: board private structure
5746  **/
5747 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5748 {
5749
5750         /* if interface is down do nothing */
5751         if (test_bit(__I40E_DOWN, &pf->state))
5752                 return;
5753
5754         if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5755                 return;
5756
5757         if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5758                 i40e_fdir_flush_and_replay(pf);
5759
5760         i40e_fdir_check_and_reenable(pf);
5761
5762 }
5763
5764 /**
5765  * i40e_vsi_link_event - notify VSI of a link event
5766  * @vsi: vsi to be notified
5767  * @link_up: link up or down
5768  **/
5769 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
5770 {
5771         if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
5772                 return;
5773
5774         switch (vsi->type) {
5775         case I40E_VSI_MAIN:
5776 #ifdef I40E_FCOE
5777         case I40E_VSI_FCOE:
5778 #endif
5779                 if (!vsi->netdev || !vsi->netdev_registered)
5780                         break;
5781
5782                 if (link_up) {
5783                         netif_carrier_on(vsi->netdev);
5784                         netif_tx_wake_all_queues(vsi->netdev);
5785                 } else {
5786                         netif_carrier_off(vsi->netdev);
5787                         netif_tx_stop_all_queues(vsi->netdev);
5788                 }
5789                 break;
5790
5791         case I40E_VSI_SRIOV:
5792         case I40E_VSI_VMDQ2:
5793         case I40E_VSI_CTRL:
5794         case I40E_VSI_MIRROR:
5795         default:
5796                 /* there is no notification for other VSIs */
5797                 break;
5798         }
5799 }
5800
5801 /**
5802  * i40e_veb_link_event - notify elements on the veb of a link event
5803  * @veb: veb to be notified
5804  * @link_up: link up or down
5805  **/
5806 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
5807 {
5808         struct i40e_pf *pf;
5809         int i;
5810
5811         if (!veb || !veb->pf)
5812                 return;
5813         pf = veb->pf;
5814
5815         /* depth first... */
5816         for (i = 0; i < I40E_MAX_VEB; i++)
5817                 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
5818                         i40e_veb_link_event(pf->veb[i], link_up);
5819
5820         /* ... now the local VSIs */
5821         for (i = 0; i < pf->num_alloc_vsi; i++)
5822                 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
5823                         i40e_vsi_link_event(pf->vsi[i], link_up);
5824 }
5825
5826 /**
5827  * i40e_link_event - Update netif_carrier status
5828  * @pf: board private structure
5829  **/
5830 static void i40e_link_event(struct i40e_pf *pf)
5831 {
5832         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
5833         u8 new_link_speed, old_link_speed;
5834         i40e_status status;
5835         bool new_link, old_link;
5836
5837         /* set this to force the get_link_status call to refresh state */
5838         pf->hw.phy.get_link_info = true;
5839
5840         old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
5841
5842         status = i40e_get_link_status(&pf->hw, &new_link);
5843         if (status) {
5844                 dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
5845                         status);
5846                 return;
5847         }
5848
5849         old_link_speed = pf->hw.phy.link_info_old.link_speed;
5850         new_link_speed = pf->hw.phy.link_info.link_speed;
5851
5852         if (new_link == old_link &&
5853             new_link_speed == old_link_speed &&
5854             (test_bit(__I40E_DOWN, &vsi->state) ||
5855              new_link == netif_carrier_ok(vsi->netdev)))
5856                 return;
5857
5858         if (!test_bit(__I40E_DOWN, &vsi->state))
5859                 i40e_print_link_message(vsi, new_link);
5860
5861         /* Notify the base of the switch tree connected to
5862          * the link.  Floating VEBs are not notified.
5863          */
5864         if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
5865                 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
5866         else
5867                 i40e_vsi_link_event(vsi, new_link);
5868
5869         if (pf->vf)
5870                 i40e_vc_notify_link_state(pf);
5871
5872         if (pf->flags & I40E_FLAG_PTP)
5873                 i40e_ptp_set_increment(pf);
5874 }
5875
5876 /**
5877  * i40e_watchdog_subtask - periodic checks not using event driven response
5878  * @pf: board private structure
5879  **/
5880 static void i40e_watchdog_subtask(struct i40e_pf *pf)
5881 {
5882         int i;
5883
5884         /* if interface is down do nothing */
5885         if (test_bit(__I40E_DOWN, &pf->state) ||
5886             test_bit(__I40E_CONFIG_BUSY, &pf->state))
5887                 return;
5888
5889         /* make sure we don't do these things too often */
5890         if (time_before(jiffies, (pf->service_timer_previous +
5891                                   pf->service_timer_period)))
5892                 return;
5893         pf->service_timer_previous = jiffies;
5894
5895         if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
5896                 i40e_link_event(pf);
5897
5898         /* Update the stats for active netdevs so the network stack
5899          * can look at updated numbers whenever it cares to
5900          */
5901         for (i = 0; i < pf->num_alloc_vsi; i++)
5902                 if (pf->vsi[i] && pf->vsi[i]->netdev)
5903                         i40e_update_stats(pf->vsi[i]);
5904
5905         if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
5906                 /* Update the stats for the active switching components */
5907                 for (i = 0; i < I40E_MAX_VEB; i++)
5908                         if (pf->veb[i])
5909                                 i40e_update_veb_stats(pf->veb[i]);
5910         }
5911
5912         i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
5913 }
5914
5915 /**
5916  * i40e_reset_subtask - Set up for resetting the device and driver
5917  * @pf: board private structure
5918  **/
5919 static void i40e_reset_subtask(struct i40e_pf *pf)
5920 {
5921         u32 reset_flags = 0;
5922
5923         rtnl_lock();
5924         if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
5925                 reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
5926                 clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
5927         }
5928         if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
5929                 reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
5930                 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5931         }
5932         if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
5933                 reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
5934                 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
5935         }
5936         if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
5937                 reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
5938                 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
5939         }
5940         if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
5941                 reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
5942                 clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
5943         }
5944
5945         /* If there's a recovery already waiting, it takes
5946          * precedence before starting a new reset sequence.
5947          */
5948         if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
5949                 i40e_handle_reset_warning(pf);
5950                 goto unlock;
5951         }
5952
5953         /* If we're already down or resetting, just bail */
5954         if (reset_flags &&
5955             !test_bit(__I40E_DOWN, &pf->state) &&
5956             !test_bit(__I40E_CONFIG_BUSY, &pf->state))
5957                 i40e_do_reset(pf, reset_flags);
5958
5959 unlock:
5960         rtnl_unlock();
5961 }
5962
5963 /**
5964  * i40e_handle_link_event - Handle link event
5965  * @pf: board private structure
5966  * @e: event info posted on ARQ
5967  **/
5968 static void i40e_handle_link_event(struct i40e_pf *pf,
5969                                    struct i40e_arq_event_info *e)
5970 {
5971         struct i40e_hw *hw = &pf->hw;
5972         struct i40e_aqc_get_link_status *status =
5973                 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
5974
5975         /* save off old link status information */
5976         hw->phy.link_info_old = hw->phy.link_info;
5977
5978         /* Do a new status request to re-enable LSE reporting
5979          * and load new status information into the hw struct
5980          * This completely ignores any state information
5981          * in the ARQ event info, instead choosing to always
5982          * issue the AQ update link status command.
5983          */
5984         i40e_link_event(pf);
5985
5986         /* check for unqualified module, if link is down */
5987         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
5988             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
5989             (!(status->link_info & I40E_AQ_LINK_UP)))
5990                 dev_err(&pf->pdev->dev,
5991                         "The driver failed to link because an unqualified module was detected.\n");
5992 }
5993
5994 /**
5995  * i40e_clean_adminq_subtask - Clean the AdminQ rings
5996  * @pf: board private structure
5997  **/
5998 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
5999 {
6000         struct i40e_arq_event_info event;
6001         struct i40e_hw *hw = &pf->hw;
6002         u16 pending, i = 0;
6003         i40e_status ret;
6004         u16 opcode;
6005         u32 oldval;
6006         u32 val;
6007
6008         /* Do not run clean AQ when PF reset fails */
6009         if (test_bit(__I40E_RESET_FAILED, &pf->state))
6010                 return;
6011
6012         /* check for error indications */
6013         val = rd32(&pf->hw, pf->hw.aq.arq.len);
6014         oldval = val;
6015         if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6016                 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6017                 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6018         }
6019         if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6020                 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6021                 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6022         }
6023         if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6024                 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6025                 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6026         }
6027         if (oldval != val)
6028                 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6029
6030         val = rd32(&pf->hw, pf->hw.aq.asq.len);
6031         oldval = val;
6032         if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6033                 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6034                 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6035         }
6036         if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6037                 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6038                 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6039         }
6040         if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6041                 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6042                 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6043         }
6044         if (oldval != val)
6045                 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6046
6047         event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6048         event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6049         if (!event.msg_buf)
6050                 return;
6051
6052         do {
6053                 ret = i40e_clean_arq_element(hw, &event, &pending);
6054                 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6055                         break;
6056                 else if (ret) {
6057                         dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6058                         break;
6059                 }
6060
6061                 opcode = le16_to_cpu(event.desc.opcode);
6062                 switch (opcode) {
6063
6064                 case i40e_aqc_opc_get_link_status:
6065                         i40e_handle_link_event(pf, &event);
6066                         break;
6067                 case i40e_aqc_opc_send_msg_to_pf:
6068                         ret = i40e_vc_process_vf_msg(pf,
6069                                         le16_to_cpu(event.desc.retval),
6070                                         le32_to_cpu(event.desc.cookie_high),
6071                                         le32_to_cpu(event.desc.cookie_low),
6072                                         event.msg_buf,
6073                                         event.msg_len);
6074                         break;
6075                 case i40e_aqc_opc_lldp_update_mib:
6076                         dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6077 #ifdef CONFIG_I40E_DCB
6078                         rtnl_lock();
6079                         ret = i40e_handle_lldp_event(pf, &event);
6080                         rtnl_unlock();
6081 #endif /* CONFIG_I40E_DCB */
6082                         break;
6083                 case i40e_aqc_opc_event_lan_overflow:
6084                         dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6085                         i40e_handle_lan_overflow_event(pf, &event);
6086                         break;
6087                 case i40e_aqc_opc_send_msg_to_peer:
6088                         dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6089                         break;
6090                 case i40e_aqc_opc_nvm_erase:
6091                 case i40e_aqc_opc_nvm_update:
6092                         i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
6093                         break;
6094                 default:
6095                         dev_info(&pf->pdev->dev,
6096                                  "ARQ Error: Unknown event 0x%04x received\n",
6097                                  opcode);
6098                         break;
6099                 }
6100         } while (pending && (i++ < pf->adminq_work_limit));
6101
6102         clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6103         /* re-enable Admin queue interrupt cause */
6104         val = rd32(hw, I40E_PFINT_ICR0_ENA);
6105         val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6106         wr32(hw, I40E_PFINT_ICR0_ENA, val);
6107         i40e_flush(hw);
6108
6109         kfree(event.msg_buf);
6110 }
6111
6112 /**
6113  * i40e_verify_eeprom - make sure eeprom is good to use
6114  * @pf: board private structure
6115  **/
6116 static void i40e_verify_eeprom(struct i40e_pf *pf)
6117 {
6118         int err;
6119
6120         err = i40e_diag_eeprom_test(&pf->hw);
6121         if (err) {
6122                 /* retry in case of garbage read */
6123                 err = i40e_diag_eeprom_test(&pf->hw);
6124                 if (err) {
6125                         dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6126                                  err);
6127                         set_bit(__I40E_BAD_EEPROM, &pf->state);
6128                 }
6129         }
6130
6131         if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6132                 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6133                 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6134         }
6135 }
6136
6137 /**
6138  * i40e_enable_pf_switch_lb
6139  * @pf: pointer to the PF structure
6140  *
6141  * enable switch loop back or die - no point in a return value
6142  **/
6143 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6144 {
6145         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6146         struct i40e_vsi_context ctxt;
6147         int ret;
6148
6149         ctxt.seid = pf->main_vsi_seid;
6150         ctxt.pf_num = pf->hw.pf_id;
6151         ctxt.vf_num = 0;
6152         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6153         if (ret) {
6154                 dev_info(&pf->pdev->dev,
6155                          "couldn't get PF vsi config, err %s aq_err %s\n",
6156                          i40e_stat_str(&pf->hw, ret),
6157                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6158                 return;
6159         }
6160         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6161         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6162         ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6163
6164         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6165         if (ret) {
6166                 dev_info(&pf->pdev->dev,
6167                          "update vsi switch failed, err %s aq_err %s\n",
6168                          i40e_stat_str(&pf->hw, ret),
6169                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6170         }
6171 }
6172
6173 /**
6174  * i40e_disable_pf_switch_lb
6175  * @pf: pointer to the PF structure
6176  *
6177  * disable switch loop back or die - no point in a return value
6178  **/
6179 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6180 {
6181         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6182         struct i40e_vsi_context ctxt;
6183         int ret;
6184
6185         ctxt.seid = pf->main_vsi_seid;
6186         ctxt.pf_num = pf->hw.pf_id;
6187         ctxt.vf_num = 0;
6188         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6189         if (ret) {
6190                 dev_info(&pf->pdev->dev,
6191                          "couldn't get PF vsi config, err %s aq_err %s\n",
6192                          i40e_stat_str(&pf->hw, ret),
6193                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6194                 return;
6195         }
6196         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6197         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6198         ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6199
6200         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6201         if (ret) {
6202                 dev_info(&pf->pdev->dev,
6203                          "update vsi switch failed, err %s aq_err %s\n",
6204                          i40e_stat_str(&pf->hw, ret),
6205                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6206         }
6207 }
6208
6209 /**
6210  * i40e_config_bridge_mode - Configure the HW bridge mode
6211  * @veb: pointer to the bridge instance
6212  *
6213  * Configure the loop back mode for the LAN VSI that is downlink to the
6214  * specified HW bridge instance. It is expected this function is called
6215  * when a new HW bridge is instantiated.
6216  **/
6217 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6218 {
6219         struct i40e_pf *pf = veb->pf;
6220
6221         dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6222                  veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6223         if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6224                 i40e_disable_pf_switch_lb(pf);
6225         else
6226                 i40e_enable_pf_switch_lb(pf);
6227 }
6228
6229 /**
6230  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6231  * @veb: pointer to the VEB instance
6232  *
6233  * This is a recursive function that first builds the attached VSIs then
6234  * recurses in to build the next layer of VEB.  We track the connections
6235  * through our own index numbers because the seid's from the HW could
6236  * change across the reset.
6237  **/
6238 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6239 {
6240         struct i40e_vsi *ctl_vsi = NULL;
6241         struct i40e_pf *pf = veb->pf;
6242         int v, veb_idx;
6243         int ret;
6244
6245         /* build VSI that owns this VEB, temporarily attached to base VEB */
6246         for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6247                 if (pf->vsi[v] &&
6248                     pf->vsi[v]->veb_idx == veb->idx &&
6249                     pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6250                         ctl_vsi = pf->vsi[v];
6251                         break;
6252                 }
6253         }
6254         if (!ctl_vsi) {
6255                 dev_info(&pf->pdev->dev,
6256                          "missing owner VSI for veb_idx %d\n", veb->idx);
6257                 ret = -ENOENT;
6258                 goto end_reconstitute;
6259         }
6260         if (ctl_vsi != pf->vsi[pf->lan_vsi])
6261                 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6262         ret = i40e_add_vsi(ctl_vsi);
6263         if (ret) {
6264                 dev_info(&pf->pdev->dev,
6265                          "rebuild of veb_idx %d owner VSI failed: %d\n",
6266                          veb->idx, ret);
6267                 goto end_reconstitute;
6268         }
6269         i40e_vsi_reset_stats(ctl_vsi);
6270
6271         /* create the VEB in the switch and move the VSI onto the VEB */
6272         ret = i40e_add_veb(veb, ctl_vsi);
6273         if (ret)
6274                 goto end_reconstitute;
6275
6276         if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6277                 veb->bridge_mode = BRIDGE_MODE_VEB;
6278         else
6279                 veb->bridge_mode = BRIDGE_MODE_VEPA;
6280         i40e_config_bridge_mode(veb);
6281
6282         /* create the remaining VSIs attached to this VEB */
6283         for (v = 0; v < pf->num_alloc_vsi; v++) {
6284                 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6285                         continue;
6286
6287                 if (pf->vsi[v]->veb_idx == veb->idx) {
6288                         struct i40e_vsi *vsi = pf->vsi[v];
6289
6290                         vsi->uplink_seid = veb->seid;
6291                         ret = i40e_add_vsi(vsi);
6292                         if (ret) {
6293                                 dev_info(&pf->pdev->dev,
6294                                          "rebuild of vsi_idx %d failed: %d\n",
6295                                          v, ret);
6296                                 goto end_reconstitute;
6297                         }
6298                         i40e_vsi_reset_stats(vsi);
6299                 }
6300         }
6301
6302         /* create any VEBs attached to this VEB - RECURSION */
6303         for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6304                 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6305                         pf->veb[veb_idx]->uplink_seid = veb->seid;
6306                         ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6307                         if (ret)
6308                                 break;
6309                 }
6310         }
6311
6312 end_reconstitute:
6313         return ret;
6314 }
6315
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 *
 * Queries the firmware for the function's capabilities over the AdminQ;
 * the shared code parses the reply into pf->hw.func_caps as a side effect.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -ENODEV when
 * the firmware reports an error other than "buffer too small".
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
        struct i40e_aqc_list_capabilities_element_resp *cap_buf;
        u16 data_size;
        int buf_len;
        int err;

        /* initial guess of 40 elements; firmware reports the needed size
         * in data_size if this turns out to be too small
         */
        buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
        do {
                cap_buf = kzalloc(buf_len, GFP_KERNEL);
                if (!cap_buf)
                        return -ENOMEM;

                /* this loads the data into the hw struct for us */
                err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
                                            &data_size,
                                            i40e_aqc_opc_list_func_capabilities,
                                            NULL);
                /* data loaded, buffer no longer needed */
                kfree(cap_buf);

                if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
                        /* retry with a larger buffer */
                        buf_len = data_size;
                } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
                        dev_info(&pf->pdev->dev,
                                 "capability discovery failed, err %s aq_err %s\n",
                                 i40e_stat_str(&pf->hw, err),
                                 i40e_aq_str(&pf->hw,
                                             pf->hw.aq.asq_last_status));
                        return -ENODEV;
                }
        } while (err);

        if (pf->hw.debug_mask & I40E_DEBUG_USER)
                dev_info(&pf->pdev->dev,
                         "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
                         pf->hw.pf_id, pf->hw.func_caps.num_vfs,
                         pf->hw.func_caps.num_msix_vectors,
                         pf->hw.func_caps.num_msix_vectors_vf,
                         pf->hw.func_caps.fd_filters_guaranteed,
                         pf->hw.func_caps.fd_filters_best_effort,
                         pf->hw.func_caps.num_tx_qp,
                         pf->hw.func_caps.num_vsis);

        /* on revision 0 HW, raise the reported VSI count to the minimum the
         * driver needs: one LAN VSI, one per VF, plus one for FCoE if present
         */
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
                       + pf->hw.func_caps.num_vfs)
        if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
                dev_info(&pf->pdev->dev,
                         "got num_vsis %d, setting num_vsis to %d\n",
                         pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
                pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
        }

        return 0;
}
6376
6377 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6378
6379 /**
6380  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6381  * @pf: board private structure
6382  **/
6383 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6384 {
6385         struct i40e_vsi *vsi;
6386         int i;
6387
6388         /* quick workaround for an NVM issue that leaves a critical register
6389          * uninitialized
6390          */
6391         if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6392                 static const u32 hkey[] = {
6393                         0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6394                         0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6395                         0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6396                         0x95b3a76d};
6397
6398                 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6399                         wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6400         }
6401
6402         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6403                 return;
6404
6405         /* find existing VSI and see if it needs configuring */
6406         vsi = NULL;
6407         for (i = 0; i < pf->num_alloc_vsi; i++) {
6408                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6409                         vsi = pf->vsi[i];
6410                         break;
6411                 }
6412         }
6413
6414         /* create a new VSI if none exists */
6415         if (!vsi) {
6416                 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6417                                      pf->vsi[pf->lan_vsi]->seid, 0);
6418                 if (!vsi) {
6419                         dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6420                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6421                         return;
6422                 }
6423         }
6424
6425         i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6426 }
6427
6428 /**
6429  * i40e_fdir_teardown - release the Flow Director resources
6430  * @pf: board private structure
6431  **/
6432 static void i40e_fdir_teardown(struct i40e_pf *pf)
6433 {
6434         int i;
6435
6436         i40e_fdir_filter_exit(pf);
6437         for (i = 0; i < pf->num_alloc_vsi; i++) {
6438                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6439                         i40e_vsi_release(pf->vsi[i]);
6440                         break;
6441                 }
6442         }
6443 }
6444
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret = 0;
        u32 v;

        clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
        /* bail out if a reset/rebuild cycle is already in flight */
        if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
                return;

        dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

        /* quiesce the VSIs and their queues that are not already DOWN */
        i40e_pf_quiesce_all_vsi(pf);

        /* the reset invalidates the switch element IDs; clear the cached
         * seids so stale values are not reused during the rebuild
         */
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v])
                        pf->vsi[v]->seid = 0;
        }

        /* stop AdminQ traffic before taking the HMC down */
        i40e_shutdown_adminq(&pf->hw);

        /* call shutdown HMC */
        if (hw->hmc.hmc_obj) {
                ret = i40e_shutdown_lan_hmc(hw);
                if (ret)
                        dev_warn(&pf->pdev->dev,
                                 "shutdown_lan_hmc failed: %d\n", ret);
        }
}
6481
6482 /**
6483  * i40e_send_version - update firmware with driver version
6484  * @pf: PF struct
6485  */
6486 static void i40e_send_version(struct i40e_pf *pf)
6487 {
6488         struct i40e_driver_version dv;
6489
6490         dv.major_version = DRV_VERSION_MAJOR;
6491         dv.minor_version = DRV_VERSION_MINOR;
6492         dv.build_version = DRV_VERSION_BUILD;
6493         dv.subbuild_version = 0;
6494         strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6495         i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6496 }
6497
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Performs the PF reset and then rebuilds, in strict order: AdminQ,
 * HMC, DCB/FCoE, the basic switch, and finally the VEB/VSI tree that
 * existed before the reset.  On unrecoverable switch failures it falls
 * back to a minimal "PF VSI directly on the MAC" configuration.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
        struct i40e_hw *hw = &pf->hw;
        u8 set_fc_aq_fail = 0;
        i40e_status ret;
        u32 v;

        /* Now we wait for GRST to settle out.
         * We don't have to delete the VEBs or VSIs from the hw switch
         * because the reset will make them disappear.
         */
        ret = i40e_pf_reset(hw);
        if (ret) {
                dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
                set_bit(__I40E_RESET_FAILED, &pf->state);
                goto clear_recovery;
        }
        pf->pfr_count++;

        /* don't rebuild if the driver is going down anyway */
        if (test_bit(__I40E_DOWN, &pf->state))
                goto clear_recovery;
        dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

        /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
        ret = i40e_init_adminq(&pf->hw);
        if (ret) {
                dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto clear_recovery;
        }

        /* re-verify the eeprom if we just had an EMP reset */
        if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
                i40e_verify_eeprom(pf);

        i40e_clear_pxe_mode(hw);
        ret = i40e_get_capabilities(pf);
        if (ret)
                goto end_core_reset;

        ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
                                hw->func_caps.num_rx_qp,
                                pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
        if (ret) {
                dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
                goto end_core_reset;
        }
        ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
        if (ret) {
                dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
                goto end_core_reset;
        }

#ifdef CONFIG_I40E_DCB
        ret = i40e_init_pf_dcb(pf);
        if (ret) {
                dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
                pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
                /* Continue without DCB enabled */
        }
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
        i40e_init_pf_fcoe(pf);

#endif
        /* do basic switch setup */
        ret = i40e_setup_pf_switch(pf, reinit);
        if (ret)
                goto end_core_reset;

        /* driver is only interested in link up/down and module qualification
         * reports from firmware
         */
        ret = i40e_aq_set_phy_int_mask(&pf->hw,
                                       I40E_AQ_EVENT_LINK_UPDOWN |
                                       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
        if (ret)
                dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

        /* make sure our flow control settings are restored */
        ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
        if (ret)
                dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
                        i40e_stat_str(&pf->hw, ret),
                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

        /* Rebuild the VSIs and VEBs that existed before reset.
         * They are still in our local switch element arrays, so only
         * need to rebuild the switch model in the HW.
         *
         * If there were VEBs but the reconstitution failed, we'll try
         * to recover minimal use by getting the basic PF VSI working.
         */
        if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
                dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
                /* find the one VEB connected to the MAC, and find orphans */
                for (v = 0; v < I40E_MAX_VEB; v++) {
                        if (!pf->veb[v])
                                continue;

                        if (pf->veb[v]->uplink_seid == pf->mac_seid ||
                            pf->veb[v]->uplink_seid == 0) {
                                ret = i40e_reconstitute_veb(pf->veb[v]);

                                if (!ret)
                                        continue;

                                /* If Main VEB failed, we're in deep doodoo,
                                 * so give up rebuilding the switch and set up
                                 * for minimal rebuild of PF VSI.
                                 * If orphan failed, we'll report the error
                                 * but try to keep going.
                                 */
                                if (pf->veb[v]->uplink_seid == pf->mac_seid) {
                                        dev_info(&pf->pdev->dev,
                                                 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
                                                 ret);
                                        pf->vsi[pf->lan_vsi]->uplink_seid
                                                                = pf->mac_seid;
                                        break;
                                } else if (pf->veb[v]->uplink_seid == 0) {
                                        dev_info(&pf->pdev->dev,
                                                 "rebuild of orphan VEB failed: %d\n",
                                                 ret);
                                }
                        }
                }
        }

        if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
                dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
                /* no VEB, so rebuild only the Main VSI */
                ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "rebuild of Main VSI failed: %d\n", ret);
                        goto end_core_reset;
                }
        }

        /* for FW versions older than 4.33, restart link autonegotiation
         * explicitly after the reset; NOTE(review): the 75ms settle delay
         * is not explained here — presumably a FW requirement, confirm
         * against the firmware errata before changing
         */
        if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
            (pf->hw.aq.fw_maj_ver < 4)) {
                msleep(75);
                ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (ret)
                        dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
                                 i40e_stat_str(&pf->hw, ret),
                                 i40e_aq_str(&pf->hw,
                                             pf->hw.aq.asq_last_status));
        }
        /* reinit the misc interrupt */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                ret = i40e_setup_misc_vector(pf);

        /* restart the VSIs that were rebuilt and running before the reset */
        i40e_pf_unquiesce_all_vsi(pf);

        /* give each VF a fresh start against the rebuilt switch */
        if (pf->num_alloc_vfs) {
                for (v = 0; v < pf->num_alloc_vfs; v++)
                        i40e_reset_vf(&pf->vf[v], true);
        }

        /* tell the firmware that we're starting */
        i40e_send_version(pf);

end_core_reset:
        clear_bit(__I40E_RESET_FAILED, &pf->state);
clear_recovery:
        clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
6676
6677 /**
6678  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
6679  * @pf: board private structure
6680  *
6681  * Close up the VFs and other things in prep for a Core Reset,
6682  * then get ready to rebuild the world.
6683  **/
6684 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6685 {
6686         i40e_prep_for_reset(pf);
6687         i40e_reset_and_rebuild(pf, false);
6688 }
6689
/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 *
 * Decodes the global TX/RX Malicious Driver Detection registers, requests
 * a PF reset if the offending queue belongs to the PF, and disables a VF
 * that exceeds the allowed number of MDD events.
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        bool mdd_detected = false;
        bool pf_mdd_detected = false;
        struct i40e_vf *vf;
        u32 reg;
        int i;

        if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
                return;

        /* find what triggered the MDD event */
        reg = rd32(hw, I40E_GL_MDET_TX);
        if (reg & I40E_GL_MDET_TX_VALID_MASK) {
                /* decode the function, queue and event code from the
                 * latched TX detection register; the queue number is
                 * adjusted from absolute to PF-relative numbering
                 */
                u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
                                I40E_GL_MDET_TX_PF_NUM_SHIFT;
                u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
                                I40E_GL_MDET_TX_VF_NUM_SHIFT;
                u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
                                I40E_GL_MDET_TX_EVENT_SHIFT;
                u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
                                I40E_GL_MDET_TX_QUEUE_SHIFT) -
                                pf->hw.func_caps.base_queue;
                if (netif_msg_tx_err(pf))
                        dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
                                 event, queue, pf_num, vf_num);
                /* all-ones write rearms the register for the next event */
                wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
                mdd_detected = true;
        }
        reg = rd32(hw, I40E_GL_MDET_RX);
        if (reg & I40E_GL_MDET_RX_VALID_MASK) {
                /* same decode for the RX side */
                u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
                                I40E_GL_MDET_RX_FUNCTION_SHIFT;
                u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
                                I40E_GL_MDET_RX_EVENT_SHIFT;
                u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
                                I40E_GL_MDET_RX_QUEUE_SHIFT) -
                                pf->hw.func_caps.base_queue;
                if (netif_msg_rx_err(pf))
                        dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
                                 event, queue, func);
                wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
                mdd_detected = true;
        }

        if (mdd_detected) {
                /* check the per-PF detection registers to see whether the
                 * offending queue belongs to this PF
                 */
                reg = rd32(hw, I40E_PF_MDET_TX);
                if (reg & I40E_PF_MDET_TX_VALID_MASK) {
                        wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
                        dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
                        pf_mdd_detected = true;
                }
                reg = rd32(hw, I40E_PF_MDET_RX);
                if (reg & I40E_PF_MDET_RX_VALID_MASK) {
                        wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
                        dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
                        pf_mdd_detected = true;
                }
                /* Queue belongs to the PF, initiate a reset */
                if (pf_mdd_detected) {
                        set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                        i40e_service_event_schedule(pf);
                }
        }

        /* see if one of the VFs needs its hand slapped */
        for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
                vf = &(pf->vf[i]);
                reg = rd32(hw, I40E_VP_MDET_TX(i));
                if (reg & I40E_VP_MDET_TX_VALID_MASK) {
                        wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
                        vf->num_mdd_events++;
                        dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
                                 i);
                }

                reg = rd32(hw, I40E_VP_MDET_RX(i));
                if (reg & I40E_VP_MDET_RX_VALID_MASK) {
                        wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
                        vf->num_mdd_events++;
                        dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
                                 i);
                }

                /* a VF that keeps tripping MDD is disabled until an admin
                 * re-enables it through the PF control interface
                 */
                if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
                        dev_info(&pf->pdev->dev,
                                 "Too many MDD events on VF %d, disabled\n", i);
                        dev_info(&pf->pdev->dev,
                                 "Use PF Control I/F to re-enable the VF\n");
                        set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
                }
        }

        /* re-enable mdd interrupt cause */
        clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
        reg = rd32(hw, I40E_PFINT_ICR0_ENA);
        reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
        wr32(hw, I40E_PFINT_ICR0_ENA, reg);
        i40e_flush(hw);
}
6797
#ifdef CONFIG_I40E_VXLAN
/**
 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 *
 * Walks the pending-port bitmap and pushes each queued VXLAN UDP port
 * add/delete down to the firmware; a port entry that the firmware
 * rejects is dropped from the local table.
 **/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
        __be16 port;
        int idx;

        if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
                return;

        pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;

        for (idx = 0; idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS; idx++) {
                if (!(pf->pending_vxlan_bitmap & BIT_ULL(idx)))
                        continue;

                pf->pending_vxlan_bitmap &= ~BIT_ULL(idx);
                port = pf->vxlan_ports[idx];
                /* a zero port means the slot was queued for deletion */
                if (port)
                        ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
                                             I40E_AQC_TUNNEL_TYPE_VXLAN,
                                             NULL, NULL);
                else
                        ret = i40e_aq_del_udp_tunnel(hw, idx, NULL);

                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
                                 port ? "add" : "delete",
                                 ntohs(port), idx,
                                 i40e_stat_str(&pf->hw, ret),
                                 i40e_aq_str(&pf->hw,
                                            pf->hw.aq.asq_last_status));
                        pf->vxlan_ports[idx] = 0;
                }
        }
}

#endif
6841 /**
6842  * i40e_service_task - Run the driver's async subtasks
6843  * @work: pointer to work_struct containing our data
6844  **/
6845 static void i40e_service_task(struct work_struct *work)
6846 {
6847         struct i40e_pf *pf = container_of(work,
6848                                           struct i40e_pf,
6849                                           service_task);
6850         unsigned long start_time = jiffies;
6851
6852         /* don't bother with service tasks if a reset is in progress */
6853         if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
6854                 i40e_service_event_complete(pf);
6855                 return;
6856         }
6857
6858         i40e_detect_recover_hung(pf);
6859         i40e_reset_subtask(pf);
6860         i40e_handle_mdd_event(pf);
6861         i40e_vc_process_vflr_event(pf);
6862         i40e_watchdog_subtask(pf);
6863         i40e_fdir_reinit_subtask(pf);
6864         i40e_sync_filters_subtask(pf);
6865 #ifdef CONFIG_I40E_VXLAN
6866         i40e_sync_vxlan_filters_subtask(pf);
6867 #endif
6868         i40e_clean_adminq_subtask(pf);
6869
6870         i40e_service_event_complete(pf);
6871
6872         /* If the tasks have taken longer than one timer cycle or there
6873          * is more work to be done, reschedule the service task now
6874          * rather than wait for the timer to tick again.
6875          */
6876         if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
6877             test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)            ||
6878             test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)               ||
6879             test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
6880                 i40e_service_event_schedule(pf);
6881 }
6882
6883 /**
6884  * i40e_service_timer - timer callback
6885  * @data: pointer to PF struct
6886  **/
6887 static void i40e_service_timer(unsigned long data)
6888 {
6889         struct i40e_pf *pf = (struct i40e_pf *)data;
6890
6891         mod_timer(&pf->service_timer,
6892                   round_jiffies(jiffies + pf->service_timer_period));
6893         i40e_service_event_schedule(pf);
6894 }
6895
6896 /**
6897  * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
6898  * @vsi: the VSI being configured
6899  **/
6900 static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
6901 {
6902         struct i40e_pf *pf = vsi->back;
6903
6904         switch (vsi->type) {
6905         case I40E_VSI_MAIN:
6906                 vsi->alloc_queue_pairs = pf->num_lan_qps;
6907                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6908                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6909                 if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6910                         vsi->num_q_vectors = pf->num_lan_msix;
6911                 else
6912                         vsi->num_q_vectors = 1;
6913
6914                 break;
6915
6916         case I40E_VSI_FDIR:
6917                 vsi->alloc_queue_pairs = 1;
6918                 vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
6919                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6920                 vsi->num_q_vectors = 1;
6921                 break;
6922
6923         case I40E_VSI_VMDQ2:
6924                 vsi->alloc_queue_pairs = pf->num_vmdq_qps;
6925                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6926                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6927                 vsi->num_q_vectors = pf->num_vmdq_msix;
6928                 break;
6929
6930         case I40E_VSI_SRIOV:
6931                 vsi->alloc_queue_pairs = pf->num_vf_qps;
6932                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6933                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6934                 break;
6935
6936 #ifdef I40E_FCOE
6937         case I40E_VSI_FCOE:
6938                 vsi->alloc_queue_pairs = pf->num_fcoe_qps;
6939                 vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
6940                                       I40E_REQ_DESCRIPTOR_MULTIPLE);
6941                 vsi->num_q_vectors = pf->num_fcoe_msix;
6942                 break;
6943
6944 #endif /* I40E_FCOE */
6945         default:
6946                 WARN_ON(1);
6947                 return -ENODATA;
6948         }
6949
6950         return 0;
6951 }
6952
/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: pointer to the VSI whose arrays are allocated
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
        int size;
        int ret = 0;

        /* allocate memory for both Tx and Rx ring pointers in a single
         * chunk; the Rx pointers occupy the second half of the array
         */
        size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
        vsi->tx_rings = kzalloc(size, GFP_KERNEL);
        if (!vsi->tx_rings)
                return -ENOMEM;
        vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

        if (alloc_qvectors) {
                /* allocate memory for q_vector pointers */
                size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
                vsi->q_vectors = kzalloc(size, GFP_KERNEL);
                if (!vsi->q_vectors) {
                        ret = -ENOMEM;
                        goto err_vectors;
                }
        }
        return ret;

err_vectors:
        kfree(vsi->tx_rings);
        return ret;
}
6988
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	/* default the new VSI to a safe, down state until it is set up */
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	/* only the main VSI uses the full PF RSS table; others get 64 */
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	/* roll the search hint back so the abandoned slot is found again */
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
7071
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI whose pointer arrays are being freed
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 *
 * Frees only the pointer tables allocated by i40e_vsi_alloc_arrays();
 * rx_rings shares tx_rings' allocation, so it is just cleared.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
}
7091
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 *
 * Releases the VSI's queue/vector resources, removes it from the PF's
 * VSI table under the switch mutex, and frees the structure.  Always
 * returns 0; inconsistencies are logged rather than propagated.
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	/* a VSI with no PF backpointer was never tracked; just free it */
	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* sanity check: the tracked slot must hold this very VSI */
	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);

	/* empty the slot and keep next_vsi pointing at the lowest gap */
	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	/* note: the error paths above fall through here and free too */
	kfree(vsi);

	return 0;
}
7141
7142 /**
7143  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7144  * @vsi: the VSI being cleaned
7145  **/
7146 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7147 {
7148         int i;
7149
7150         if (vsi->tx_rings && vsi->tx_rings[0]) {
7151                 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7152                         kfree_rcu(vsi->tx_rings[i], rcu);
7153                         vsi->tx_rings[i] = NULL;
7154                         vsi->rx_rings[i] = NULL;
7155                 }
7156         }
7157 }
7158
/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, or -ENOMEM after unwinding any rings already
 * allocated when a later allocation fails.
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
		if (!tx_ring)
			goto err_out;

		tx_ring->queue_index = i;
		tx_ring->reg_idx = vsi->base_queue + i;
		tx_ring->ring_active = false;
		tx_ring->vsi = vsi;
		tx_ring->netdev = vsi->netdev;
		tx_ring->dev = &pf->pdev->dev;
		tx_ring->count = vsi->num_desc;
		tx_ring->size = 0;
		tx_ring->dcb_tc = 0;
		/* propagate PF-level capabilities into per-ring flags */
		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
			tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
		vsi->tx_rings[i] = tx_ring;

		/* the Rx ring is the second half of the pair allocation */
		rx_ring = &tx_ring[1];
		rx_ring->queue_index = i;
		rx_ring->reg_idx = vsi->base_queue + i;
		rx_ring->ring_active = false;
		rx_ring->vsi = vsi;
		rx_ring->netdev = vsi->netdev;
		rx_ring->dev = &pf->pdev->dev;
		rx_ring->count = vsi->num_desc;
		rx_ring->size = 0;
		rx_ring->dcb_tc = 0;
		if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
			set_ring_16byte_desc_enabled(rx_ring);
		else
			clear_ring_16byte_desc_enabled(rx_ring);
		vsi->rx_rings[i] = rx_ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
7214
7215 /**
7216  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7217  * @pf: board private structure
7218  * @vectors: the number of MSI-X vectors to request
7219  *
7220  * Returns the number of vectors reserved, or error
7221  **/
7222 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7223 {
7224         vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7225                                         I40E_MIN_MSIX, vectors);
7226         if (vectors < 0) {
7227                 dev_info(&pf->pdev->dev,
7228                          "MSI-X vector reservation failed: %d\n", vectors);
7229                 vectors = 0;
7230         }
7231
7232         return vectors;
7233 }
7234
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int vectors_left;
	int v_budget, i;
	int v_actual;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *      - Queues being used for RSS.
	 *              We don't need as many as max_rss_size vectors.
	 *              use rss_size instead in the calculation since that
	 *              is governed by number of cpus in the system.
	 *      - assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
#ifdef I40E_FCOE
	 *   - The number of FCOE qps.
#endif
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve vectors for the main PF traffic queues */
	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
	vectors_left -= pf->num_lan_msix;
	v_budget += pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			v_budget++;
			vectors_left--;
		} else {
			/* no vector to spare: give up on sideband FD */
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		}
	}

#ifdef I40E_FCOE
	/* can we reserve enough for FCoE? */
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (!vectors_left)
			pf->num_fcoe_msix = 0;
		else if (vectors_left >= pf->num_fcoe_qps)
			pf->num_fcoe_msix = pf->num_fcoe_qps;
		else
			pf->num_fcoe_msix = 1;
		v_budget += pf->num_fcoe_msix;
		vectors_left -= pf->num_fcoe_msix;
	}

#endif
	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		/* if we're short on vectors for what's desired, we limit
		 * the queues per vmdq.  If this is still more than are
		 * available, the user will need to change the number of
		 * queues/vectors used by the PF later with the ethtool
		 * channels command
		 */
		if (vmdq_vecs < vmdq_vecs_wanted)
			pf->num_vmdq_qps = 1;
		pf->num_vmdq_msix = pf->num_vmdq_qps;

		v_budget += vmdq_vecs;
		vectors_left -= vmdq_vecs;
	}

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
#ifdef I40E_FCOE
		pf->num_fcoe_qps = 0;
		pf->num_fcoe_msix = 0;
#endif
		pf->num_vmdq_msix = 0;
	}

	if (v_actual < I40E_MIN_MSIX) {
		/* not even the minimum was granted: fall back off MSI-X */
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		int vec;

		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_fcoe_msix = 1;
			}
#else
			pf->num_lan_msix = 2;
#endif
			break;
		default:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_fcoe_msix = 1;
				vec--;
			}
#endif
			/* give the rest to the PF */
			pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
			break;
		}
	}

	/* disable features whose vector requests could not be met */
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}
#ifdef I40E_FCOE

	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
	}
#endif
	return v_actual;
}
7418
7419 /**
7420  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7421  * @vsi: the VSI being configured
7422  * @v_idx: index of the vector in the vsi struct
7423  *
7424  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7425  **/
7426 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7427 {
7428         struct i40e_q_vector *q_vector;
7429
7430         /* allocate q_vector */
7431         q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7432         if (!q_vector)
7433                 return -ENOMEM;
7434
7435         q_vector->vsi = vsi;
7436         q_vector->v_idx = v_idx;
7437         cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7438         if (vsi->netdev)
7439                 netif_napi_add(vsi->netdev, &q_vector->napi,
7440                                i40e_napi_poll, NAPI_POLL_WEIGHT);
7441
7442         q_vector->rx.latency_range = I40E_LOW_LATENCY;
7443         q_vector->tx.latency_range = I40E_LOW_LATENCY;
7444
7445         /* tie q_vector and vsi together */
7446         vsi->q_vectors[v_idx] = q_vector;
7447
7448         return 0;
7449 }
7450
7451 /**
7452  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7453  * @vsi: the VSI being configured
7454  *
7455  * We allocate one q_vector per queue interrupt.  If allocation fails we
7456  * return -ENOMEM.
7457  **/
7458 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7459 {
7460         struct i40e_pf *pf = vsi->back;
7461         int v_idx, num_q_vectors;
7462         int err;
7463
7464         /* if not MSIX, give the one vector only to the LAN VSI */
7465         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7466                 num_q_vectors = vsi->num_q_vectors;
7467         else if (vsi == pf->vsi[pf->lan_vsi])
7468                 num_q_vectors = 1;
7469         else
7470                 return -EINVAL;
7471
7472         for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7473                 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7474                 if (err)
7475                         goto err_out;
7476         }
7477
7478         return 0;
7479
7480 err_out:
7481         while (v_idx--)
7482                 i40e_free_q_vector(vsi, v_idx);
7483
7484         return err;
7485 }
7486
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 *
 * Tries MSI-X first, then MSI, then legacy IRQ, downgrading the PF's
 * feature flags as capabilities shrink.  Also sets up the irq_pile
 * tracker used to hand out vectors.  Returns 0 or -ENOMEM.
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			/* MSI-X failed: drop every feature that needs it */
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
#ifdef I40E_FCOE
				       I40E_FLAG_FCOE_ENABLED	|
#endif
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
7545
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 *
 * Returns 0 on success or -EFAULT if the IRQ request fails.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	/* flush the register writes before enabling the interrupt */
	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
7585
7586 /**
7587  * i40e_config_rss_aq - Prepare for RSS using AQ commands
7588  * @vsi: vsi structure
7589  * @seed: RSS hash seed
7590  **/
7591 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
7592 {
7593         struct i40e_aqc_get_set_rss_key_data rss_key;
7594         struct i40e_pf *pf = vsi->back;
7595         struct i40e_hw *hw = &pf->hw;
7596         bool pf_lut = false;
7597         u8 *rss_lut;
7598         int ret, i;
7599
7600         memset(&rss_key, 0, sizeof(rss_key));
7601         memcpy(&rss_key, seed, sizeof(rss_key));
7602
7603         rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
7604         if (!rss_lut)
7605                 return -ENOMEM;
7606
7607         /* Populate the LUT with max no. of queues in round robin fashion */
7608         for (i = 0; i < vsi->rss_table_size; i++)
7609                 rss_lut[i] = i % vsi->rss_size;
7610
7611         ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
7612         if (ret) {
7613                 dev_info(&pf->pdev->dev,
7614                          "Cannot set RSS key, err %s aq_err %s\n",
7615                          i40e_stat_str(&pf->hw, ret),
7616                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7617                 goto config_rss_aq_out;
7618         }
7619
7620         if (vsi->type == I40E_VSI_MAIN)
7621                 pf_lut = true;
7622
7623         ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
7624                                   vsi->rss_table_size);
7625         if (ret)
7626                 dev_info(&pf->pdev->dev,
7627                          "Cannot set RSS lut, err %s aq_err %s\n",
7628                          i40e_stat_str(&pf->hw, ret),
7629                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7630
7631 config_rss_aq_out:
7632         kfree(rss_lut);
7633         return ret;
7634 }
7635
7636 /**
7637  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7638  * @vsi: VSI structure
7639  **/
7640 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7641 {
7642         u8 seed[I40E_HKEY_ARRAY_SIZE];
7643         struct i40e_pf *pf = vsi->back;
7644
7645         netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7646         vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7647
7648         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7649                 return i40e_config_rss_aq(vsi, seed);
7650
7651         return 0;
7652 }
7653
7654 /**
7655  * i40e_config_rss_reg - Prepare for RSS if used
7656  * @pf: board private structure
7657  * @seed: RSS hash seed
7658  **/
7659 static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
7660 {
7661         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7662         struct i40e_hw *hw = &pf->hw;
7663         u32 *seed_dw = (u32 *)seed;
7664         u32 current_queue = 0;
7665         u32 lut = 0;
7666         int i, j;
7667
7668         /* Fill out hash function seed */
7669         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7670                 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
7671
7672         for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
7673                 lut = 0;
7674                 for (j = 0; j < 4; j++) {
7675                         if (current_queue == vsi->rss_size)
7676                                 current_queue = 0;
7677                         lut |= ((current_queue) << (8 * j));
7678                         current_queue++;
7679                 }
7680                 wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
7681         }
7682         i40e_flush(hw);
7683
7684         return 0;
7685 }
7686
/**
 * i40e_config_rss - Prepare for RSS if used
 * @pf: board private structure
 *
 * Enables the default hash types, sizes the LUT, and programs the RSS
 * key/table either through the admin queue (newer hardware) or by
 * direct register writes.
 **/
static int i40e_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;

	netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
		((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
	wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* RSS cannot spread wider than the LAN VSI's queue count */
	vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = rd32(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	wr32(hw, I40E_PFQF_CTL_0, reg_val);

	/* newer hardware programs the key/LUT via AQ; older via registers */
	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
	else
		return i40e_config_rss_reg(pf, seed);
}
7723
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	/* clamp the request to what the hardware supports */
	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	/* NOTE(review): the comparison uses the raw queue_count rather than
	 * the clamped new_rss_size — confirm this is intended when
	 * queue_count exceeds rss_size_max
	 */
	if (queue_count != vsi->num_queue_pairs) {
		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf);

		pf->rss_size = new_rss_size;

		/* full reset/rebuild picks up the new queue layout */
		i40e_reset_and_rebuild(pf, true);
		i40e_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
	return pf->rss_size;
}
7754
7755 /**
7756  * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
7757  * @pf: board private structure
7758  **/
7759 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
7760 {
7761         i40e_status status;
7762         bool min_valid, max_valid;
7763         u32 max_bw, min_bw;
7764
7765         status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
7766                                            &min_valid, &max_valid);
7767
7768         if (!status) {
7769                 if (min_valid)
7770                         pf->npar_min_bw = min_bw;
7771                 if (max_valid)
7772                         pf->npar_max_bw = max_bw;
7773         }
7774
7775         return status;
7776 }
7777
7778 /**
7779  * i40e_set_npar_bw_setting - Set BW settings for this PF partition
7780  * @pf: board private structure
7781  **/
7782 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
7783 {
7784         struct i40e_aqc_configure_partition_bw_data bw_data;
7785         i40e_status status;
7786
7787         /* Set the valid bit for this PF */
7788         bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
7789         bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
7790         bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
7791
7792         /* Set the new bandwidths */
7793         status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
7794
7795         return status;
7796 }
7797
/**
 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 *
 * Persists the partition's temporary (alt RAM) bandwidth settings into
 * the NVM image by reading SW compatibility word 1 and writing it back
 * unchanged, which triggers an NVM update of the shadow RAM.  Only
 * partition 1 may perform the commit.
 *
 * Return: 0 on success, I40E_NOT_SUPPORTED when not on partition 1,
 * or the error status of the failing NVM/AQ operation.
 **/
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	/* Only the first partition is allowed to commit */
	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
7880
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Returns 0 on success, -ENOMEM if the queue-pair tracking structure
 * cannot be allocated.
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* Derive driver and HW debug masks from the module 'debug'
	 * parameter; I40E_DEBUG_USER selects the raw HW debug mask path.
	 */
	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_LINK_POLLING_ENABLED |
		    I40E_FLAG_MSIX_ENABLED;

	/* Packet-split Rx requires an IOMMU; otherwise use 1-buffer mode */
	if (iommu_present(&pci_bus_type))
		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
	else
		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_npar_bw_setting(pf))
			dev_warn(&pf->pdev->dev,
				 "Could not get NPAR bw settings\n");
		else
			dev_info(&pf->pdev->dev,
				 "Min BW = %8.8x, Max BW = %8.8x\n",
				 pf->npar_min_bw, pf->npar_max_bw);
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* Sideband flow director is unavailable when sharing the
		 * device across multiple partitions.
		 */
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
	}

#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
	/* SR-IOV is only offered by partition 1 of an MFP device */
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	/* X722 devices advertise additional RSS/ATR/offload capabilities */
	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
			     I40E_FLAG_128_QP_RSS_CAPABLE |
			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
			     I40E_FLAG_WB_ON_ITR_CAPABLE |
			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
	}
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

	/* If NPAR is enabled nudge the Tx scheduler */
	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
		i40e_set_npar_bw_setting(pf);

sw_init_done:
	return err;
}
8017
8018 /**
8019  * i40e_set_ntuple - set the ntuple feature flag and take action
8020  * @pf: board private structure to initialize
8021  * @features: the feature set that the stack is suggesting
8022  *
8023  * returns a bool to indicate if reset needs to happen
8024  **/
8025 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
8026 {
8027         bool need_reset = false;
8028
8029         /* Check if Flow Director n-tuple support was enabled or disabled.  If
8030          * the state changed, we need to reset.
8031          */
8032         if (features & NETIF_F_NTUPLE) {
8033                 /* Enable filters and mark for reset */
8034                 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
8035                         need_reset = true;
8036                 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
8037         } else {
8038                 /* turn off filters, mark for reset and clear SW filter list */
8039                 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
8040                         need_reset = true;
8041                         i40e_fdir_filter_exit(pf);
8042                 }
8043                 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
8044                 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
8045                 /* reset fd counters */
8046                 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
8047                 pf->fdir_pf_active_filters = 0;
8048                 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
8049                 if (I40E_DEBUG_FD & pf->hw.debug_mask)
8050                         dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
8051                 /* if ATR was auto disabled it can be re-enabled. */
8052                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
8053                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
8054                         pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
8055         }
8056         return need_reset;
8057 }
8058
8059 /**
8060  * i40e_set_features - set the netdev feature flags
8061  * @netdev: ptr to the netdev being adjusted
8062  * @features: the feature set that the stack is suggesting
8063  **/
8064 static int i40e_set_features(struct net_device *netdev,
8065                              netdev_features_t features)
8066 {
8067         struct i40e_netdev_priv *np = netdev_priv(netdev);
8068         struct i40e_vsi *vsi = np->vsi;
8069         struct i40e_pf *pf = vsi->back;
8070         bool need_reset;
8071
8072         if (features & NETIF_F_HW_VLAN_CTAG_RX)
8073                 i40e_vlan_stripping_enable(vsi);
8074         else
8075                 i40e_vlan_stripping_disable(vsi);
8076
8077         need_reset = i40e_set_ntuple(pf, features);
8078
8079         if (need_reset)
8080                 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8081
8082         return 0;
8083 }
8084
8085 #ifdef CONFIG_I40E_VXLAN
8086 /**
8087  * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
8088  * @pf: board private structure
8089  * @port: The UDP port to look up
8090  *
8091  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8092  **/
8093 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
8094 {
8095         u8 i;
8096
8097         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8098                 if (pf->vxlan_ports[i] == port)
8099                         return i;
8100         }
8101
8102         return i;
8103 }
8104
8105 /**
8106  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
8107  * @netdev: This physical port's netdev
8108  * @sa_family: Socket Family that VXLAN is notifying us about
8109  * @port: New UDP port number that VXLAN started listening to
8110  **/
8111 static void i40e_add_vxlan_port(struct net_device *netdev,
8112                                 sa_family_t sa_family, __be16 port)
8113 {
8114         struct i40e_netdev_priv *np = netdev_priv(netdev);
8115         struct i40e_vsi *vsi = np->vsi;
8116         struct i40e_pf *pf = vsi->back;
8117         u8 next_idx;
8118         u8 idx;
8119
8120         if (sa_family == AF_INET6)
8121                 return;
8122
8123         idx = i40e_get_vxlan_port_idx(pf, port);
8124
8125         /* Check if port already exists */
8126         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8127                 netdev_info(netdev, "vxlan port %d already offloaded\n",
8128                             ntohs(port));
8129                 return;
8130         }
8131
8132         /* Now check if there is space to add the new port */
8133         next_idx = i40e_get_vxlan_port_idx(pf, 0);
8134
8135         if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8136                 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
8137                             ntohs(port));
8138                 return;
8139         }
8140
8141         /* New port: add it and mark its index in the bitmap */
8142         pf->vxlan_ports[next_idx] = port;
8143         pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
8144         pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8145 }
8146
8147 /**
8148  * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
8149  * @netdev: This physical port's netdev
8150  * @sa_family: Socket Family that VXLAN is notifying us about
8151  * @port: UDP port number that VXLAN stopped listening to
8152  **/
8153 static void i40e_del_vxlan_port(struct net_device *netdev,
8154                                 sa_family_t sa_family, __be16 port)
8155 {
8156         struct i40e_netdev_priv *np = netdev_priv(netdev);
8157         struct i40e_vsi *vsi = np->vsi;
8158         struct i40e_pf *pf = vsi->back;
8159         u8 idx;
8160
8161         if (sa_family == AF_INET6)
8162                 return;
8163
8164         idx = i40e_get_vxlan_port_idx(pf, port);
8165
8166         /* Check if port already exists */
8167         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8168                 /* if port exists, set it to 0 (mark for deletion)
8169                  * and make it pending
8170                  */
8171                 pf->vxlan_ports[idx] = 0;
8172                 pf->pending_vxlan_bitmap |= BIT_ULL(idx);
8173                 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8174         } else {
8175                 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
8176                             ntohs(port));
8177         }
8178 }
8179
8180 #endif
8181 static int i40e_get_phys_port_id(struct net_device *netdev,
8182                                  struct netdev_phys_item_id *ppid)
8183 {
8184         struct i40e_netdev_priv *np = netdev_priv(netdev);
8185         struct i40e_pf *pf = np->vsi->back;
8186         struct i40e_hw *hw = &pf->hw;
8187
8188         if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8189                 return -EOPNOTSUPP;
8190
8191         ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8192         memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8193
8194         return 0;
8195 }
8196
8197 /**
8198  * i40e_ndo_fdb_add - add an entry to the hardware database
8199  * @ndm: the input from the stack
8200  * @tb: pointer to array of nladdr (unused)
8201  * @dev: the net device pointer
8202  * @addr: the MAC address entry being added
8203  * @flags: instructions from stack about fdb operation
8204  */
8205 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
8206                             struct net_device *dev,
8207                             const unsigned char *addr, u16 vid,
8208                             u16 flags)
8209 {
8210         struct i40e_netdev_priv *np = netdev_priv(dev);
8211         struct i40e_pf *pf = np->vsi->back;
8212         int err = 0;
8213
8214         if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
8215                 return -EOPNOTSUPP;
8216
8217         if (vid) {
8218                 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
8219                 return -EINVAL;
8220         }
8221
8222         /* Hardware does not support aging addresses so if a
8223          * ndm_state is given only allow permanent addresses
8224          */
8225         if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
8226                 netdev_info(dev, "FDB only supports static addresses\n");
8227                 return -EINVAL;
8228         }
8229
8230         if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
8231                 err = dev_uc_add_excl(dev, addr);
8232         else if (is_multicast_ether_addr(addr))
8233                 err = dev_mc_add_excl(dev, addr);
8234         else
8235                 err = -EINVAL;
8236
8237         /* Only return duplicate errors if NLM_F_EXCL is set */
8238         if (err == -EEXIST && !(flags & NLM_F_EXCL))
8239                 err = 0;
8240
8241         return err;
8242 }
8243
8244 /**
8245  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8246  * @dev: the netdev being configured
8247  * @nlh: RTNL message
8248  *
8249  * Inserts a new hardware bridge if not already created and
8250  * enables the bridging mode requested (VEB or VEPA). If the
8251  * hardware bridge has already been inserted and the request
8252  * is to change the mode then that requires a PF reset to
8253  * allow rebuild of the components with required hardware
8254  * bridge mode enabled.
8255  **/
8256 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8257                                    struct nlmsghdr *nlh,
8258                                    u16 flags)
8259 {
8260         struct i40e_netdev_priv *np = netdev_priv(dev);
8261         struct i40e_vsi *vsi = np->vsi;
8262         struct i40e_pf *pf = vsi->back;
8263         struct i40e_veb *veb = NULL;
8264         struct nlattr *attr, *br_spec;
8265         int i, rem;
8266
8267         /* Only for PF VSI for now */
8268         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8269                 return -EOPNOTSUPP;
8270
8271         /* Find the HW bridge for PF VSI */
8272         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8273                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8274                         veb = pf->veb[i];
8275         }
8276
8277         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8278
8279         nla_for_each_nested(attr, br_spec, rem) {
8280                 __u16 mode;
8281
8282                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8283                         continue;
8284
8285                 mode = nla_get_u16(attr);
8286                 if ((mode != BRIDGE_MODE_VEPA) &&
8287                     (mode != BRIDGE_MODE_VEB))
8288                         return -EINVAL;
8289
8290                 /* Insert a new HW bridge */
8291                 if (!veb) {
8292                         veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8293                                              vsi->tc_config.enabled_tc);
8294                         if (veb) {
8295                                 veb->bridge_mode = mode;
8296                                 i40e_config_bridge_mode(veb);
8297                         } else {
8298                                 /* No Bridge HW offload available */
8299                                 return -ENOENT;
8300                         }
8301                         break;
8302                 } else if (mode != veb->bridge_mode) {
8303                         /* Existing HW bridge but different mode needs reset */
8304                         veb->bridge_mode = mode;
8305                         /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8306                         if (mode == BRIDGE_MODE_VEB)
8307                                 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8308                         else
8309                                 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8310                         i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8311                         break;
8312                 }
8313         }
8314
8315         return 0;
8316 }
8317
8318 /**
8319  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8320  * @skb: skb buff
8321  * @pid: process id
8322  * @seq: RTNL message seq #
8323  * @dev: the netdev being configured
8324  * @filter_mask: unused
8325  * @nlflags: netlink flags passed in
8326  *
8327  * Return the mode in which the hardware bridge is operating in
8328  * i.e VEB or VEPA.
8329  **/
8330 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8331                                    struct net_device *dev,
8332                                    u32 __always_unused filter_mask,
8333                                    int nlflags)
8334 {
8335         struct i40e_netdev_priv *np = netdev_priv(dev);
8336         struct i40e_vsi *vsi = np->vsi;
8337         struct i40e_pf *pf = vsi->back;
8338         struct i40e_veb *veb = NULL;
8339         int i;
8340
8341         /* Only for PF VSI for now */
8342         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8343                 return -EOPNOTSUPP;
8344
8345         /* Find the HW bridge for the PF VSI */
8346         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8347                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8348                         veb = pf->veb[i];
8349         }
8350
8351         if (!veb)
8352                 return 0;
8353
8354         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8355                                        nlflags, 0, 0, filter_mask, NULL);
8356 }
8357
8358 #define I40E_MAX_TUNNEL_HDR_LEN 80
8359 /**
8360  * i40e_features_check - Validate encapsulated packet conforms to limits
8361  * @skb: skb buff
8362  * @netdev: This physical port's netdev
8363  * @features: Offload features that the stack believes apply
8364  **/
8365 static netdev_features_t i40e_features_check(struct sk_buff *skb,
8366                                              struct net_device *dev,
8367                                              netdev_features_t features)
8368 {
8369         if (skb->encapsulation &&
8370             (skb_inner_mac_header(skb) - skb_transport_header(skb) >
8371              I40E_MAX_TUNNEL_HDR_LEN))
8372                 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
8373
8374         return features;
8375 }
8376
/* netdev callback table wiring the stack's net_device operations to the
 * i40e implementations above and elsewhere in the driver
 */
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= i40e_setup_tc,
#ifdef I40E_FCOE
	.ndo_fcoe_enable	= i40e_fcoe_enable,
	.ndo_fcoe_disable	= i40e_fcoe_disable,
#endif
	.ndo_set_features	= i40e_set_features,
	/* SR-IOV VF management callbacks */
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
#endif
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
};
8415
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Allocates the net_device for @vsi, sets its feature flags, installs
 * the MAC filters (permanent MAC for the main VSI, a random MAC for
 * others, plus broadcast), and wires up the netdev ops and ethtool ops.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	/* link the netdev and the VSI to each other */
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM	 |
				  NETIF_F_GSO_UDP_TUNNEL |
				  NETIF_F_TSO;

	netdev->features = NETIF_F_SG		       |
			   NETIF_F_IP_CSUM	       |
			   NETIF_F_SCTP_CSUM	       |
			   NETIF_F_HIGHDMA	       |
			   NETIF_F_GSO_UDP_TUNNEL      |
			   NETIF_F_HW_VLAN_CTAG_TX     |
			   NETIF_F_HW_VLAN_CTAG_RX     |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM	       |
			   NETIF_F_TSO		       |
			   NETIF_F_TSO_ECN	       |
			   NETIF_F_TSO6		       |
			   NETIF_F_RXCSUM	       |
			   NETIF_F_RXHASH	       |
			   0;

	/* ntuple (FD sideband) is not offered in MFP mode; see i40e_sw_init */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->features |= NETIF_F_NTUPLE;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default a MAC-VLAN filter that accepts any tagged packet
		 * which must be replaced by a normal filter.
		 */
		if (!i40e_rm_default_mac_filter(vsi, mac_addr))
			i40e_add_filter(vsi, mac_addr,
					I40E_VLAN_ANY, false, true);
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
	}
	/* always accept broadcast traffic */
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}
8509
8510 /**
8511  * i40e_vsi_delete - Delete a VSI from the switch
8512  * @vsi: the VSI being removed
8513  *
8514  * Returns 0 on success, negative value on failure
8515  **/
8516 static void i40e_vsi_delete(struct i40e_vsi *vsi)
8517 {
8518         /* remove default VSI is not allowed */
8519         if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
8520                 return;
8521
8522         i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
8523 }
8524
8525 /**
8526  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
8527  * @vsi: the VSI being queried
8528  *
8529  * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
8530  **/
8531 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
8532 {
8533         struct i40e_veb *veb;
8534         struct i40e_pf *pf = vsi->back;
8535
8536         /* Uplink is not a bridge so default to VEB */
8537         if (vsi->veb_idx == I40E_NO_VEB)
8538                 return 1;
8539
8540         veb = pf->veb[vsi->veb_idx];
8541         /* Uplink is a bridge in VEPA mode */
8542         if (veb && (veb->bridge_mode & BRIDGE_MODE_VEPA))
8543                 return 0;
8544
8545         /* Uplink is a bridge in VEB mode */
8546         return 1;
8547 }
8548
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.  The main PF VSI is not added
 * (it already exists after device init); its current context is fetched and
 * updated instead.
 *
 * Returns 0 on success, negative errno otherwise.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;     /* pre-existing MAC filters that need a re-sync */

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		/* cache the FW view of the VSI locally */
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				/* NOTE(review): this -ENOENT is overwritten by
				 * the i40e_vsi_get_bw_info() result below if
				 * that call succeeds — confirm intended.
				 */
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		/* Flow Director VSI hangs directly off the PF uplink */
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		/* honor the per-VF anti-spoof setting */
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
		if (ret) {
			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
			return ret;
		}
		break;

#endif /* I40E_FCOE */
	default:
		return -ENODEV;
	}

	/* every type except MAIN must actually be added to the switch */
	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	/* If macvlan filters already exist, force them to get loaded */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		f->changed = true;
		f_count++;

		/* a locally administered address on the main VSI must be
		 * removed from the default filter set and written as the
		 * device's LAA/WOL address
		 */
		if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
			struct i40e_aqc_remove_macvlan_element_data element;

			memset(&element, 0, sizeof(element));
			ether_addr_copy(element.mac_addr, f->macaddr);
			element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			ret = i40e_aq_remove_macvlan(hw, vsi->seid,
						     &element, 1, NULL);
			if (ret) {
				/* some older FW has a different default */
				element.flags |=
					       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
				i40e_aq_remove_macvlan(hw, vsi->seid,
						       &element, 1, NULL);
			}

			i40e_aq_mac_address_write(hw,
						  I40E_AQC_WRITE_TYPE_LAA_WOL,
						  f->macaddr, NULL);
		}
	}
	if (f_count) {
		/* flag the filters for the service task to push to HW */
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
8779
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Refuses to remove a VEB-owner VSI or the PF's LAN VSI while the PF is up.
 * Otherwise tears down netdev/IRQ state, drops MAC filters, deletes the VSI
 * from the switch, and releases the uplink VEB when it becomes empty.
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	/* cache the uplink: vsi memory is freed before the VEB cleanup */
	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	/* drop all MAC filters and push the deletions out to the HW */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	i40e_sync_vsi_filters(vsi, false);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
8864
8865 /**
8866  * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
8867  * @vsi: ptr to the VSI
8868  *
8869  * This should only be called after i40e_vsi_mem_alloc() which allocates the
8870  * corresponding SW VSI structure and initializes num_queue_pairs for the
8871  * newly allocated VSI.
8872  *
8873  * Returns 0 on success or negative on failure
8874  **/
8875 static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
8876 {
8877         int ret = -ENOENT;
8878         struct i40e_pf *pf = vsi->back;
8879
8880         if (vsi->q_vectors[0]) {
8881                 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
8882                          vsi->seid);
8883                 return -EEXIST;
8884         }
8885
8886         if (vsi->base_vector) {
8887                 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
8888                          vsi->seid, vsi->base_vector);
8889                 return -EEXIST;
8890         }
8891
8892         ret = i40e_vsi_alloc_q_vectors(vsi);
8893         if (ret) {
8894                 dev_info(&pf->pdev->dev,
8895                          "failed to allocate %d q_vector for VSI %d, ret=%d\n",
8896                          vsi->num_q_vectors, vsi->seid, ret);
8897                 vsi->num_q_vectors = 0;
8898                 goto vector_setup_out;
8899         }
8900
8901         /* In Legacy mode, we do not have to get any other vector since we
8902          * piggyback on the misc/ICR0 for queue interrupts.
8903         */
8904         if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
8905                 return ret;
8906         if (vsi->num_q_vectors)
8907                 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
8908                                                  vsi->num_q_vectors, vsi->idx);
8909         if (vsi->base_vector < 0) {
8910                 dev_info(&pf->pdev->dev,
8911                          "failed to get tracking for %d vectors for VSI %d, err=%d\n",
8912                          vsi->num_q_vectors, vsi->seid, vsi->base_vector);
8913                 i40e_vsi_free_q_vectors(vsi);
8914                 ret = -ENOENT;
8915                 goto vector_setup_out;
8916         }
8917
8918 vector_setup_out:
8919         return ret;
8920 }
8921
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc;
	int ret;

	/* give back the current queue allocation and ring memory */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	/* re-size the ring/vector arrays for the recomputed ring count */
	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	/* full teardown: vectors, netdev, and the switch element itself */
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
8985
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then add a VSI
 * to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configure VSI sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		/* uplink is not an existing VEB or the port: it must be a VSI */
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		/* i40e_veb_setup() records the new VEB in pf->veb[]; find it */
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
				vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_FCOE:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
9169
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB and cache
 * the per-TC results in the veb struct.
 *
 * Returns 0 on success; otherwise the AQ error status.
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	/* reassemble the 32-bit max-quanta word from two LE16 halves */
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		/* each TC's quanta occupies a 4-bit field; low 3 bits used */
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
9222
9223 /**
9224  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9225  * @pf: board private structure
9226  *
9227  * On error: returns error code (negative)
9228  * On success: returns vsi index in PF (positive)
9229  **/
9230 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9231 {
9232         int ret = -ENOENT;
9233         struct i40e_veb *veb;
9234         int i;
9235
9236         /* Need to protect the allocation of switch elements at the PF level */
9237         mutex_lock(&pf->switch_mutex);
9238
9239         /* VEB list may be fragmented if VEB creation/destruction has
9240          * been happening.  We can afford to do a quick scan to look
9241          * for any free slots in the list.
9242          *
9243          * find next empty veb slot, looping back around if necessary
9244          */
9245         i = 0;
9246         while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9247                 i++;
9248         if (i >= I40E_MAX_VEB) {
9249                 ret = -ENOMEM;
9250                 goto err_alloc_veb;  /* out of VEB slots! */
9251         }
9252
9253         veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9254         if (!veb) {
9255                 ret = -ENOMEM;
9256                 goto err_alloc_veb;
9257         }
9258         veb->pf = pf;
9259         veb->idx = i;
9260         veb->enabled_tc = 1;
9261
9262         pf->veb[i] = veb;
9263         ret = i;
9264 err_alloc_veb:
9265         mutex_unlock(&pf->switch_mutex);
9266         return ret;
9267 }
9268
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	/* cache idx: *branch may be freed as a side effect of the VSI loop */
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
9313
9314 /**
9315  * i40e_veb_clear - remove veb struct
9316  * @veb: the veb to remove
9317  **/
9318 static void i40e_veb_clear(struct i40e_veb *veb)
9319 {
9320         if (!veb)
9321                 return;
9322
9323         if (veb->pf) {
9324                 struct i40e_pf *pf = veb->pf;
9325
9326                 mutex_lock(&pf->switch_mutex);
9327                 if (pf->veb[veb->idx] == veb)
9328                         pf->veb[veb->idx] = NULL;
9329                 mutex_unlock(&pf->switch_mutex);
9330         }
9331
9332         kfree(veb);
9333 }
9334
9335 /**
9336  * i40e_veb_release - Delete a VEB and free its resources
9337  * @veb: the VEB being removed
9338  **/
9339 void i40e_veb_release(struct i40e_veb *veb)
9340 {
9341         struct i40e_vsi *vsi = NULL;
9342         struct i40e_pf *pf;
9343         int i, n = 0;
9344
9345         pf = veb->pf;
9346
9347         /* find the remaining VSI and check for extras */
9348         for (i = 0; i < pf->num_alloc_vsi; i++) {
9349                 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
9350                         n++;
9351                         vsi = pf->vsi[i];
9352                 }
9353         }
9354         if (n != 1) {
9355                 dev_info(&pf->pdev->dev,
9356                          "can't remove VEB %d with %d VSIs left\n",
9357                          veb->seid, n);
9358                 return;
9359         }
9360
9361         /* move the remaining VSI to uplink veb */
9362         vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
9363         if (veb->uplink_seid) {
9364                 vsi->uplink_seid = veb->uplink_seid;
9365                 if (veb->uplink_seid == pf->mac_seid)
9366                         vsi->veb_idx = I40E_NO_VEB;
9367                 else
9368                         vsi->veb_idx = veb->veb_idx;
9369         } else {
9370                 /* floating VEB */
9371                 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
9372                 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
9373         }
9374
9375         i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9376         i40e_veb_clear(veb);
9377 }
9378
9379 /**
9380  * i40e_add_veb - create the VEB in the switch
9381  * @veb: the VEB to be instantiated
9382  * @vsi: the controlling VSI
9383  **/
9384 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
9385 {
9386         struct i40e_pf *pf = veb->pf;
9387         bool is_default = veb->pf->cur_promisc;
9388         bool is_cloud = false;
9389         int ret;
9390
9391         /* get a VEB from the hardware */
9392         ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
9393                               veb->enabled_tc, is_default,
9394                               is_cloud, &veb->seid, NULL);
9395         if (ret) {
9396                 dev_info(&pf->pdev->dev,
9397                          "couldn't add VEB, err %s aq_err %s\n",
9398                          i40e_stat_str(&pf->hw, ret),
9399                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9400                 return -EPERM;
9401         }
9402
9403         /* get statistics counter */
9404         ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
9405                                          &veb->stats_idx, NULL, NULL, NULL);
9406         if (ret) {
9407                 dev_info(&pf->pdev->dev,
9408                          "couldn't get VEB statistics idx, err %s aq_err %s\n",
9409                          i40e_stat_str(&pf->hw, ret),
9410                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9411                 return -EPERM;
9412         }
9413         ret = i40e_veb_get_bw_info(veb);
9414         if (ret) {
9415                 dev_info(&pf->pdev->dev,
9416                          "couldn't get VEB bw info, err %s aq_err %s\n",
9417                          i40e_stat_str(&pf->hw, ret),
9418                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9419                 i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
9420                 return -ENOENT;
9421         }
9422
9423         vsi->uplink_seid = veb->seid;
9424         vsi->veb_idx = veb->idx;
9425         vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
9426
9427         return 0;
9428 }
9429
9430 /**
9431  * i40e_veb_setup - Set up a VEB
9432  * @pf: board private structure
9433  * @flags: VEB setup flags
9434  * @uplink_seid: the switch element to link to
9435  * @vsi_seid: the initial VSI seid
9436  * @enabled_tc: Enabled TC bit-map
9437  *
9438  * This allocates the sw VEB structure and links it into the switch
9439  * It is possible and legal for this to be a duplicate of an already
9440  * existing VEB.  It is also possible for both uplink and vsi seids
9441  * to be zero, in order to create a floating VEB.
9442  *
9443  * Returns pointer to the successfully allocated VEB sw struct on
9444  * success, otherwise returns NULL on failure.
9445  **/
9446 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
9447                                 u16 uplink_seid, u16 vsi_seid,
9448                                 u8 enabled_tc)
9449 {
9450         struct i40e_veb *veb, *uplink_veb = NULL;
9451         int vsi_idx, veb_idx;
9452         int ret;
9453
9454         /* if one seid is 0, the other must be 0 to create a floating relay */
9455         if ((uplink_seid == 0 || vsi_seid == 0) &&
9456             (uplink_seid + vsi_seid != 0)) {
9457                 dev_info(&pf->pdev->dev,
9458                          "one, not both seid's are 0: uplink=%d vsi=%d\n",
9459                          uplink_seid, vsi_seid);
9460                 return NULL;
9461         }
9462
9463         /* make sure there is such a vsi and uplink */
9464         for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
9465                 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
9466                         break;
9467         if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
9468                 dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
9469                          vsi_seid);
9470                 return NULL;
9471         }
9472
9473         if (uplink_seid && uplink_seid != pf->mac_seid) {
9474                 for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
9475                         if (pf->veb[veb_idx] &&
9476                             pf->veb[veb_idx]->seid == uplink_seid) {
9477                                 uplink_veb = pf->veb[veb_idx];
9478                                 break;
9479                         }
9480                 }
9481                 if (!uplink_veb) {
9482                         dev_info(&pf->pdev->dev,
9483                                  "uplink seid %d not found\n", uplink_seid);
9484                         return NULL;
9485                 }
9486         }
9487
9488         /* get veb sw struct */
9489         veb_idx = i40e_veb_mem_alloc(pf);
9490         if (veb_idx < 0)
9491                 goto err_alloc;
9492         veb = pf->veb[veb_idx];
9493         veb->flags = flags;
9494         veb->uplink_seid = uplink_seid;
9495         veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
9496         veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
9497
9498         /* create the VEB in the switch */
9499         ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
9500         if (ret)
9501                 goto err_veb;
9502         if (vsi_idx == pf->lan_vsi)
9503                 pf->lan_veb = veb->idx;
9504
9505         return veb;
9506
9507 err_veb:
9508         i40e_veb_clear(veb);
9509 err_alloc:
9510         return NULL;
9511 }
9512
9513 /**
9514  * i40e_setup_pf_switch_element - set PF vars based on switch type
9515  * @pf: board private structure
9516  * @ele: element we are building info from
9517  * @num_reported: total number of elements
9518  * @printconfig: should we print the contents
9519  *
9520  * helper function to assist in extracting a few useful SEID values.
9521  **/
9522 static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
9523                                 struct i40e_aqc_switch_config_element_resp *ele,
9524                                 u16 num_reported, bool printconfig)
9525 {
9526         u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
9527         u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
9528         u8 element_type = ele->element_type;
9529         u16 seid = le16_to_cpu(ele->seid);
9530
9531         if (printconfig)
9532                 dev_info(&pf->pdev->dev,
9533                          "type=%d seid=%d uplink=%d downlink=%d\n",
9534                          element_type, seid, uplink_seid, downlink_seid);
9535
9536         switch (element_type) {
9537         case I40E_SWITCH_ELEMENT_TYPE_MAC:
9538                 pf->mac_seid = seid;
9539                 break;
9540         case I40E_SWITCH_ELEMENT_TYPE_VEB:
9541                 /* Main VEB? */
9542                 if (uplink_seid != pf->mac_seid)
9543                         break;
9544                 if (pf->lan_veb == I40E_NO_VEB) {
9545                         int v;
9546
9547                         /* find existing or else empty VEB */
9548                         for (v = 0; v < I40E_MAX_VEB; v++) {
9549                                 if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
9550                                         pf->lan_veb = v;
9551                                         break;
9552                                 }
9553                         }
9554                         if (pf->lan_veb == I40E_NO_VEB) {
9555                                 v = i40e_veb_mem_alloc(pf);
9556                                 if (v < 0)
9557                                         break;
9558                                 pf->lan_veb = v;
9559                         }
9560                 }
9561
9562                 pf->veb[pf->lan_veb]->seid = seid;
9563                 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
9564                 pf->veb[pf->lan_veb]->pf = pf;
9565                 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
9566                 break;
9567         case I40E_SWITCH_ELEMENT_TYPE_VSI:
9568                 if (num_reported != 1)
9569                         break;
9570                 /* This is immediately after a reset so we can assume this is
9571                  * the PF's VSI
9572                  */
9573                 pf->mac_seid = uplink_seid;
9574                 pf->pf_seid = downlink_seid;
9575                 pf->main_vsi_seid = seid;
9576                 if (printconfig)
9577                         dev_info(&pf->pdev->dev,
9578                                  "pf_seid=%d main_vsi_seid=%d\n",
9579                                  pf->pf_seid, pf->main_vsi_seid);
9580                 break;
9581         case I40E_SWITCH_ELEMENT_TYPE_PF:
9582         case I40E_SWITCH_ELEMENT_TYPE_VF:
9583         case I40E_SWITCH_ELEMENT_TYPE_EMP:
9584         case I40E_SWITCH_ELEMENT_TYPE_BMC:
9585         case I40E_SWITCH_ELEMENT_TYPE_PE:
9586         case I40E_SWITCH_ELEMENT_TYPE_PA:
9587                 /* ignore these for now */
9588                 break;
9589         default:
9590                 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
9591                          element_type, seid);
9592                 break;
9593         }
9594 }
9595
9596 /**
9597  * i40e_fetch_switch_configuration - Get switch config from firmware
9598  * @pf: board private structure
9599  * @printconfig: should we print the contents
9600  *
9601  * Get the current switch configuration from the device and
9602  * extract a few useful SEID values.
9603  **/
9604 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
9605 {
9606         struct i40e_aqc_get_switch_config_resp *sw_config;
9607         u16 next_seid = 0;
9608         int ret = 0;
9609         u8 *aq_buf;
9610         int i;
9611
9612         aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
9613         if (!aq_buf)
9614                 return -ENOMEM;
9615
9616         sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
9617         do {
9618                 u16 num_reported, num_total;
9619
9620                 ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
9621                                                 I40E_AQ_LARGE_BUF,
9622                                                 &next_seid, NULL);
9623                 if (ret) {
9624                         dev_info(&pf->pdev->dev,
9625                                  "get switch config failed err %s aq_err %s\n",
9626                                  i40e_stat_str(&pf->hw, ret),
9627                                  i40e_aq_str(&pf->hw,
9628                                              pf->hw.aq.asq_last_status));
9629                         kfree(aq_buf);
9630                         return -ENOENT;
9631                 }
9632
9633                 num_reported = le16_to_cpu(sw_config->header.num_reported);
9634                 num_total = le16_to_cpu(sw_config->header.num_total);
9635
9636                 if (printconfig)
9637                         dev_info(&pf->pdev->dev,
9638                                  "header: %d reported %d total\n",
9639                                  num_reported, num_total);
9640
9641                 for (i = 0; i < num_reported; i++) {
9642                         struct i40e_aqc_switch_config_element_resp *ele =
9643                                 &sw_config->element[i];
9644
9645                         i40e_setup_pf_switch_element(pf, ele, num_reported,
9646                                                      printconfig);
9647                 }
9648         } while (next_seid != 0);
9649
9650         kfree(aq_buf);
9651         return ret;
9652 }
9653
9654 /**
9655  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
9656  * @pf: board private structure
9657  * @reinit: if the Main VSI needs to re-initialized.
9658  *
9659  * Returns 0 on success, negative value on failure
9660  **/
9661 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
9662 {
9663         int ret;
9664
9665         /* find out what's out there already */
9666         ret = i40e_fetch_switch_configuration(pf, false);
9667         if (ret) {
9668                 dev_info(&pf->pdev->dev,
9669                          "couldn't fetch switch config, err %s aq_err %s\n",
9670                          i40e_stat_str(&pf->hw, ret),
9671                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
9672                 return ret;
9673         }
9674         i40e_pf_reset_stats(pf);
9675
9676         /* first time setup */
9677         if (pf->lan_vsi == I40E_NO_VSI || reinit) {
9678                 struct i40e_vsi *vsi = NULL;
9679                 u16 uplink_seid;
9680
9681                 /* Set up the PF VSI associated with the PF's main VSI
9682                  * that is already in the HW switch
9683                  */
9684                 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
9685                         uplink_seid = pf->veb[pf->lan_veb]->seid;
9686                 else
9687                         uplink_seid = pf->mac_seid;
9688                 if (pf->lan_vsi == I40E_NO_VSI)
9689                         vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
9690                 else if (reinit)
9691                         vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
9692                 if (!vsi) {
9693                         dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
9694                         i40e_fdir_teardown(pf);
9695                         return -EAGAIN;
9696                 }
9697         } else {
9698                 /* force a reset of TC and queue layout configurations */
9699                 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
9700
9701                 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
9702                 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
9703                 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
9704         }
9705         i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
9706
9707         i40e_fdir_sb_setup(pf);
9708
9709         /* Setup static PF queue filter control settings */
9710         ret = i40e_setup_pf_filter_control(pf);
9711         if (ret) {
9712                 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
9713                          ret);
9714                 /* Failure here should not stop continuing other steps */
9715         }
9716
9717         /* enable RSS in the HW, even for only one queue, as the stack can use
9718          * the hash
9719          */
9720         if ((pf->flags & I40E_FLAG_RSS_ENABLED))
9721                 i40e_config_rss(pf);
9722
9723         /* fill in link information and enable LSE reporting */
9724         i40e_update_link_info(&pf->hw);
9725         i40e_link_event(pf);
9726
9727         /* Initialize user-specific link properties */
9728         pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
9729                                   I40E_AQ_AN_COMPLETED) ? true : false);
9730
9731         i40e_ptp_init(pf);
9732
9733         return ret;
9734 }
9735
9736 /**
9737  * i40e_determine_queue_usage - Work out queue distribution
9738  * @pf: board private structure
9739  **/
9740 static void i40e_determine_queue_usage(struct i40e_pf *pf)
9741 {
9742         int queues_left;
9743
9744         pf->num_lan_qps = 0;
9745 #ifdef I40E_FCOE
9746         pf->num_fcoe_qps = 0;
9747 #endif
9748
9749         /* Find the max queues to be put into basic use.  We'll always be
9750          * using TC0, whether or not DCB is running, and TC0 will get the
9751          * big RSS set.
9752          */
9753         queues_left = pf->hw.func_caps.num_tx_qp;
9754
9755         if ((queues_left == 1) ||
9756             !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
9757                 /* one qp for PF, no queues for anything else */
9758                 queues_left = 0;
9759                 pf->rss_size = pf->num_lan_qps = 1;
9760
9761                 /* make sure all the fancies are disabled */
9762                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
9763 #ifdef I40E_FCOE
9764                                I40E_FLAG_FCOE_ENABLED   |
9765 #endif
9766                                I40E_FLAG_FD_SB_ENABLED  |
9767                                I40E_FLAG_FD_ATR_ENABLED |
9768                                I40E_FLAG_DCB_CAPABLE    |
9769                                I40E_FLAG_SRIOV_ENABLED  |
9770                                I40E_FLAG_VMDQ_ENABLED);
9771         } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
9772                                   I40E_FLAG_FD_SB_ENABLED |
9773                                   I40E_FLAG_FD_ATR_ENABLED |
9774                                   I40E_FLAG_DCB_CAPABLE))) {
9775                 /* one qp for PF */
9776                 pf->rss_size = pf->num_lan_qps = 1;
9777                 queues_left -= pf->num_lan_qps;
9778
9779                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
9780 #ifdef I40E_FCOE
9781                                I40E_FLAG_FCOE_ENABLED   |
9782 #endif
9783                                I40E_FLAG_FD_SB_ENABLED  |
9784                                I40E_FLAG_FD_ATR_ENABLED |
9785                                I40E_FLAG_DCB_ENABLED    |
9786                                I40E_FLAG_VMDQ_ENABLED);
9787         } else {
9788                 /* Not enough queues for all TCs */
9789                 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
9790                     (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
9791                         pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
9792                         dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
9793                 }
9794                 pf->num_lan_qps = max_t(int, pf->rss_size_max,
9795                                         num_online_cpus());
9796                 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
9797                                         pf->hw.func_caps.num_tx_qp);
9798
9799                 queues_left -= pf->num_lan_qps;
9800         }
9801
9802 #ifdef I40E_FCOE
9803         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
9804                 if (I40E_DEFAULT_FCOE <= queues_left) {
9805                         pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
9806                 } else if (I40E_MINIMUM_FCOE <= queues_left) {
9807                         pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
9808                 } else {
9809                         pf->num_fcoe_qps = 0;
9810                         pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
9811                         dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
9812                 }
9813
9814                 queues_left -= pf->num_fcoe_qps;
9815         }
9816
9817 #endif
9818         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9819                 if (queues_left > 1) {
9820                         queues_left -= 1; /* save 1 queue for FD */
9821                 } else {
9822                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
9823                         dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
9824                 }
9825         }
9826
9827         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
9828             pf->num_vf_qps && pf->num_req_vfs && queues_left) {
9829                 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
9830                                         (queues_left / pf->num_vf_qps));
9831                 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
9832         }
9833
9834         if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
9835             pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
9836                 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
9837                                           (queues_left / pf->num_vmdq_qps));
9838                 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
9839         }
9840
9841         pf->queues_left = queues_left;
9842         dev_dbg(&pf->pdev->dev,
9843                 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
9844                 pf->hw.func_caps.num_tx_qp,
9845                 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
9846                 pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
9847                 pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
9848 #ifdef I40E_FCOE
9849         dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
9850 #endif
9851 }
9852
9853 /**
9854  * i40e_setup_pf_filter_control - Setup PF static filter control
9855  * @pf: PF to be setup
9856  *
9857  * i40e_setup_pf_filter_control sets up a PF's initial filter control
9858  * settings. If PE/FCoE are enabled then it will also set the per PF
9859  * based filter sizes required for them. It also enables Flow director,
9860  * ethertype and macvlan type filter settings for the pf.
9861  *
9862  * Returns 0 on success, negative on failure
9863  **/
9864 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
9865 {
9866         struct i40e_filter_control_settings *settings = &pf->filter_settings;
9867
9868         settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
9869
9870         /* Flow Director is enabled */
9871         if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
9872                 settings->enable_fdir = true;
9873
9874         /* Ethtype and MACVLAN filters enabled for PF */
9875         settings->enable_ethtype = true;
9876         settings->enable_macvlan = true;
9877
9878         if (i40e_set_filter_control(&pf->hw, settings))
9879                 return -ENOENT;
9880
9881         return 0;
9882 }
9883
9884 #define INFO_STRING_LEN 255
9885 static void i40e_print_features(struct i40e_pf *pf)
9886 {
9887         struct i40e_hw *hw = &pf->hw;
9888         char *buf, *string;
9889
9890         string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
9891         if (!string) {
9892                 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
9893                 return;
9894         }
9895
9896         buf = string;
9897
9898         buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
9899 #ifdef CONFIG_PCI_IOV
9900         buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
9901 #endif
9902         buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
9903                        pf->hw.func_caps.num_vsis,
9904                        pf->vsi[pf->lan_vsi]->num_queue_pairs,
9905                        pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
9906
9907         if (pf->flags & I40E_FLAG_RSS_ENABLED)
9908                 buf += sprintf(buf, "RSS ");
9909         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
9910                 buf += sprintf(buf, "FD_ATR ");
9911         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
9912                 buf += sprintf(buf, "FD_SB ");
9913                 buf += sprintf(buf, "NTUPLE ");
9914         }
9915         if (pf->flags & I40E_FLAG_DCB_CAPABLE)
9916                 buf += sprintf(buf, "DCB ");
9917 #if IS_ENABLED(CONFIG_VXLAN)
9918         buf += sprintf(buf, "VxLAN ");
9919 #endif
9920         if (pf->flags & I40E_FLAG_PTP)
9921                 buf += sprintf(buf, "PTP ");
9922 #ifdef I40E_FCOE
9923         if (pf->flags & I40E_FLAG_FCOE_ENABLED)
9924                 buf += sprintf(buf, "FCOE ");
9925 #endif
9926
9927         BUG_ON(buf > (string + INFO_STRING_LEN));
9928         dev_info(&pf->pdev->dev, "%s\n", string);
9929         kfree(string);
9930 }
9931
9932 /**
9933  * i40e_probe - Device initialization routine
9934  * @pdev: PCI device information struct
9935  * @ent: entry in i40e_pci_tbl
9936  *
9937  * i40e_probe initializes a PF identified by a pci_dev structure.
9938  * The OS initialization, configuring of the PF private structure,
9939  * and a hardware reset occur.
9940  *
9941  * Returns 0 on success, negative on failure
9942  **/
9943 static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9944 {
9945         struct i40e_aq_get_phy_abilities_resp abilities;
9946         struct i40e_pf *pf;
9947         struct i40e_hw *hw;
9948         static u16 pfs_found;
9949         u16 wol_nvm_bits;
9950         u16 link_status;
9951         int err = 0;
9952         u32 len;
9953         u32 i;
9954
9955         err = pci_enable_device_mem(pdev);
9956         if (err)
9957                 return err;
9958
9959         /* set up for high or low dma */
9960         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9961         if (err) {
9962                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9963                 if (err) {
9964                         dev_err(&pdev->dev,
9965                                 "DMA configuration failed: 0x%x\n", err);
9966                         goto err_dma;
9967                 }
9968         }
9969
9970         /* set up pci connections */
9971         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
9972                                            IORESOURCE_MEM), i40e_driver_name);
9973         if (err) {
9974                 dev_info(&pdev->dev,
9975                          "pci_request_selected_regions failed %d\n", err);
9976                 goto err_pci_reg;
9977         }
9978
9979         pci_enable_pcie_error_reporting(pdev);
9980         pci_set_master(pdev);
9981
9982         /* Now that we have a PCI connection, we need to do the
9983          * low level device setup.  This is primarily setting up
9984          * the Admin Queue structures and then querying for the
9985          * device's current profile information.
9986          */
9987         pf = kzalloc(sizeof(*pf), GFP_KERNEL);
9988         if (!pf) {
9989                 err = -ENOMEM;
9990                 goto err_pf_alloc;
9991         }
9992         pf->next_vsi = 0;
9993         pf->pdev = pdev;
9994         set_bit(__I40E_DOWN, &pf->state);
9995
9996         hw = &pf->hw;
9997         hw->back = pf;
9998
9999         pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
10000                                 I40E_MAX_CSR_SPACE);
10001
10002         hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
10003         if (!hw->hw_addr) {
10004                 err = -EIO;
10005                 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
10006                          (unsigned int)pci_resource_start(pdev, 0),
10007                          pf->ioremap_len, err);
10008                 goto err_ioremap;
10009         }
10010         hw->vendor_id = pdev->vendor;
10011         hw->device_id = pdev->device;
10012         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
10013         hw->subsystem_vendor_id = pdev->subsystem_vendor;
10014         hw->subsystem_device_id = pdev->subsystem_device;
10015         hw->bus.device = PCI_SLOT(pdev->devfn);
10016         hw->bus.func = PCI_FUNC(pdev->devfn);
10017         pf->instance = pfs_found;
10018
10019         if (debug != -1) {
10020                 pf->msg_enable = pf->hw.debug_mask;
10021                 pf->msg_enable = debug;
10022         }
10023
10024         /* do a special CORER for clearing PXE mode once at init */
10025         if (hw->revision_id == 0 &&
10026             (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
10027                 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
10028                 i40e_flush(hw);
10029                 msleep(200);
10030                 pf->corer_count++;
10031
10032                 i40e_clear_pxe_mode(hw);
10033         }
10034
10035         /* Reset here to make sure all is clean and to define PF 'n' */
10036         i40e_clear_hw(hw);
10037         err = i40e_pf_reset(hw);
10038         if (err) {
10039                 dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
10040                 goto err_pf_reset;
10041         }
10042         pf->pfr_count++;
10043
10044         hw->aq.num_arq_entries = I40E_AQ_LEN;
10045         hw->aq.num_asq_entries = I40E_AQ_LEN;
10046         hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10047         hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
10048         pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;
10049
10050         snprintf(pf->int_name, sizeof(pf->int_name) - 1,
10051                  "%s-%s:misc",
10052                  dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));
10053
10054         err = i40e_init_shared_code(hw);
10055         if (err) {
10056                 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
10057                          err);
10058                 goto err_pf_reset;
10059         }
10060
10061         /* set up a default setting for link flow control */
10062         pf->hw.fc.requested_mode = I40E_FC_NONE;
10063
10064         err = i40e_init_adminq(hw);
10065         dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
10066
10067         /* provide additional fw info, like api and ver */
10068         dev_info(&pdev->dev, "fw_version:%d.%d.%05d\n",
10069                  hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build);
10070         dev_info(&pdev->dev, "fw api version:%d.%d\n",
10071                  hw->aq.api_maj_ver, hw->aq.api_min_ver);
10072
10073         if (err) {
10074                 dev_info(&pdev->dev,
10075                          "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
10076                 goto err_pf_reset;
10077         }
10078
10079         if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
10080             hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
10081                 dev_info(&pdev->dev,
10082                          "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
10083         else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
10084                  hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
10085                 dev_info(&pdev->dev,
10086                          "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
10087
10088         i40e_verify_eeprom(pf);
10089
10090         /* Rev 0 hardware was never productized */
10091         if (hw->revision_id < 1)
10092                 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
10093
10094         i40e_clear_pxe_mode(hw);
10095         err = i40e_get_capabilities(pf);
10096         if (err)
10097                 goto err_adminq_setup;
10098
10099         err = i40e_sw_init(pf);
10100         if (err) {
10101                 dev_info(&pdev->dev, "sw_init failed: %d\n", err);
10102                 goto err_sw_init;
10103         }
10104
10105         err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
10106                                 hw->func_caps.num_rx_qp,
10107                                 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
10108         if (err) {
10109                 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
10110                 goto err_init_lan_hmc;
10111         }
10112
10113         err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
10114         if (err) {
10115                 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
10116                 err = -ENOENT;
10117                 goto err_configure_lan_hmc;
10118         }
10119
10120         /* Disable LLDP for NICs that have firmware versions lower than v4.3.
10121          * Ignore error return codes because if it was already disabled via
10122          * hardware settings this will fail
10123          */
10124         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
10125             (pf->hw.aq.fw_maj_ver < 4)) {
10126                 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
10127                 i40e_aq_stop_lldp(hw, true, NULL);
10128         }
10129
10130         i40e_get_mac_addr(hw, hw->mac.addr);
10131         if (!is_valid_ether_addr(hw->mac.addr)) {
10132                 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
10133                 err = -EIO;
10134                 goto err_mac_addr;
10135         }
10136         dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
10137         ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
10138         i40e_get_port_mac_addr(hw, hw->mac.port_addr);
10139         if (is_valid_ether_addr(hw->mac.port_addr))
10140                 pf->flags |= I40E_FLAG_PORT_ID_VALID;
10141 #ifdef I40E_FCOE
10142         err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
10143         if (err)
10144                 dev_info(&pdev->dev,
10145                          "(non-fatal) SAN MAC retrieval failed: %d\n", err);
10146         if (!is_valid_ether_addr(hw->mac.san_addr)) {
10147                 dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
10148                          hw->mac.san_addr);
10149                 ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
10150         }
10151         dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
10152 #endif /* I40E_FCOE */
10153
10154         pci_set_drvdata(pdev, pf);
10155         pci_save_state(pdev);
10156 #ifdef CONFIG_I40E_DCB
10157         err = i40e_init_pf_dcb(pf);
10158         if (err) {
10159                 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
10160                 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10161                 /* Continue without DCB enabled */
10162         }
10163 #endif /* CONFIG_I40E_DCB */
10164
10165         /* set up periodic task facility */
10166         setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
10167         pf->service_timer_period = HZ;
10168
10169         INIT_WORK(&pf->service_task, i40e_service_task);
10170         clear_bit(__I40E_SERVICE_SCHED, &pf->state);
10171         pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
10172
10173         /* NVM bit on means WoL disabled for the port */
10174         i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
10175         if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
10176                 pf->wol_en = false;
10177         else
10178                 pf->wol_en = true;
10179         device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
10180
10181         /* set up the main switch operations */
10182         i40e_determine_queue_usage(pf);
10183         err = i40e_init_interrupt_scheme(pf);
10184         if (err)
10185                 goto err_switch_setup;
10186
10187         /* The number of VSIs reported by the FW is the minimum guaranteed
10188          * to us; HW supports far more and we share the remaining pool with
10189          * the other PFs. We allocate space for more than the guarantee with
10190          * the understanding that we might not get them all later.
10191          */
10192         if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
10193                 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
10194         else
10195                 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
10196
10197         /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
10198         len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
10199         pf->vsi = kzalloc(len, GFP_KERNEL);
10200         if (!pf->vsi) {
10201                 err = -ENOMEM;
10202                 goto err_switch_setup;
10203         }
10204
10205 #ifdef CONFIG_PCI_IOV
10206         /* prep for VF support */
10207         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10208             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10209             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10210                 if (pci_num_vf(pdev))
10211                         pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
10212         }
10213 #endif
10214         err = i40e_setup_pf_switch(pf, false);
10215         if (err) {
10216                 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
10217                 goto err_vsis;
10218         }
10219         /* if FDIR VSI was set up, start it now */
10220         for (i = 0; i < pf->num_alloc_vsi; i++) {
10221                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
10222                         i40e_vsi_open(pf->vsi[i]);
10223                         break;
10224                 }
10225         }
10226
10227         /* driver is only interested in link up/down and module qualification
10228          * reports from firmware
10229          */
10230         err = i40e_aq_set_phy_int_mask(&pf->hw,
10231                                        I40E_AQ_EVENT_LINK_UPDOWN |
10232                                        I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
10233         if (err)
10234                 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
10235                          i40e_stat_str(&pf->hw, err),
10236                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10237
10238         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
10239             (pf->hw.aq.fw_maj_ver < 4)) {
10240                 msleep(75);
10241                 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
10242                 if (err)
10243                         dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
10244                                  i40e_stat_str(&pf->hw, err),
10245                                  i40e_aq_str(&pf->hw,
10246                                              pf->hw.aq.asq_last_status));
10247         }
10248         /* The main driver is (mostly) up and happy. We need to set this state
10249          * before setting up the misc vector or we get a race and the vector
10250          * ends up disabled forever.
10251          */
10252         clear_bit(__I40E_DOWN, &pf->state);
10253
10254         /* In case of MSIX we are going to setup the misc vector right here
10255          * to handle admin queue events etc. In case of legacy and MSI
10256          * the misc functionality and queue processing is combined in
10257          * the same vector and that gets setup at open.
10258          */
10259         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
10260                 err = i40e_setup_misc_vector(pf);
10261                 if (err) {
10262                         dev_info(&pdev->dev,
10263                                  "setup of misc vector failed: %d\n", err);
10264                         goto err_vsis;
10265                 }
10266         }
10267
10268 #ifdef CONFIG_PCI_IOV
10269         /* prep for VF support */
10270         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10271             (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
10272             !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
10273                 u32 val;
10274
10275                 /* disable link interrupts for VFs */
10276                 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
10277                 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
10278                 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
10279                 i40e_flush(hw);
10280
10281                 if (pci_num_vf(pdev)) {
10282                         dev_info(&pdev->dev,
10283                                  "Active VFs found, allocating resources.\n");
10284                         err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
10285                         if (err)
10286                                 dev_info(&pdev->dev,
10287                                          "Error %d allocating resources for existing VFs\n",
10288                                          err);
10289                 }
10290         }
10291 #endif /* CONFIG_PCI_IOV */
10292
10293         pfs_found++;
10294
10295         i40e_dbg_pf_init(pf);
10296
10297         /* tell the firmware that we're starting */
10298         i40e_send_version(pf);
10299
10300         /* since everything's happy, start the service_task timer */
10301         mod_timer(&pf->service_timer,
10302                   round_jiffies(jiffies + pf->service_timer_period));
10303
10304 #ifdef I40E_FCOE
10305         /* create FCoE interface */
10306         i40e_fcoe_vsi_setup(pf);
10307
10308 #endif
10309 #define PCI_SPEED_SIZE 8
10310 #define PCI_WIDTH_SIZE 8
10311         /* Devices on the IOSF bus do not have this information
10312          * and will report PCI Gen 1 x 1 by default so don't bother
10313          * checking them.
10314          */
10315         if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
10316                 char speed[PCI_SPEED_SIZE] = "Unknown";
10317                 char width[PCI_WIDTH_SIZE] = "Unknown";
10318
10319                 /* Get the negotiated link width and speed from PCI config
10320                  * space
10321                  */
10322                 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
10323                                           &link_status);
10324
10325                 i40e_set_pci_config_data(hw, link_status);
10326
10327                 switch (hw->bus.speed) {
10328                 case i40e_bus_speed_8000:
10329                         strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
10330                 case i40e_bus_speed_5000:
10331                         strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
10332                 case i40e_bus_speed_2500:
10333                         strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
10334                 default:
10335                         break;
10336                 }
10337                 switch (hw->bus.width) {
10338                 case i40e_bus_width_pcie_x8:
10339                         strncpy(width, "8", PCI_WIDTH_SIZE); break;
10340                 case i40e_bus_width_pcie_x4:
10341                         strncpy(width, "4", PCI_WIDTH_SIZE); break;
10342                 case i40e_bus_width_pcie_x2:
10343                         strncpy(width, "2", PCI_WIDTH_SIZE); break;
10344                 case i40e_bus_width_pcie_x1:
10345                         strncpy(width, "1", PCI_WIDTH_SIZE); break;
10346                 default:
10347                         break;
10348                 }
10349
10350                 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
10351                          speed, width);
10352
10353                 if (hw->bus.width < i40e_bus_width_pcie_x8 ||
10354                     hw->bus.speed < i40e_bus_speed_8000) {
10355                         dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
10356                         dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
10357                 }
10358         }
10359
10360         /* get the requested speeds from the fw */
10361         err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
10362         if (err)
10363                 dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
10364                         i40e_stat_str(&pf->hw, err),
10365                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10366         pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
10367
10368         /* get the supported phy types from the fw */
10369         err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
10370         if (err)
10371                 dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
10372                         i40e_stat_str(&pf->hw, err),
10373                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
10374         pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
10375
10376         /* print a string summarizing features */
10377         i40e_print_features(pf);
10378
10379         return 0;
10380
10381         /* Unwind what we've done if something failed in the setup */
10382 err_vsis:
10383         set_bit(__I40E_DOWN, &pf->state);
10384         i40e_clear_interrupt_scheme(pf);
10385         kfree(pf->vsi);
10386 err_switch_setup:
10387         i40e_reset_interrupt_capability(pf);
10388         del_timer_sync(&pf->service_timer);
10389 err_mac_addr:
10390 err_configure_lan_hmc:
10391         (void)i40e_shutdown_lan_hmc(hw);
10392 err_init_lan_hmc:
10393         kfree(pf->qp_pile);
10394 err_sw_init:
10395 err_adminq_setup:
10396         (void)i40e_shutdown_adminq(hw);
10397 err_pf_reset:
10398         iounmap(hw->hw_addr);
10399 err_ioremap:
10400         kfree(pf);
10401 err_pf_alloc:
10402         pci_disable_pcie_error_reporting(pdev);
10403         pci_release_selected_regions(pdev,
10404                                      pci_select_bars(pdev, IORESOURCE_MEM));
10405 err_pci_reg:
10406 err_dma:
10407         pci_disable_device(pdev);
10408         return err;
10409 }
10410
10411 /**
10412  * i40e_remove - Device removal routine
10413  * @pdev: PCI device information struct
10414  *
10415  * i40e_remove is called by the PCI subsystem to alert the driver
10416  * that is should release a PCI device.  This could be caused by a
10417  * Hot-Plug event, or because the driver is going to be removed from
10418  * memory.
10419  **/
10420 static void i40e_remove(struct pci_dev *pdev)
10421 {
10422         struct i40e_pf *pf = pci_get_drvdata(pdev);
10423         i40e_status ret_code;
10424         int i;
10425
10426         i40e_dbg_pf_exit(pf);
10427
10428         i40e_ptp_stop(pf);
10429
10430         /* no more scheduling of any task */
10431         set_bit(__I40E_DOWN, &pf->state);
10432         del_timer_sync(&pf->service_timer);
10433         cancel_work_sync(&pf->service_task);
10434         i40e_fdir_teardown(pf);
10435
10436         if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
10437                 i40e_free_vfs(pf);
10438                 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
10439         }
10440
10441         i40e_fdir_teardown(pf);
10442
10443         /* If there is a switch structure or any orphans, remove them.
10444          * This will leave only the PF's VSI remaining.
10445          */
10446         for (i = 0; i < I40E_MAX_VEB; i++) {
10447                 if (!pf->veb[i])
10448                         continue;
10449
10450                 if (pf->veb[i]->uplink_seid == pf->mac_seid ||
10451                     pf->veb[i]->uplink_seid == 0)
10452                         i40e_switch_branch_release(pf->veb[i]);
10453         }
10454
10455         /* Now we can shutdown the PF's VSI, just before we kill
10456          * adminq and hmc.
10457          */
10458         if (pf->vsi[pf->lan_vsi])
10459                 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
10460
10461         /* shutdown and destroy the HMC */
10462         if (pf->hw.hmc.hmc_obj) {
10463                 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
10464                 if (ret_code)
10465                         dev_warn(&pdev->dev,
10466                                  "Failed to destroy the HMC resources: %d\n",
10467                                  ret_code);
10468         }
10469
10470         /* shutdown the adminq */
10471         ret_code = i40e_shutdown_adminq(&pf->hw);
10472         if (ret_code)
10473                 dev_warn(&pdev->dev,
10474                          "Failed to destroy the Admin Queue resources: %d\n",
10475                          ret_code);
10476
10477         /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
10478         i40e_clear_interrupt_scheme(pf);
10479         for (i = 0; i < pf->num_alloc_vsi; i++) {
10480                 if (pf->vsi[i]) {
10481                         i40e_vsi_clear_rings(pf->vsi[i]);
10482                         i40e_vsi_clear(pf->vsi[i]);
10483                         pf->vsi[i] = NULL;
10484                 }
10485         }
10486
10487         for (i = 0; i < I40E_MAX_VEB; i++) {
10488                 kfree(pf->veb[i]);
10489                 pf->veb[i] = NULL;
10490         }
10491
10492         kfree(pf->qp_pile);
10493         kfree(pf->vsi);
10494
10495         iounmap(pf->hw.hw_addr);
10496         kfree(pf);
10497         pci_release_selected_regions(pdev,
10498                                      pci_select_bars(pdev, IORESOURCE_MEM));
10499
10500         pci_disable_pcie_error_reporting(pdev);
10501         pci_disable_device(pdev);
10502 }
10503
10504 /**
10505  * i40e_pci_error_detected - warning that something funky happened in PCI land
10506  * @pdev: PCI device information struct
10507  *
10508  * Called to warn that something happened and the error handling steps
10509  * are in progress.  Allows the driver to quiesce things, be ready for
10510  * remediation.
10511  **/
10512 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
10513                                                 enum pci_channel_state error)
10514 {
10515         struct i40e_pf *pf = pci_get_drvdata(pdev);
10516
10517         dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
10518
10519         /* shutdown all operations */
10520         if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
10521                 rtnl_lock();
10522                 i40e_prep_for_reset(pf);
10523                 rtnl_unlock();
10524         }
10525
10526         /* Request a slot reset */
10527         return PCI_ERS_RESULT_NEED_RESET;
10528 }
10529
10530 /**
10531  * i40e_pci_error_slot_reset - a PCI slot reset just happened
10532  * @pdev: PCI device information struct
10533  *
10534  * Called to find if the driver can work with the device now that
10535  * the pci slot has been reset.  If a basic connection seems good
10536  * (registers are readable and have sane content) then return a
10537  * happy little PCI_ERS_RESULT_xxx.
10538  **/
10539 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
10540 {
10541         struct i40e_pf *pf = pci_get_drvdata(pdev);
10542         pci_ers_result_t result;
10543         int err;
10544         u32 reg;
10545
10546         dev_dbg(&pdev->dev, "%s\n", __func__);
10547         if (pci_enable_device_mem(pdev)) {
10548                 dev_info(&pdev->dev,
10549                          "Cannot re-enable PCI device after reset.\n");
10550                 result = PCI_ERS_RESULT_DISCONNECT;
10551         } else {
10552                 pci_set_master(pdev);
10553                 pci_restore_state(pdev);
10554                 pci_save_state(pdev);
10555                 pci_wake_from_d3(pdev, false);
10556
10557                 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
10558                 if (reg == 0)
10559                         result = PCI_ERS_RESULT_RECOVERED;
10560                 else
10561                         result = PCI_ERS_RESULT_DISCONNECT;
10562         }
10563
10564         err = pci_cleanup_aer_uncorrect_error_status(pdev);
10565         if (err) {
10566                 dev_info(&pdev->dev,
10567                          "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
10568                          err);
10569                 /* non-fatal, continue */
10570         }
10571
10572         return result;
10573 }
10574
10575 /**
10576  * i40e_pci_error_resume - restart operations after PCI error recovery
10577  * @pdev: PCI device information struct
10578  *
10579  * Called to allow the driver to bring things back up after PCI error
10580  * and/or reset recovery has finished.
10581  **/
10582 static void i40e_pci_error_resume(struct pci_dev *pdev)
10583 {
10584         struct i40e_pf *pf = pci_get_drvdata(pdev);
10585
10586         dev_dbg(&pdev->dev, "%s\n", __func__);
10587         if (test_bit(__I40E_SUSPENDED, &pf->state))
10588                 return;
10589
10590         rtnl_lock();
10591         i40e_handle_reset_warning(pf);
10592         rtnl_unlock();
10593 }
10594
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 *
 * Quiesces the device, programs the Wake-on-LAN registers from the
 * cached pf->wol_en setting, tears down periodic work and, on a real
 * power-off, arms PCI wakeup and drops to D3hot.
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* mark suspended/down first so no new work gets scheduled */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	/* arm (or disarm) APM and magic-packet wakeup per pf->wol_en */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	/* Second quiesce pass after the service task and FDIR teardown are
	 * done.  NOTE(review): the repeated prep/WoL-write sequence looks
	 * deliberate (the service task may have run between the passes);
	 * confirm before attempting to simplify it.
	 */
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	/* only arm wakeup when the machine is actually powering off */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
10633
10634 #ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: PM transition target (unused here)
 *
 * Quiesces the device, programs the Wake-on-LAN registers from the
 * cached pf->wol_en setting and puts the device into D3hot.
 *
 * Returns 0 always.
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* mark suspended/down first; i40e_resume() keys off __I40E_SUSPENDED
	 * to know it must rebuild the device state
	 */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);

	/* quiesce under RTNL, matching the other reset paths in this file */
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	/* arm (or disarm) APM and magic-packet wakeup per pf->wol_en */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
10659
10660 /**
10661  * i40e_resume - PCI callback for waking up from D3
10662  * @pdev: PCI device information struct
10663  **/
10664 static int i40e_resume(struct pci_dev *pdev)
10665 {
10666         struct i40e_pf *pf = pci_get_drvdata(pdev);
10667         u32 err;
10668
10669         pci_set_power_state(pdev, PCI_D0);
10670         pci_restore_state(pdev);
10671         /* pci_restore_state() clears dev->state_saves, so
10672          * call pci_save_state() again to restore it.
10673          */
10674         pci_save_state(pdev);
10675
10676         err = pci_enable_device_mem(pdev);
10677         if (err) {
10678                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
10679                 return err;
10680         }
10681         pci_set_master(pdev);
10682
10683         /* no wakeup events while running */
10684         pci_wake_from_d3(pdev, false);
10685
10686         /* handling the reset will rebuild the device state */
10687         if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
10688                 clear_bit(__I40E_DOWN, &pf->state);
10689                 rtnl_lock();
10690                 i40e_reset_and_rebuild(pf, false);
10691                 rtnl_unlock();
10692         }
10693
10694         return 0;
10695 }
10696
10697 #endif
/* PCI AER recovery hooks: quiesce on error detection, sanity-check the
 * device after a slot reset, and rebuild state on resume.
 */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
10703
/* PCI driver glue: probe/remove, optional PM callbacks, shutdown hook,
 * AER error handlers and the SR-IOV sysfs configuration entry point.
 */
static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
#ifdef CONFIG_PM
	.suspend  = i40e_suspend,
	.resume   = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
10717
10718 /**
10719  * i40e_init_module - Driver registration routine
10720  *
10721  * i40e_init_module is the first routine called when the driver is
10722  * loaded. All it does is register with the PCI subsystem.
10723  **/
10724 static int __init i40e_init_module(void)
10725 {
10726         pr_info("%s: %s - version %s\n", i40e_driver_name,
10727                 i40e_driver_string, i40e_driver_version_str);
10728         pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
10729
10730         i40e_dbg_init();
10731         return pci_register_driver(&i40e_driver);
10732 }
10733 module_init(i40e_init_module);
10734
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	/* unregister first so every PF (which uses debugfs, see
	 * i40e_dbg_pf_exit() in i40e_remove) is gone before the
	 * debugfs root is torn down
	 */
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);