/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
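
/* In both pairs above the order value satisfies
 * ENTRIES == 8 << ENTRIES_ORDER (16 == 8 << 1, 64 == 8 << 3), i.e. the
 * *_ORDER macros appear to encode the descriptor cache size in units
 * of eight descriptors.  (Illustrative observation; keep each pair in
 * sync if either value changes.)
 */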

/* RX FIFO XOFF watermark
 *
 * When the amount of the RX FIFO used increases past this
 * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
int efx_nic_rx_xoff_thresh = -1;
module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");

/* RX FIFO XON watermark
 *
 * When the amount of the RX FIFO used decreases below this
 * watermark, send XON. Only used if TX flow control is enabled (ethtool -A).
 * This also has an effect on RX/TX arbitration.
 */
int efx_nic_rx_xon_thresh = -1;
module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100
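
/* With the values above, efx_nic_flush_queues() waits at most about
 * EFX_FLUSH_POLL_COUNT * EFX_FLUSH_INTERVAL ms == 100 * 10 ms == 1 s
 * (ignoring polling overhead) before declaring a flush timed out.
 */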

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)        \
        (0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)        \
        (0x00010200 + (_channel)->channel)
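
/* For example, assuming channel numbers stay below 256 so they fit in
 * the low byte: channel 3 generates magic 0x00010103 for test events
 * and 0x00010203 for fill events; efx_handle_generated_event() uses
 * these codes to tell the two apart.
 */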

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
                                     unsigned int index)
{
        efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
                        value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
                                     unsigned int index)
{
        return ((efx_qword_t *) (channel->eventq.addr)) + index;
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
        return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
                  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
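
/* Concretely: if the NIC's DMA write completes one dword at a time, a
 * reader may observe a half-written event such as
 *     dword[0] == 0x12345678   (new data)
 *     dword[1] == 0xffffffff   (still the cleared pattern)
 * A single 64-bit "not all-ones" test would wrongly accept this event;
 * checking each dword separately rejects it until both halves arrive.
 */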

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
                                     const efx_oword_t *mask)
{
        return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
                ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
                           const struct efx_nic_register_test *regs,
                           size_t n_regs)
{
        unsigned address = 0, i, j;
        efx_oword_t mask, imask, original, reg, buf;

        /* Falcon should be in loopback to isolate the XMAC from the PHY */
        WARN_ON(!LOOPBACK_INTERNAL(efx));

        for (i = 0; i < n_regs; ++i) {
                address = regs[i].address;
                mask = imask = regs[i].mask;
                EFX_INVERT_OWORD(imask);

                efx_reado(efx, &original, address);

                /* bit sweep on and off */
                for (j = 0; j < 128; j++) {
                        if (!EFX_EXTRACT_OWORD32(mask, j, j))
                                continue;

                        /* Test that this testable bit can be set in isolation */
                        EFX_AND_OWORD(reg, original, mask);
                        EFX_SET_OWORD32(reg, j, j, 1);

                        efx_writeo(efx, &reg, address);
                        efx_reado(efx, &buf, address);

                        if (efx_masked_compare_oword(&reg, &buf, &mask))
                                goto fail;

                        /* Test that this testable bit can be cleared in isolation */
                        EFX_OR_OWORD(reg, original, mask);
                        EFX_SET_OWORD32(reg, j, j, 0);

                        efx_writeo(efx, &reg, address);
                        efx_reado(efx, &buf, address);

                        if (efx_masked_compare_oword(&reg, &buf, &mask))
                                goto fail;
                }

                efx_writeo(efx, &original, address);
        }

        return 0;

fail:
        netif_err(efx, hw, efx->net_dev,
                  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
                  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
                  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
        return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
        efx_qword_t buf_desc;
        int index;
        dma_addr_t dma_addr;
        int i;

        EFX_BUG_ON_PARANOID(!buffer->addr);

        /* Write buffer descriptors to NIC */
        for (i = 0; i < buffer->entries; i++) {
                index = buffer->index + i;
                dma_addr = buffer->dma_addr + (i * 4096);
                netif_dbg(efx, probe, efx->net_dev,
                          "mapping special buffer %d at %llx\n",
                          index, (unsigned long long)dma_addr);
                EFX_POPULATE_QWORD_3(buf_desc,
                                     FRF_AZ_BUF_ADR_REGION, 0,
                                     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
                                     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
                efx_write_buf_tbl(efx, &buf_desc, index);
        }
}
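
/* Worked example (illustrative): for a page at dma_addr 0x12345000,
 * dma_addr >> 12 yields page frame number 0x12345, i.e. the buffer
 * table holds 4KB page numbers rather than byte addresses - hence the
 * alignment BUG_ON in efx_alloc_special_buffer() below.
 */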

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
        efx_oword_t buf_tbl_upd;
        unsigned int start = buffer->index;
        unsigned int end = (buffer->index + buffer->entries - 1);

        if (!buffer->entries)
                return;

        netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
                  buffer->index, buffer->index + buffer->entries - 1);

        EFX_POPULATE_OWORD_4(buf_tbl_upd,
                             FRF_AZ_BUF_UPD_CMD, 0,
                             FRF_AZ_BUF_CLR_CMD, 1,
                             FRF_AZ_BUF_CLR_END_ID, end,
                             FRF_AZ_BUF_CLR_START_ID, start);
        efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
                                    struct efx_special_buffer *buffer,
                                    unsigned int len)
{
        len = ALIGN(len, EFX_BUF_SIZE);

        buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
                                          &buffer->dma_addr, GFP_KERNEL);
        if (!buffer->addr)
                return -ENOMEM;
        buffer->len = len;
        buffer->entries = len / EFX_BUF_SIZE;
        BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

        /* All zeros is a potentially valid event so memset to 0xff */
        memset(buffer->addr, 0xff, len);

        /* Select new buffer ID */
        buffer->index = efx->next_buffer_table;
        efx->next_buffer_table += buffer->entries;

        netif_dbg(efx, probe, efx->net_dev,
                  "allocating special buffers %d-%d at %llx+%x "
                  "(virt %p phys %llx)\n", buffer->index,
                  buffer->index + buffer->entries - 1,
                  (u64)buffer->dma_addr, len,
                  buffer->addr, (u64)virt_to_phys(buffer->addr));

        return 0;
}
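
/* Typical lifecycle of a special buffer, as used by the TX, RX and
 * event queue code below:
 *
 *     efx_alloc_special_buffer()  - probe:  allocate DMA memory and IDs
 *     efx_init_special_buffer()   - init:   write the buffer table entries
 *     efx_fini_special_buffer()   - fini:   clear the buffer table entries
 *     efx_free_special_buffer()   - remove: free the DMA memory
 */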

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
        if (!buffer->addr)
                return;

        netif_dbg(efx, hw, efx->net_dev,
                  "deallocating special buffers %d-%d at %llx+%x "
                  "(virt %p phys %llx)\n", buffer->index,
                  buffer->index + buffer->entries - 1,
                  (u64)buffer->dma_addr, buffer->len,
                  buffer->addr, (u64)virt_to_phys(buffer->addr));

        dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
                          buffer->dma_addr);
        buffer->addr = NULL;
        buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
                         unsigned int len)
{
        buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
                                            &buffer->dma_addr);
        if (!buffer->addr)
                return -ENOMEM;
        buffer->len = len;
        memset(buffer->addr, 0, len);
        return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
        if (buffer->addr) {
                pci_free_consistent(efx->pci_dev, buffer->len,
                                    buffer->addr, buffer->dma_addr);
                buffer->addr = NULL;
        }
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
        return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
        unsigned write_ptr;
        efx_dword_t reg;

        write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
        EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
        efx_writed_page(tx_queue->efx, &reg,
                        FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}


/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;
        efx_qword_t *txd;
        unsigned write_ptr;

        BUG_ON(tx_queue->write_count == tx_queue->insert_count);

        do {
                write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
                buffer = &tx_queue->buffer[write_ptr];
                txd = efx_tx_desc(tx_queue, write_ptr);
                ++tx_queue->write_count;

                /* Create TX descriptor ring entry */
                EFX_POPULATE_QWORD_4(*txd,
                                     FSF_AZ_TX_KER_CONT, buffer->continuation,
                                     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
                                     FSF_AZ_TX_KER_BUF_REGION, 0,
                                     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
        } while (tx_queue->write_count != tx_queue->insert_count);

        wmb(); /* Ensure descriptors are written before they are fetched */
        efx_notify_tx_desc(tx_queue);
}
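
/* Ring arithmetic used above (illustrative): the ring size is a power
 * of two and ptr_mask == size - 1, so the counters may increment
 * without bound while "count & ptr_mask" yields the ring slot.  For
 * example, with 512 entries (ptr_mask == 511), write_count == 513
 * maps to slot 513 & 511 == 1.
 */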

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned entries;

        entries = tx_queue->ptr_mask + 1;
        return efx_alloc_special_buffer(efx, &tx_queue->txd,
                                        entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
        efx_oword_t tx_desc_ptr;
        struct efx_nic *efx = tx_queue->efx;

        tx_queue->flushed = FLUSH_NONE;

        /* Pin TX descriptor ring */
        efx_init_special_buffer(efx, &tx_queue->txd);

        /* Push TX descriptor ring to card */
        EFX_POPULATE_OWORD_10(tx_desc_ptr,
                              FRF_AZ_TX_DESCQ_EN, 1,
                              FRF_AZ_TX_ISCSI_DDIG_EN, 0,
                              FRF_AZ_TX_ISCSI_HDIG_EN, 0,
                              FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
                              FRF_AZ_TX_DESCQ_EVQ_ID,
                              tx_queue->channel->channel,
                              FRF_AZ_TX_DESCQ_OWNER_ID, 0,
                              FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
                              FRF_AZ_TX_DESCQ_SIZE,
                              __ffs(tx_queue->txd.entries),
                              FRF_AZ_TX_DESCQ_TYPE, 0,
                              FRF_BZ_TX_NON_IP_DROP_DIS, 1);

        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
                int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
                EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
                EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
                                    !csum);
        }

        efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
                         tx_queue->queue);

        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
                efx_oword_t reg;

                /* Only 128 bits in this register */
                BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

                efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
                if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
                        clear_bit_le(tx_queue->queue, (void *)&reg);
                else
                        set_bit_le(tx_queue->queue, (void *)&reg);
                efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
        }
}
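
/* Note on the DESCQ_SIZE encoding above: txd.entries counts the 4KB
 * buffer-table pages backing the ring (len / EFX_BUF_SIZE in
 * efx_alloc_special_buffer()), and each page holds 512 eight-byte
 * descriptors.  The page count is a power of two, so __ffs() gives its
 * log2: e.g. a 4096-descriptor ring occupies 8 pages and is encoded
 * as __ffs(8) == 3.
 */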

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t tx_flush_descq;

        tx_queue->flushed = FLUSH_PENDING;

        /* Post a flush command */
        EFX_POPULATE_OWORD_2(tx_flush_descq,
                             FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
                             FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
        efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t tx_desc_ptr;

        /* The queue should have been flushed */
        WARN_ON(tx_queue->flushed != FLUSH_DONE);

        /* Remove TX descriptor ring from card */
        EFX_ZERO_OWORD(tx_desc_ptr);
        efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
                         tx_queue->queue);

        /* Unpin TX descriptor ring */
        efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
        efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
        return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
        struct efx_rx_buffer *rx_buf;
        efx_qword_t *rxd;

        rxd = efx_rx_desc(rx_queue, index);
        rx_buf = efx_rx_buffer(rx_queue, index);
        EFX_POPULATE_QWORD_3(*rxd,
                             FSF_AZ_RX_KER_BUF_SIZE,
                             rx_buf->len -
                             rx_queue->efx->type->rx_buffer_padding,
                             FSF_AZ_RX_KER_BUF_REGION, 0,
                             FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        efx_dword_t reg;
        unsigned write_ptr;

        while (rx_queue->notified_count != rx_queue->added_count) {
                efx_build_rx_desc(
                        rx_queue,
                        rx_queue->notified_count & rx_queue->ptr_mask);
                ++rx_queue->notified_count;
        }

        wmb();
        write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
        EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
        efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
                        efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned entries;

        entries = rx_queue->ptr_mask + 1;
        return efx_alloc_special_buffer(efx, &rx_queue->rxd,
                                        entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
        efx_oword_t rx_desc_ptr;
        struct efx_nic *efx = rx_queue->efx;
        bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
        bool iscsi_digest_en = is_b0;

        netif_dbg(efx, hw, efx->net_dev,
                  "RX queue %d ring in special buffers %d-%d\n",
                  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
                  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

        rx_queue->flushed = FLUSH_NONE;

        /* Pin RX descriptor ring */
        efx_init_special_buffer(efx, &rx_queue->rxd);

        /* Push RX descriptor ring to card */
        EFX_POPULATE_OWORD_10(rx_desc_ptr,
                              FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
                              FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
                              FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
                              FRF_AZ_RX_DESCQ_EVQ_ID,
                              efx_rx_queue_channel(rx_queue)->channel,
                              FRF_AZ_RX_DESCQ_OWNER_ID, 0,
                              FRF_AZ_RX_DESCQ_LABEL,
                              efx_rx_queue_index(rx_queue),
                              FRF_AZ_RX_DESCQ_SIZE,
                              __ffs(rx_queue->rxd.entries),
                              FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
                              /* For >=B0 this is scatter so disable */
                              FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
                              FRF_AZ_RX_DESCQ_EN, 1);
        efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
                         efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        efx_oword_t rx_flush_descq;

        rx_queue->flushed = FLUSH_PENDING;

        /* Post a flush command */
        EFX_POPULATE_OWORD_2(rx_flush_descq,
                             FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
                             FRF_AZ_RX_FLUSH_DESCQ,
                             efx_rx_queue_index(rx_queue));
        efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
        efx_oword_t rx_desc_ptr;
        struct efx_nic *efx = rx_queue->efx;

        /* The queue should already have been flushed */
        WARN_ON(rx_queue->flushed != FLUSH_DONE);

        /* Remove RX descriptor ring from card */
        EFX_ZERO_OWORD(rx_desc_ptr);
        efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
                         efx_rx_queue_index(rx_queue));

        /* Unpin RX descriptor ring */
        efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
        efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
        efx_dword_t reg;
        struct efx_nic *efx = channel->efx;

        EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
        efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
                         channel->channel);
}

/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
        efx_oword_t drv_ev_reg;

        BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
                     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
        drv_ev_reg.u32[0] = event->u32[0];
        drv_ev_reg.u32[1] = event->u32[1];
        drv_ev_reg.u32[2] = 0;
        drv_ev_reg.u32[3] = 0;
        EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
        efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
        unsigned int tx_ev_desc_ptr;
        unsigned int tx_ev_q_label;
        struct efx_tx_queue *tx_queue;
        struct efx_nic *efx = channel->efx;
        int tx_packets = 0;

        if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
                /* Transmit completion */
                tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
                tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
                tx_queue = efx_channel_get_tx_queue(
                        channel, tx_ev_q_label % EFX_TXQ_TYPES);
                tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
                              tx_queue->ptr_mask);
                channel->irq_mod_score += tx_packets;
                efx_xmit_done(tx_queue, tx_ev_desc_ptr);
        } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
                /* Rewrite the FIFO write pointer */
                tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
                tx_queue = efx_channel_get_tx_queue(
                        channel, tx_ev_q_label % EFX_TXQ_TYPES);

                if (efx_dev_registered(efx))
                        netif_tx_lock(efx->net_dev);
                efx_notify_tx_desc(tx_queue);
                if (efx_dev_registered(efx))
                        netif_tx_unlock(efx->net_dev);
        } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
                   EFX_WORKAROUND_10727(efx)) {
                efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
        } else {
                netif_err(efx, tx_err, efx->net_dev,
                          "channel %d unexpected TX event "
                          EFX_QWORD_FMT"\n", channel->channel,
                          EFX_QWORD_VAL(*event));
        }

        return tx_packets;
}
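
/* Example of the batched-completion arithmetic above (illustrative):
 * with ptr_mask == 511, read_count == 508 and tx_ev_desc_ptr == 2,
 * tx_packets == (2 - 508) & 511 == 6, so the subtraction wraps
 * correctly across the end of the ring.
 */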

/* Detect errors included in the rx_ev_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
                                 const efx_qword_t *event,
                                 bool *rx_ev_pkt_ok,
                                 bool *discard)
{
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_nic *efx = rx_queue->efx;
        bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
        bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
        bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
        bool rx_ev_other_err, rx_ev_pause_frm;
        bool rx_ev_hdr_type, rx_ev_mcast_pkt;
        unsigned rx_ev_pkt_type;

        rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
        rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
        rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
        rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
        rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
                                                 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
        rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
                                                  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
        rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
                                                   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
        rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
        rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
        rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
                          0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
        rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

        /* Every error apart from tobe_disc and pause_frm */
        rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
                           rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
                           rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

        /* Count errors that are not in MAC stats.  Ignore expected
         * checksum errors during self-test. */
        if (rx_ev_frm_trunc)
                ++channel->n_rx_frm_trunc;
        else if (rx_ev_tobe_disc)
                ++channel->n_rx_tobe_disc;
        else if (!efx->loopback_selftest) {
                if (rx_ev_ip_hdr_chksum_err)
                        ++channel->n_rx_ip_hdr_chksum_err;
                else if (rx_ev_tcp_udp_chksum_err)
                        ++channel->n_rx_tcp_udp_chksum_err;
        }

        /* The frame must be discarded if any of these are true. */
        *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
                    rx_ev_tobe_disc | rx_ev_pause_frm);

        /* TOBE_DISC is expected on unicast mismatches; don't print out an
         * error message.  FRM_TRUNC indicates RXDP dropped the packet due
         * to a FIFO overflow.
         */
#ifdef EFX_ENABLE_DEBUG
        if (rx_ev_other_err && net_ratelimit()) {
                netif_dbg(efx, rx_err, efx->net_dev,
                          " RX queue %d unexpected RX event "
                          EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
                          efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
                          rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
                          rx_ev_ip_hdr_chksum_err ?
                          " [IP_HDR_CHKSUM_ERR]" : "",
                          rx_ev_tcp_udp_chksum_err ?
                          " [TCP_UDP_CHKSUM_ERR]" : "",
                          rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
                          rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
                          rx_ev_drib_nib ? " [DRIB_NIB]" : "",
                          rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
                          rx_ev_pause_frm ? " [PAUSE]" : "");
        }
#endif
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned expected, dropped;

        expected = rx_queue->removed_count & rx_queue->ptr_mask;
        dropped = (index - expected) & rx_queue->ptr_mask;
        netif_info(efx, rx_err, efx->net_dev,
                   "dropped %d events (index=%d expected=%d)\n",
                   dropped, index, expected);

        efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
                           RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
        unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
        unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
        unsigned expected_ptr;
        bool rx_ev_pkt_ok, discard = false, checksummed;
        struct efx_rx_queue *rx_queue;
        struct efx_nic *efx = channel->efx;

        /* Basic packet information */
        rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
        rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
        rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
        WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
        WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
        WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
                channel->channel);

        rx_queue = efx_channel_get_rx_queue(channel);

        rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
        expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
        if (unlikely(rx_ev_desc_ptr != expected_ptr))
                efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

        if (likely(rx_ev_pkt_ok)) {
                /* If packet is marked as OK and packet type is TCP/IP or
                 * UDP/IP, then we can rely on the hardware checksum.
                 */
                checksummed =
                        likely(efx->rx_checksum_enabled) &&
                        (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
                         rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
        } else {
                efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
                checksummed = false;
        }

        /* Detect multicast packets that didn't match the filter */
        rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
        if (rx_ev_mcast_pkt) {
                unsigned int rx_ev_mcast_hash_match =
                        EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

                if (unlikely(!rx_ev_mcast_hash_match)) {
                        ++channel->n_rx_mcast_mismatch;
                        discard = true;
                }
        }

        channel->irq_mod_score += 2;

        /* Handle received packet */
        efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
                      checksummed, discard);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
        struct efx_nic *efx = channel->efx;
        unsigned code;

        code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
        if (code == EFX_CHANNEL_MAGIC_TEST(channel))
                ++channel->magic_count;
        else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
                /* The queue must be empty, so we won't receive any rx
                 * events, so efx_process_channel() won't refill the
                 * queue. Refill it here */
                efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
        else
                netif_dbg(efx, hw, efx->net_dev, "channel %d received "
                          "generated event "EFX_QWORD_FMT"\n",
                          channel->channel, EFX_QWORD_VAL(*event));
}

/* Global events are basically PHY events */
static void
efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
{
        struct efx_nic *efx = channel->efx;
        bool handled = false;

        if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
            EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
            EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
                /* Ignored */
                handled = true;
        }

        if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
            EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
                efx->xmac_poll_required = true;
                handled = true;
        }

        if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
            EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
            EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
                netif_err(efx, rx_err, efx->net_dev,
                          "channel %d seen global RX_RESET event. Resetting.\n",
                          channel->channel);

                atomic_inc(&efx->rx_reset);
                efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
                                   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
                handled = true;
        }

        if (!handled)
                netif_err(efx, hw, efx->net_dev,
                          "channel %d unknown global event "
                          EFX_QWORD_FMT "\n", channel->channel,
                          EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
        struct efx_nic *efx = channel->efx;
        unsigned int ev_sub_code;
        unsigned int ev_sub_data;

        ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
        ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

        switch (ev_sub_code) {
        case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
                netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
                           channel->channel, ev_sub_data);
                break;
        case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
                netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
                           channel->channel, ev_sub_data);
                break;
        case FSE_AZ_EVQ_INIT_DONE_EV:
                netif_dbg(efx, hw, efx->net_dev,
                          "channel %d EVQ %d initialised\n",
                          channel->channel, ev_sub_data);
                break;
        case FSE_AZ_SRM_UPD_DONE_EV:
                netif_vdbg(efx, hw, efx->net_dev,
                           "channel %d SRAM update done\n", channel->channel);
                break;
        case FSE_AZ_WAKE_UP_EV:
                netif_vdbg(efx, hw, efx->net_dev,
                           "channel %d RXQ %d wakeup event\n",
                           channel->channel, ev_sub_data);
                break;
        case FSE_AZ_TIMER_EV:
                netif_vdbg(efx, hw, efx->net_dev,
                           "channel %d RX queue %d timer expired\n",
                           channel->channel, ev_sub_data);
                break;
        case FSE_AA_RX_RECOVER_EV:
                netif_err(efx, rx_err, efx->net_dev,
                          "channel %d seen DRIVER RX_RESET event. "
                          "Resetting.\n", channel->channel);
                atomic_inc(&efx->rx_reset);
                efx_schedule_reset(efx,
                                   EFX_WORKAROUND_6555(efx) ?
                                   RESET_TYPE_RX_RECOVERY :
                                   RESET_TYPE_DISABLE);
                break;
        case FSE_BZ_RX_DSC_ERROR_EV:
                netif_err(efx, rx_err, efx->net_dev,
                          "RX DMA Q %d reports descriptor fetch error."
                          " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
                efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
                break;
        case FSE_BZ_TX_DSC_ERROR_EV:
                netif_err(efx, tx_err, efx->net_dev,
                          "TX DMA Q %d reports descriptor fetch error."
                          " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
                efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
                break;
        default:
                netif_vdbg(efx, hw, efx->net_dev,
                           "channel %d unknown driver event code %d "
                           "data %04x\n", channel->channel, ev_sub_code,
                           ev_sub_data);
                break;
        }
}

int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
        struct efx_nic *efx = channel->efx;
        unsigned int read_ptr;
        efx_qword_t event, *p_event;
        int ev_code;
        int tx_packets = 0;
        int spent = 0;

        read_ptr = channel->eventq_read_ptr;

        for (;;) {
                p_event = efx_event(channel, read_ptr);
                event = *p_event;

                if (!efx_event_present(&event))
                        /* End of events */
                        break;

                netif_vdbg(channel->efx, intr, channel->efx->net_dev,
                           "channel %d event is "EFX_QWORD_FMT"\n",
                           channel->channel, EFX_QWORD_VAL(event));

                /* Clear this event by marking it all ones */
                EFX_SET_QWORD(*p_event);

                /* Increment read pointer */
                read_ptr = (read_ptr + 1) & channel->eventq_mask;

                ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

                switch (ev_code) {
                case FSE_AZ_EV_CODE_RX_EV:
                        efx_handle_rx_event(channel, &event);
                        if (++spent == budget)
                                goto out;
                        break;
                case FSE_AZ_EV_CODE_TX_EV:
                        tx_packets += efx_handle_tx_event(channel, &event);
                        if (tx_packets > efx->txq_entries) {
                                spent = budget;
                                goto out;
                        }
                        break;
                case FSE_AZ_EV_CODE_DRV_GEN_EV:
                        efx_handle_generated_event(channel, &event);
                        break;
                case FSE_AZ_EV_CODE_GLOBAL_EV:
                        efx_handle_global_event(channel, &event);
                        break;
                case FSE_AZ_EV_CODE_DRIVER_EV:
                        efx_handle_driver_event(channel, &event);
                        break;
                case FSE_CZ_EV_CODE_MCDI_EV:
                        efx_mcdi_process_event(channel, &event);
                        break;
                default:
                        netif_err(channel->efx, hw, channel->efx->net_dev,
                                  "channel %d unknown event type %d (data "
                                  EFX_QWORD_FMT ")\n", channel->channel,
                                  ev_code, EFX_QWORD_VAL(event));
                }
        }

out:
        channel->eventq_read_ptr = read_ptr;
        return spent;
}
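
/* Usage note (an assumption about the caller, not part of this file):
 * the return value is intended to be used NAPI-style - a return equal
 * to "budget" means more events may be pending and the channel should
 * be polled again, while a smaller value means the queue was drained
 * and the caller may acknowledge it via efx_nic_eventq_read_ack() and
 * re-enable interrupts.
 */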
1074
1075
1076 /* Allocate buffer table entries for event queue */
1077 int efx_nic_probe_eventq(struct efx_channel *channel)
1078 {
1079         struct efx_nic *efx = channel->efx;
1080         unsigned entries;
1081
1082         entries = channel->eventq_mask + 1;
1083         return efx_alloc_special_buffer(efx, &channel->eventq,
1084                                         entries * sizeof(efx_qword_t));
1085 }
1086
1087 void efx_nic_init_eventq(struct efx_channel *channel)
1088 {
1089         efx_oword_t reg;
1090         struct efx_nic *efx = channel->efx;
1091
1092         netif_dbg(efx, hw, efx->net_dev,
1093                   "channel %d event queue in special buffers %d-%d\n",
1094                   channel->channel, channel->eventq.index,
1095                   channel->eventq.index + channel->eventq.entries - 1);
1096
1097         if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1098                 EFX_POPULATE_OWORD_3(reg,
1099                                      FRF_CZ_TIMER_Q_EN, 1,
1100                                      FRF_CZ_HOST_NOTIFY_MODE, 0,
1101                                      FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1102                 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1103         }
1104
1105         /* Pin event queue buffer */
1106         efx_init_special_buffer(efx, &channel->eventq);
1107
1108         /* Fill event queue with all ones (i.e. empty events) */
1109         memset(channel->eventq.addr, 0xff, channel->eventq.len);
1110
1111         /* Push event queue to card */
1112         EFX_POPULATE_OWORD_3(reg,
1113                              FRF_AZ_EVQ_EN, 1,
1114                              FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1115                              FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1116         efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1117                          channel->channel);
1118
1119         efx->type->push_irq_moderation(channel);
1120 }
1121
1122 void efx_nic_fini_eventq(struct efx_channel *channel)
1123 {
1124         efx_oword_t reg;
1125         struct efx_nic *efx = channel->efx;
1126
1127         /* Remove event queue from card */
1128         EFX_ZERO_OWORD(reg);
1129         efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1130                          channel->channel);
1131         if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1132                 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1133
1134         /* Unpin event queue */
1135         efx_fini_special_buffer(efx, &channel->eventq);
1136 }
1137
1138 /* Free buffers backing event queue */
1139 void efx_nic_remove_eventq(struct efx_channel *channel)
1140 {
1141         efx_free_special_buffer(channel->efx, &channel->eventq);
1142 }
1143
1144
1145 void efx_nic_generate_test_event(struct efx_channel *channel)
1146 {
1147         unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
1148         efx_qword_t test_event;
1149
1150         EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1151                              FSE_AZ_EV_CODE_DRV_GEN_EV,
1152                              FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1153         efx_generate_event(channel, &test_event);
1154 }
1155
1156 void efx_nic_generate_fill_event(struct efx_channel *channel)
1157 {
1158         unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
1159         efx_qword_t test_event;
1160
1161         EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1162                              FSE_AZ_EV_CODE_DRV_GEN_EV,
1163                              FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1164         efx_generate_event(channel, &test_event);
1165 }
1166
1167 /**************************************************************************
1168  *
1169  * Flush handling
1170  *
1171  **************************************************************************/
1172
1173
1174 static void efx_poll_flush_events(struct efx_nic *efx)
1175 {
1176         struct efx_channel *channel = efx_get_channel(efx, 0);
1177         struct efx_tx_queue *tx_queue;
1178         struct efx_rx_queue *rx_queue;
1179         unsigned int read_ptr = channel->eventq_read_ptr;
1180         unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
1181
1182         do {
1183                 efx_qword_t *event = efx_event(channel, read_ptr);
1184                 int ev_code, ev_sub_code, ev_queue;
1185                 bool ev_failed;
1186
1187                 if (!efx_event_present(event))
1188                         break;
1189
1190                 ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
1191                 ev_sub_code = EFX_QWORD_FIELD(*event,
1192                                               FSF_AZ_DRIVER_EV_SUBCODE);
1193                 if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1194                     ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1195                         ev_queue = EFX_QWORD_FIELD(*event,
1196                                                    FSF_AZ_DRIVER_EV_SUBDATA);
1197                         if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1198                                 tx_queue = efx_get_tx_queue(
1199                                         efx, ev_queue / EFX_TXQ_TYPES,
1200                                         ev_queue % EFX_TXQ_TYPES);
1201                                 tx_queue->flushed = FLUSH_DONE;
1202                         }
1203                 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1204                            ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
1205                         ev_queue = EFX_QWORD_FIELD(
1206                                 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1207                         ev_failed = EFX_QWORD_FIELD(
1208                                 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1209                         if (ev_queue < efx->n_rx_channels) {
1210                                 rx_queue = efx_get_rx_queue(efx, ev_queue);
1211                                 rx_queue->flushed =
1212                                         ev_failed ? FLUSH_FAILED : FLUSH_DONE;
1213                         }
1214                 }
1215
1216                 /* We're about to destroy the queue anyway, so
1217                  * it's ok to throw away every non-flush event */
1218                 EFX_SET_QWORD(*event);
1219
1220                 read_ptr = (read_ptr + 1) & channel->eventq_mask;
1221         } while (read_ptr != end_ptr);
1222
1223         channel->eventq_read_ptr = read_ptr;
1224 }
1225
1226 /* Handle tx and rx flushes at the same time, since they run in
1227  * parallel in the hardware and there's no reason for us to
1228  * serialise them */
1229 int efx_nic_flush_queues(struct efx_nic *efx)
1230 {
1231         struct efx_channel *channel;
1232         struct efx_rx_queue *rx_queue;
1233         struct efx_tx_queue *tx_queue;
1234         int i, tx_pending, rx_pending;
1235
1236         /* If necessary prepare the hardware for flushing */
1237         efx->type->prepare_flush(efx);
1238
1239         /* Flush all tx queues in parallel */
1240         efx_for_each_channel(channel, efx) {
1241                 efx_for_each_channel_tx_queue(tx_queue, channel)
1242                         efx_flush_tx_queue(tx_queue);
1243         }
1244
1245         /* The hardware supports four concurrent rx flushes, each of which may
1246          * need to be retried if there is an outstanding descriptor fetch */
1247         for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
1248                 rx_pending = tx_pending = 0;
1249                 efx_for_each_channel(channel, efx) {
1250                         efx_for_each_channel_rx_queue(rx_queue, channel) {
1251                                 if (rx_queue->flushed == FLUSH_PENDING)
1252                                         ++rx_pending;
1253                         }
1254                 }
1255                 efx_for_each_channel(channel, efx) {
1256                         efx_for_each_channel_rx_queue(rx_queue, channel) {
1257                                 if (rx_pending == EFX_RX_FLUSH_COUNT)
1258                                         break;
1259                                 if (rx_queue->flushed == FLUSH_FAILED ||
1260                                     rx_queue->flushed == FLUSH_NONE) {
1261                                         efx_flush_rx_queue(rx_queue);
1262                                         ++rx_pending;
1263                                 }
1264                         }
1265                         efx_for_each_channel_tx_queue(tx_queue, channel) {
1266                                 if (tx_queue->flushed != FLUSH_DONE)
1267                                         ++tx_pending;
1268                         }
1269                 }
1270
1271                 if (rx_pending == 0 && tx_pending == 0)
1272                         return 0;
1273
1274                 msleep(EFX_FLUSH_INTERVAL);
1275                 efx_poll_flush_events(efx);
1276         }
1277
1278         /* Mark the queues as all flushed. We're going to return failure
1279          * leading to a reset, or fake up success anyway */
1280         efx_for_each_channel(channel, efx) {
1281                 efx_for_each_channel_tx_queue(tx_queue, channel) {
1282                         if (tx_queue->flushed != FLUSH_DONE)
1283                                 netif_err(efx, hw, efx->net_dev,
1284                                           "tx queue %d flush command timed out\n",
1285                                           tx_queue->queue);
1286                         tx_queue->flushed = FLUSH_DONE;
1287                 }
1288                 efx_for_each_channel_rx_queue(rx_queue, channel) {
1289                         if (rx_queue->flushed != FLUSH_DONE)
1290                                 netif_err(efx, hw, efx->net_dev,
1291                                           "rx queue %d flush command timed out\n",
1292                                           efx_rx_queue_index(rx_queue));
1293                         rx_queue->flushed = FLUSH_DONE;
1294                 }
1295         }
1296
1297         return -ETIMEDOUT;
1298 }
1299
1300 /**************************************************************************
1301  *
1302  * Hardware interrupts
1303  * The hardware interrupt handler does very little work; all the event
1304  * queue processing is carried out by per-channel tasklets.
1305  *
1306  **************************************************************************/
1307
1308 /* Enable/disable/generate interrupts */
1309 static inline void efx_nic_interrupts(struct efx_nic *efx,
1310                                       bool enabled, bool force)
1311 {
1312         efx_oword_t int_en_reg_ker;
1313
1314         EFX_POPULATE_OWORD_3(int_en_reg_ker,
1315                              FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
1316                              FRF_AZ_KER_INT_KER, force,
1317                              FRF_AZ_DRV_INT_EN_KER, enabled);
1318         efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1319 }
1320
1321 void efx_nic_enable_interrupts(struct efx_nic *efx)
1322 {
1323         struct efx_channel *channel;
1324
1325         EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1326         wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1327
1328         /* Enable interrupts */
1329         efx_nic_interrupts(efx, true, false);
1330
        /* Force processing of all the channels to get the EVQ RPTRs up to
         * date */
1333         efx_for_each_channel(channel, efx)
1334                 efx_schedule_channel(channel);
1335 }
1336
1337 void efx_nic_disable_interrupts(struct efx_nic *efx)
1338 {
1339         /* Disable interrupts */
1340         efx_nic_interrupts(efx, false, false);
1341 }
1342
1343 /* Generate a test interrupt
 * Interrupts must already have been enabled, otherwise nasty things
1345  * may happen.
1346  */
1347 void efx_nic_generate_interrupt(struct efx_nic *efx)
1348 {
1349         efx_nic_interrupts(efx, true, true);
1350 }
1351
1352 /* Process a fatal interrupt
1353  * Disable bus mastering ASAP and schedule a reset
1354  */
1355 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1356 {
1357         struct falcon_nic_data *nic_data = efx->nic_data;
1358         efx_oword_t *int_ker = efx->irq_status.addr;
1359         efx_oword_t fatal_intr;
1360         int error, mem_perr;
1361
1362         efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1363         error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1364
1365         netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1366                   EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1367                   EFX_OWORD_VAL(fatal_intr),
1368                   error ? "disabling bus mastering" : "no recognised error");
1369
        /* If this is a memory parity error, dump which blocks are offending */
1371         mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1372                     EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1373         if (mem_perr) {
1374                 efx_oword_t reg;
1375                 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1376                 netif_err(efx, hw, efx->net_dev,
1377                           "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1378                           EFX_OWORD_VAL(reg));
1379         }
1380
1381         /* Disable both devices */
1382         pci_clear_master(efx->pci_dev);
1383         if (efx_nic_is_dual_func(efx))
1384                 pci_clear_master(nic_data->pci_dev2);
1385         efx_nic_disable_interrupts(efx);
1386
1387         /* Count errors and reset or disable the NIC accordingly */
1388         if (efx->int_error_count == 0 ||
1389             time_after(jiffies, efx->int_error_expire)) {
1390                 efx->int_error_count = 0;
1391                 efx->int_error_expire =
1392                         jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1393         }
1394         if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1395                 netif_err(efx, hw, efx->net_dev,
1396                           "SYSTEM ERROR - reset scheduled\n");
1397                 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1398         } else {
1399                 netif_err(efx, hw, efx->net_dev,
                          "SYSTEM ERROR - max number of errors seen. "
                          "NIC will be disabled\n");
1402                 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1403         }
1404
1405         return IRQ_HANDLED;
1406 }
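
/* Worked example of the rate limiting above: the error counter resets
 * whenever EFX_INT_ERROR_EXPIRE (3600 s) has passed since the window
 * started, so with EFX_MAX_INT_ERRORS = 5 the first four fatal errors
 * in an hour each schedule a reset and the fifth disables the NIC.
 */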
1407
1408 /* Handle a legacy interrupt
 * Acknowledge the interrupt and schedule event queue processing.
1410  */
1411 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1412 {
1413         struct efx_nic *efx = dev_id;
1414         efx_oword_t *int_ker = efx->irq_status.addr;
1415         irqreturn_t result = IRQ_NONE;
1416         struct efx_channel *channel;
1417         efx_dword_t reg;
1418         u32 queues;
1419         int syserr;
1420
1421         /* Read the ISR which also ACKs the interrupts */
1422         efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1423         queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1424
1425         /* Check to see if we have a serious error condition */
1426         if (queues & (1U << efx->fatal_irq_level)) {
1427                 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1428                 if (unlikely(syserr))
1429                         return efx_nic_fatal_interrupt(efx);
1430         }
1431
1432         if (queues != 0) {
1433                 if (EFX_WORKAROUND_15783(efx))
1434                         efx->irq_zero_count = 0;
1435
1436                 /* Schedule processing of any interrupting queues */
1437                 efx_for_each_channel(channel, efx) {
1438                         if (queues & 1)
1439                                 efx_schedule_channel(channel);
1440                         queues >>= 1;
1441                 }
1442                 result = IRQ_HANDLED;
1443
1444         } else if (EFX_WORKAROUND_15783(efx)) {
1445                 efx_qword_t *event;
1446
1447                 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1448                  * because this might be a shared interrupt. */
1449                 if (efx->irq_zero_count++ == 0)
1450                         result = IRQ_HANDLED;
1451
1452                 /* Ensure we schedule or rearm all event queues */
1453                 efx_for_each_channel(channel, efx) {
1454                         event = efx_event(channel, channel->eventq_read_ptr);
1455                         if (efx_event_present(event))
1456                                 efx_schedule_channel(channel);
1457                         else
1458                                 efx_nic_eventq_read_ack(channel);
1459                 }
1460         }
1461
1462         if (result == IRQ_HANDLED) {
1463                 efx->last_irq_cpu = raw_smp_processor_id();
1464                 netif_vdbg(efx, intr, efx->net_dev,
1465                            "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1466                            irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1467         }
1468
1469         return result;
1470 }
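
/* In the legacy handler above, bit n of the ISR image corresponds to
 * channel n, hence the shift-and-test walk over the queues bitmap.
 */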
1471
1472 /* Handle an MSI interrupt
1473  *
1474  * Handle an MSI hardware interrupt.  This routine schedules event
1475  * queue processing.  No interrupt acknowledgement cycle is necessary.
1476  * Also, we never need to check that the interrupt is for us, since
1477  * MSI interrupts cannot be shared.
1478  */
1479 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1480 {
1481         struct efx_channel *channel = *(struct efx_channel **)dev_id;
1482         struct efx_nic *efx = channel->efx;
1483         efx_oword_t *int_ker = efx->irq_status.addr;
1484         int syserr;
1485
1486         efx->last_irq_cpu = raw_smp_processor_id();
1487         netif_vdbg(efx, intr, efx->net_dev,
1488                    "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1489                    irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1490
1491         /* Check to see if we have a serious error condition */
1492         if (channel->channel == efx->fatal_irq_level) {
1493                 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1494                 if (unlikely(syserr))
1495                         return efx_nic_fatal_interrupt(efx);
1496         }
1497
1498         /* Schedule processing of the channel */
1499         efx_schedule_channel(channel);
1500
1501         return IRQ_HANDLED;
1502 }
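
/* For MSI, dev_id is the address of the channel's slot in efx->channel[]
 * (a struct efx_channel **); efx_nic_init_interrupt() below passes the
 * matching pointer to request_irq().
 */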
1503
1504
/* Set up the RSS indirection table, which maps from the hash value of a
 * received packet to an RX queue.
 */
1508 void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1509 {
        size_t i;
1511         efx_dword_t dword;
1512
1513         if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1514                 return;
1515
1516         BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1517                      FR_BZ_RX_INDIRECTION_TBL_ROWS);
1518
1519         for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1520                 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1521                                      efx->rx_indir_table[i]);
1522                 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
1523         }
1524 }
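
/* Illustrative sketch only (not called by this driver): a default RSS
 * indirection table is typically filled by spreading entries evenly over
 * the RX queues before efx_nic_push_rx_indir_table() is called.  The
 * n_rx_queues parameter (which must be non-zero) stands in for however
 * the caller counts its RX queues.
 */
static void __maybe_unused
efx_fill_default_indir_table(struct efx_nic *efx, unsigned int n_rx_queues)
{
        size_t i;

        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
                efx->rx_indir_table[i] = i % n_rx_queues;
}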
1525
1526 /* Hook interrupt handler(s)
1527  * Try MSI and then legacy interrupts.
1528  */
1529 int efx_nic_init_interrupt(struct efx_nic *efx)
1530 {
        struct efx_channel *channel, *channel2;
1532         int rc;
1533
1534         if (!EFX_INT_MODE_USE_MSI(efx)) {
1535                 irq_handler_t handler;
1536                 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1537                         handler = efx_legacy_interrupt;
1538                 else
1539                         handler = falcon_legacy_interrupt_a1;
1540
1541                 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1542                                  efx->name, efx);
1543                 if (rc) {
1544                         netif_err(efx, drv, efx->net_dev,
1545                                   "failed to hook legacy IRQ %d\n",
1546                                   efx->pci_dev->irq);
1547                         goto fail1;
1548                 }
1549                 return 0;
1550         }
1551
1552         /* Hook MSI or MSI-X interrupt */
1553         efx_for_each_channel(channel, efx) {
1554                 rc = request_irq(channel->irq, efx_msi_interrupt,
                                 IRQF_PROBE_SHARED, /* not expected to be shared */
1556                                  efx->channel_name[channel->channel],
1557                                  &efx->channel[channel->channel]);
1558                 if (rc) {
1559                         netif_err(efx, drv, efx->net_dev,
1560                                   "failed to hook IRQ %d\n", channel->irq);
1561                         goto fail2;
1562                 }
1563         }
1564
1565         return 0;
1566
 fail2:
        /* Free only the IRQs that were actually requested; request_irq()
         * failed for 'channel' and was never called for later channels */
        efx_for_each_channel(channel2, efx) {
                if (channel2 == channel)
                        break;
                free_irq(channel2->irq, &efx->channel[channel2->channel]);
        }
 fail1:
        return rc;
1572 }
1573
1574 void efx_nic_fini_interrupt(struct efx_nic *efx)
1575 {
1576         struct efx_channel *channel;
1577         efx_oword_t reg;
1578
1579         /* Disable MSI/MSI-X interrupts */
1580         efx_for_each_channel(channel, efx) {
1581                 if (channel->irq)
1582                         free_irq(channel->irq, &efx->channel[channel->channel]);
1583         }
1584
1585         /* ACK legacy interrupt */
1586         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1587                 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1588         else
1589                 falcon_irq_ack_a1(efx);
1590
1591         /* Disable legacy interrupt */
1592         if (efx->legacy_irq)
1593                 free_irq(efx->legacy_irq, efx);
1594 }
1595
1596 u32 efx_nic_fpga_ver(struct efx_nic *efx)
1597 {
1598         efx_oword_t altera_build;
1599         efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1600         return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1601 }
1602
1603 void efx_nic_init_common(struct efx_nic *efx)
1604 {
1605         efx_oword_t temp;
1606
1607         /* Set positions of descriptor caches in SRAM. */
1608         EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
1609                              efx->type->tx_dc_base / 8);
1610         efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1611         EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
1612                              efx->type->rx_dc_base / 8);
1613         efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1614
1615         /* Set TX descriptor cache size. */
1616         BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1617         EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1618         efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1619
1620         /* Set RX descriptor cache size.  Set low watermark to size-8, as
1621          * this allows most efficient prefetching.
1622          */
1623         BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1624         EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1625         efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1626         EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1627         efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1628
1629         /* Program INT_KER address */
1630         EFX_POPULATE_OWORD_2(temp,
1631                              FRF_AZ_NORM_INT_VEC_DIS_KER,
1632                              EFX_INT_MODE_USE_MSI(efx),
1633                              FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1634         efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1635
1636         if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1637                 /* Use an interrupt level unused by event queues */
1638                 efx->fatal_irq_level = 0x1f;
1639         else
1640                 /* Use a valid MSI-X vector */
1641                 efx->fatal_irq_level = 0;
1642
1643         /* Enable all the genuinely fatal interrupts.  (They are still
1644          * masked by the overall interrupt mask, controlled by
1645          * falcon_interrupts()).
1646          *
1647          * Note: All other fatal interrupts are enabled
1648          */
1649         EFX_POPULATE_OWORD_3(temp,
1650                              FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1651                              FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1652                              FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1653         if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1654                 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1655         EFX_INVERT_OWORD(temp);
1656         efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1657
1658         efx_nic_push_rx_indir_table(efx);
1659
1660         /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1661          * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1662          */
1663         efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1664         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1665         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1666         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1667         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
1668         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1669         /* Enable SW_EV to inherit in char driver - assume harmless here */
1670         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1671         /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1672         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
        /* Disable the hardware watchdog, which can misfire */
1674         EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1675         /* Squash TX of packets of 16 bytes or less */
1676         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1677                 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1678         efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1679 }
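
/* Sanity of the descriptor cache sizing above: TX_DC_ENTRIES = 16 =
 * 8 << TX_DC_ENTRIES_ORDER (order 1) and RX_DC_ENTRIES = 64 =
 * 8 << RX_DC_ENTRIES_ORDER (order 3), which is exactly what the
 * BUILD_BUG_ON()s enforce; the hardware fields store only the orders.
 */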
1680
1681 /* Register dump */
1682
1683 #define REGISTER_REVISION_A     1
1684 #define REGISTER_REVISION_B     2
1685 #define REGISTER_REVISION_C     3
1686 #define REGISTER_REVISION_Z     3       /* latest revision */
1687
1688 struct efx_nic_reg {
1689         u32 offset:24;
1690         u32 min_revision:2, max_revision:2;
1691 };
1692
1693 #define REGISTER(name, min_rev, max_rev) {                              \
1694         FR_ ## min_rev ## max_rev ## _ ## name,                         \
1695         REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev    \
1696 }
1697 #define REGISTER_AA(name) REGISTER(name, A, A)
1698 #define REGISTER_AB(name) REGISTER(name, A, B)
1699 #define REGISTER_AZ(name) REGISTER(name, A, Z)
1700 #define REGISTER_BB(name) REGISTER(name, B, B)
1701 #define REGISTER_BZ(name) REGISTER(name, B, Z)
1702 #define REGISTER_CZ(name) REGISTER(name, C, Z)
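
/* For example, REGISTER_BZ(RX_FILTER_CTL) expands to
 * { FR_BZ_RX_FILTER_CTL, REGISTER_REVISION_B, REGISTER_REVISION_Z }.
 */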
1703
1704 static const struct efx_nic_reg efx_nic_regs[] = {
1705         REGISTER_AZ(ADR_REGION),
1706         REGISTER_AZ(INT_EN_KER),
1707         REGISTER_BZ(INT_EN_CHAR),
1708         REGISTER_AZ(INT_ADR_KER),
1709         REGISTER_BZ(INT_ADR_CHAR),
1710         /* INT_ACK_KER is WO */
1711         /* INT_ISR0 is RC */
1712         REGISTER_AZ(HW_INIT),
1713         REGISTER_CZ(USR_EV_CFG),
1714         REGISTER_AB(EE_SPI_HCMD),
1715         REGISTER_AB(EE_SPI_HADR),
1716         REGISTER_AB(EE_SPI_HDATA),
1717         REGISTER_AB(EE_BASE_PAGE),
1718         REGISTER_AB(EE_VPD_CFG0),
1719         /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1720         /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1721         /* PCIE_CORE_INDIRECT is indirect */
1722         REGISTER_AB(NIC_STAT),
1723         REGISTER_AB(GPIO_CTL),
1724         REGISTER_AB(GLB_CTL),
1725         /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1726         REGISTER_BZ(DP_CTRL),
1727         REGISTER_AZ(MEM_STAT),
1728         REGISTER_AZ(CS_DEBUG),
1729         REGISTER_AZ(ALTERA_BUILD),
1730         REGISTER_AZ(CSR_SPARE),
1731         REGISTER_AB(PCIE_SD_CTL0123),
1732         REGISTER_AB(PCIE_SD_CTL45),
1733         REGISTER_AB(PCIE_PCS_CTL_STAT),
1734         /* DEBUG_DATA_OUT is not used */
1735         /* DRV_EV is WO */
1736         REGISTER_AZ(EVQ_CTL),
1737         REGISTER_AZ(EVQ_CNT1),
1738         REGISTER_AZ(EVQ_CNT2),
1739         REGISTER_AZ(BUF_TBL_CFG),
1740         REGISTER_AZ(SRM_RX_DC_CFG),
1741         REGISTER_AZ(SRM_TX_DC_CFG),
1742         REGISTER_AZ(SRM_CFG),
1743         /* BUF_TBL_UPD is WO */
1744         REGISTER_AZ(SRM_UPD_EVQ),
1745         REGISTER_AZ(SRAM_PARITY),
1746         REGISTER_AZ(RX_CFG),
1747         REGISTER_BZ(RX_FILTER_CTL),
1748         /* RX_FLUSH_DESCQ is WO */
1749         REGISTER_AZ(RX_DC_CFG),
1750         REGISTER_AZ(RX_DC_PF_WM),
1751         REGISTER_BZ(RX_RSS_TKEY),
1752         /* RX_NODESC_DROP is RC */
1753         REGISTER_AA(RX_SELF_RST),
1754         /* RX_DEBUG, RX_PUSH_DROP are not used */
1755         REGISTER_CZ(RX_RSS_IPV6_REG1),
1756         REGISTER_CZ(RX_RSS_IPV6_REG2),
1757         REGISTER_CZ(RX_RSS_IPV6_REG3),
1758         /* TX_FLUSH_DESCQ is WO */
1759         REGISTER_AZ(TX_DC_CFG),
1760         REGISTER_AA(TX_CHKSM_CFG),
1761         REGISTER_AZ(TX_CFG),
1762         /* TX_PUSH_DROP is not used */
1763         REGISTER_AZ(TX_RESERVED),
1764         REGISTER_BZ(TX_PACE),
1765         /* TX_PACE_DROP_QID is RC */
1766         REGISTER_BB(TX_VLAN),
1767         REGISTER_BZ(TX_IPFIL_PORTEN),
1768         REGISTER_AB(MD_TXD),
1769         REGISTER_AB(MD_RXD),
1770         REGISTER_AB(MD_CS),
1771         REGISTER_AB(MD_PHY_ADR),
1772         REGISTER_AB(MD_ID),
1773         /* MD_STAT is RC */
1774         REGISTER_AB(MAC_STAT_DMA),
1775         REGISTER_AB(MAC_CTRL),
1776         REGISTER_BB(GEN_MODE),
1777         REGISTER_AB(MAC_MC_HASH_REG0),
1778         REGISTER_AB(MAC_MC_HASH_REG1),
1779         REGISTER_AB(GM_CFG1),
1780         REGISTER_AB(GM_CFG2),
1781         /* GM_IPG and GM_HD are not used */
1782         REGISTER_AB(GM_MAX_FLEN),
1783         /* GM_TEST is not used */
1784         REGISTER_AB(GM_ADR1),
1785         REGISTER_AB(GM_ADR2),
1786         REGISTER_AB(GMF_CFG0),
1787         REGISTER_AB(GMF_CFG1),
1788         REGISTER_AB(GMF_CFG2),
1789         REGISTER_AB(GMF_CFG3),
1790         REGISTER_AB(GMF_CFG4),
1791         REGISTER_AB(GMF_CFG5),
1792         REGISTER_BB(TX_SRC_MAC_CTL),
1793         REGISTER_AB(XM_ADR_LO),
1794         REGISTER_AB(XM_ADR_HI),
1795         REGISTER_AB(XM_GLB_CFG),
1796         REGISTER_AB(XM_TX_CFG),
1797         REGISTER_AB(XM_RX_CFG),
1798         REGISTER_AB(XM_MGT_INT_MASK),
1799         REGISTER_AB(XM_FC),
1800         REGISTER_AB(XM_PAUSE_TIME),
1801         REGISTER_AB(XM_TX_PARAM),
1802         REGISTER_AB(XM_RX_PARAM),
1803         /* XM_MGT_INT_MSK (note no 'A') is RC */
1804         REGISTER_AB(XX_PWR_RST),
1805         REGISTER_AB(XX_SD_CTL),
1806         REGISTER_AB(XX_TXDRV_CTL),
1807         /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1808         /* XX_CORE_STAT is partly RC */
1809 };
1810
1811 struct efx_nic_reg_table {
1812         u32 offset:24;
1813         u32 min_revision:2, max_revision:2;
1814         u32 step:6, rows:21;
1815 };
1816
1817 #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1818         offset,                                                         \
1819         REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,   \
1820         step, rows                                                      \
1821 }
1822 #define REGISTER_TABLE(name, min_rev, max_rev)                          \
1823         REGISTER_TABLE_DIMENSIONS(                                      \
1824                 name, FR_ ## min_rev ## max_rev ## _ ## name,           \
1825                 min_rev, max_rev,                                       \
1826                 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,        \
1827                 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1828 #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1829 #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1830 #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1831 #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1832 #define REGISTER_TABLE_BB_CZ(name)                                      \
1833         REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,           \
1834                                   FR_BZ_ ## name ## _STEP,              \
1835                                   FR_BB_ ## name ## _ROWS),             \
1836         REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,           \
1837                                   FR_BZ_ ## name ## _STEP,              \
1838                                   FR_CZ_ ## name ## _ROWS)
1839 #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
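
/* REGISTER_TABLE_BB_CZ() deliberately emits two entries that share the
 * same FR_BZ_ offset and step, because only the row count differs
 * between the B and C revisions.
 */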
1840
1841 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1842         /* DRIVER is not used */
1843         /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
1844         REGISTER_TABLE_BB(TX_IPFIL_TBL),
1845         REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
1846         REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
1847         REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
1848         REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
1849         REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1850         REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1851         REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1852         /* We can't reasonably read all of the buffer table (up to 8MB!).
1853          * However this driver will only use a few entries.  Reading
1854          * 1K entries allows for some expansion of queue count and
1855          * size before we need to change the version. */
1856         REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
1857                                   A, A, 8, 1024),
1858         REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1859                                   B, Z, 8, 1024),
1860         REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1861         REGISTER_TABLE_BB_CZ(TIMER_TBL),
1862         REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
1863         REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
1864         /* TX_FILTER_TBL0 is huge and not used by this driver */
1865         REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
1866         REGISTER_TABLE_CZ(MC_TREG_SMEM),
1867         /* MSIX_PBA_TABLE is not mapped */
        /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
1869         REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1870 };
1871
1872 size_t efx_nic_get_regs_len(struct efx_nic *efx)
1873 {
1874         const struct efx_nic_reg *reg;
1875         const struct efx_nic_reg_table *table;
1876         size_t len = 0;
1877
1878         for (reg = efx_nic_regs;
1879              reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1880              reg++)
1881                 if (efx->type->revision >= reg->min_revision &&
1882                     efx->type->revision <= reg->max_revision)
1883                         len += sizeof(efx_oword_t);
1884
1885         for (table = efx_nic_reg_tables;
1886              table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1887              table++)
1888                 if (efx->type->revision >= table->min_revision &&
1889                     efx->type->revision <= table->max_revision)
1890                         len += table->rows * min_t(size_t, table->step, 16);
1891
1892         return len;
1893 }
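
/* Each table row contributes at most 16 bytes (one 128-bit register) to
 * the dump, even where the hardware step is larger, hence the min_t()
 * clamp; efx_nic_get_regs() below applies the same clamp so the buffer
 * layout matches this length.
 */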
1894
1895 void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1896 {
1897         const struct efx_nic_reg *reg;
1898         const struct efx_nic_reg_table *table;
1899
1900         for (reg = efx_nic_regs;
1901              reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1902              reg++) {
1903                 if (efx->type->revision >= reg->min_revision &&
1904                     efx->type->revision <= reg->max_revision) {
1905                         efx_reado(efx, (efx_oword_t *)buf, reg->offset);
1906                         buf += sizeof(efx_oword_t);
1907                 }
1908         }
1909
1910         for (table = efx_nic_reg_tables;
1911              table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1912              table++) {
1913                 size_t size, i;
1914
1915                 if (!(efx->type->revision >= table->min_revision &&
1916                       efx->type->revision <= table->max_revision))
1917                         continue;
1918
1919                 size = min_t(size_t, table->step, 16);
1920
1921                 for (i = 0; i < table->rows; i++) {
1922                         switch (table->step) {
1923                         case 4: /* 32-bit register or SRAM */
1924                                 efx_readd_table(efx, buf, table->offset, i);
1925                                 break;
1926                         case 8: /* 64-bit SRAM */
1927                                 efx_sram_readq(efx,
1928                                                efx->membase + table->offset,
1929                                                buf, i);
1930                                 break;
1931                         case 16: /* 128-bit register */
1932                                 efx_reado_table(efx, buf, table->offset, i);
1933                                 break;
1934                         case 32: /* 128-bit register, interleaved */
1935                                 efx_reado_table(efx, buf, table->offset, 2 * i);
1936                                 break;
1937                         default:
1938                                 WARN_ON(1);
1939                                 return;
1940                         }
1941                         buf += size;
1942                 }
1943         }
1944 }
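
/* Illustrative sketch only, under the assumption that the two functions
 * above back the driver's ethtool register-dump hooks; the real glue
 * lives in the ethtool code, not in this file.
 */
static void __maybe_unused
efx_ethtool_get_regs_sketch(struct net_device *net_dev,
                            struct ethtool_regs *regs, void *buf)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        regs->version = efx->type->revision; /* assumed versioning scheme */
        efx_nic_get_regs(efx, buf);
}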