8033555e53c2f6f524211b4d848dcc13a0e14f78
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / qlogic / qlge / qlge_main.c
1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44
45 #include "qlge.h"
46
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |    */
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66
67 static int debug = -1;  /* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81                 "Option to enable MPI firmware dump. "
82                 "Default is OFF - Do Not allocate memory. ");
83
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87                 "Option to allow force of firmware core dump. "
88                 "Default is OFF - Do not allow.");
89
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92         {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93         /* required last entry */
94         {0,}
95 };
96
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101
102 /* This hardware semaphore causes exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCOE firmware and the FC driver.
105  */
106 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108         u32 sem_bits = 0;
109
110         switch (sem_mask) {
111         case SEM_XGMAC0_MASK:
112                 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113                 break;
114         case SEM_XGMAC1_MASK:
115                 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116                 break;
117         case SEM_ICB_MASK:
118                 sem_bits = SEM_SET << SEM_ICB_SHIFT;
119                 break;
120         case SEM_MAC_ADDR_MASK:
121                 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122                 break;
123         case SEM_FLASH_MASK:
124                 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125                 break;
126         case SEM_PROBE_MASK:
127                 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128                 break;
129         case SEM_RT_IDX_MASK:
130                 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131                 break;
132         case SEM_PROC_REG_MASK:
133                 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134                 break;
135         default:
136                 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
137                 return -EINVAL;
138         }
139
140         ql_write32(qdev, SEM, sem_bits | sem_mask);
141         return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143
144 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146         unsigned int wait_count = 30;
147         do {
148                 if (!ql_sem_trylock(qdev, sem_mask))
149                         return 0;
150                 udelay(100);
151         } while (--wait_count);
152         return -ETIMEDOUT;
153 }
154
155 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157         ql_write32(qdev, SEM, sem_mask);
158         ql_read32(qdev, SEM);   /* flush */
159 }
160
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialize
163  * process, but is also used in kernel thread API such as
164  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165  */
166 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168         u32 temp;
169         int count = UDELAY_COUNT;
170
171         while (count) {
172                 temp = ql_read32(qdev, reg);
173
174                 /* check for errors */
175                 if (temp & err_bit) {
176                         netif_alert(qdev, probe, qdev->ndev,
177                                     "register 0x%.08x access error, value = 0x%.08x!.\n",
178                                     reg, temp);
179                         return -EIO;
180                 } else if (temp & bit)
181                         return 0;
182                 udelay(UDELAY_DELAY);
183                 count--;
184         }
185         netif_alert(qdev, probe, qdev->ndev,
186                     "Timed out waiting for reg %x to come ready.\n", reg);
187         return -ETIMEDOUT;
188 }
189
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
193 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195         int count = UDELAY_COUNT;
196         u32 temp;
197
198         while (count) {
199                 temp = ql_read32(qdev, CFG);
200                 if (temp & CFG_LE)
201                         return -EIO;
202                 if (!(temp & bit))
203                         return 0;
204                 udelay(UDELAY_DELAY);
205                 count--;
206         }
207         return -ETIMEDOUT;
208 }
209
210
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
214 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215                  u16 q_id)
216 {
217         u64 map;
218         int status = 0;
219         int direction;
220         u32 mask;
221         u32 value;
222
223         direction =
224             (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225             PCI_DMA_FROMDEVICE;
226
227         map = pci_map_single(qdev->pdev, ptr, size, direction);
228         if (pci_dma_mapping_error(qdev->pdev, map)) {
229                 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230                 return -ENOMEM;
231         }
232
233         status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234         if (status)
235                 return status;
236
237         status = ql_wait_cfg(qdev, bit);
238         if (status) {
239                 netif_err(qdev, ifup, qdev->ndev,
240                           "Timed out waiting for CFG to come ready.\n");
241                 goto exit;
242         }
243
244         ql_write32(qdev, ICB_L, (u32) map);
245         ql_write32(qdev, ICB_H, (u32) (map >> 32));
246
247         mask = CFG_Q_MASK | (bit << 16);
248         value = bit | (q_id << CFG_Q_SHIFT);
249         ql_write32(qdev, CFG, (mask | value));
250
251         /*
252          * Wait for the bit to clear after signaling hw.
253          */
254         status = ql_wait_cfg(qdev, bit);
255 exit:
256         ql_sem_unlock(qdev, SEM_ICB_MASK);      /* does flush too */
257         pci_unmap_single(qdev->pdev, map, size, direction);
258         return status;
259 }
260
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
262 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263                         u32 *value)
264 {
265         u32 offset = 0;
266         int status;
267
268         switch (type) {
269         case MAC_ADDR_TYPE_MULTI_MAC:
270         case MAC_ADDR_TYPE_CAM_MAC:
271                 {
272                         status =
273                             ql_wait_reg_rdy(qdev,
274                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275                         if (status)
276                                 goto exit;
277                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
279                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280                         status =
281                             ql_wait_reg_rdy(qdev,
282                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283                         if (status)
284                                 goto exit;
285                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
286                         status =
287                             ql_wait_reg_rdy(qdev,
288                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289                         if (status)
290                                 goto exit;
291                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
293                                    MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294                         status =
295                             ql_wait_reg_rdy(qdev,
296                                 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297                         if (status)
298                                 goto exit;
299                         *value++ = ql_read32(qdev, MAC_ADDR_DATA);
300                         if (type == MAC_ADDR_TYPE_CAM_MAC) {
301                                 status =
302                                     ql_wait_reg_rdy(qdev,
303                                         MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304                                 if (status)
305                                         goto exit;
306                                 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307                                            (index << MAC_ADDR_IDX_SHIFT) | /* index */
308                                            MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309                                 status =
310                                     ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311                                                     MAC_ADDR_MR, 0);
312                                 if (status)
313                                         goto exit;
314                                 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
315                         }
316                         break;
317                 }
318         case MAC_ADDR_TYPE_VLAN:
319         case MAC_ADDR_TYPE_MULTI_FLTR:
320         default:
321                 netif_crit(qdev, ifup, qdev->ndev,
322                            "Address type %d not yet supported.\n", type);
323                 status = -EPERM;
324         }
325 exit:
326         return status;
327 }
328
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
332 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333                                u16 index)
334 {
335         u32 offset = 0;
336         int status = 0;
337
338         switch (type) {
339         case MAC_ADDR_TYPE_MULTI_MAC:
340                 {
341                         u32 upper = (addr[0] << 8) | addr[1];
342                         u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343                                         (addr[4] << 8) | (addr[5]);
344
345                         status =
346                                 ql_wait_reg_rdy(qdev,
347                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348                         if (status)
349                                 goto exit;
350                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351                                 (index << MAC_ADDR_IDX_SHIFT) |
352                                 type | MAC_ADDR_E);
353                         ql_write32(qdev, MAC_ADDR_DATA, lower);
354                         status =
355                                 ql_wait_reg_rdy(qdev,
356                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357                         if (status)
358                                 goto exit;
359                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360                                 (index << MAC_ADDR_IDX_SHIFT) |
361                                 type | MAC_ADDR_E);
362
363                         ql_write32(qdev, MAC_ADDR_DATA, upper);
364                         status =
365                                 ql_wait_reg_rdy(qdev,
366                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367                         if (status)
368                                 goto exit;
369                         break;
370                 }
371         case MAC_ADDR_TYPE_CAM_MAC:
372                 {
373                         u32 cam_output;
374                         u32 upper = (addr[0] << 8) | addr[1];
375                         u32 lower =
376                             (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377                             (addr[5]);
378                         status =
379                             ql_wait_reg_rdy(qdev,
380                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
381                         if (status)
382                                 goto exit;
383                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
384                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
385                                    type);       /* type */
386                         ql_write32(qdev, MAC_ADDR_DATA, lower);
387                         status =
388                             ql_wait_reg_rdy(qdev,
389                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
390                         if (status)
391                                 goto exit;
392                         ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
393                                    (index << MAC_ADDR_IDX_SHIFT) | /* index */
394                                    type);       /* type */
395                         ql_write32(qdev, MAC_ADDR_DATA, upper);
396                         status =
397                             ql_wait_reg_rdy(qdev,
398                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
399                         if (status)
400                                 goto exit;
401                         ql_write32(qdev, MAC_ADDR_IDX, (offset) |       /* offset */
402                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
403                                    type);       /* type */
404                         /* This field should also include the queue id
405                            and possibly the function id.  Right now we hardcode
406                            the route field to NIC core.
407                          */
408                         cam_output = (CAM_OUT_ROUTE_NIC |
409                                       (qdev->
410                                        func << CAM_OUT_FUNC_SHIFT) |
411                                         (0 << CAM_OUT_CQ_ID_SHIFT));
412                         if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
413                                 cam_output |= CAM_OUT_RV;
414                         /* route to NIC core */
415                         ql_write32(qdev, MAC_ADDR_DATA, cam_output);
416                         break;
417                 }
418         case MAC_ADDR_TYPE_VLAN:
419                 {
420                         u32 enable_bit = *((u32 *) &addr[0]);
421                         /* For VLAN, the addr actually holds a bit that
422                          * either enables or disables the vlan id we are
423                          * addressing. It's either MAC_ADDR_E on or off.
424                          * That's bit-27 we're talking about.
425                          */
426                         status =
427                             ql_wait_reg_rdy(qdev,
428                                 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
429                         if (status)
430                                 goto exit;
431                         ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
432                                    (index << MAC_ADDR_IDX_SHIFT) |      /* index */
433                                    type |       /* type */
434                                    enable_bit); /* enable/disable */
435                         break;
436                 }
437         case MAC_ADDR_TYPE_MULTI_FLTR:
438         default:
439                 netif_crit(qdev, ifup, qdev->ndev,
440                            "Address type %d not yet supported.\n", type);
441                 status = -EPERM;
442         }
443 exit:
444         return status;
445 }
446
447 /* Set or clear MAC address in hardware. We sometimes
448  * have to clear it to prevent wrong frame routing
449  * especially in a bonding environment.
450  */
451 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
452 {
453         int status;
454         char zero_mac_addr[ETH_ALEN];
455         char *addr;
456
457         if (set) {
458                 addr = &qdev->current_mac_addr[0];
459                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
460                              "Set Mac addr %pM\n", addr);
461         } else {
462                 memset(zero_mac_addr, 0, ETH_ALEN);
463                 addr = &zero_mac_addr[0];
464                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
465                              "Clearing MAC address\n");
466         }
467         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
468         if (status)
469                 return status;
470         status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
471                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
472         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
473         if (status)
474                 netif_err(qdev, ifup, qdev->ndev,
475                           "Failed to init mac address.\n");
476         return status;
477 }
478
479 void ql_link_on(struct ql_adapter *qdev)
480 {
481         netif_err(qdev, link, qdev->ndev, "Link is up.\n");
482         netif_carrier_on(qdev->ndev);
483         ql_set_mac_addr(qdev, 1);
484 }
485
486 void ql_link_off(struct ql_adapter *qdev)
487 {
488         netif_err(qdev, link, qdev->ndev, "Link is down.\n");
489         netif_carrier_off(qdev->ndev);
490         ql_set_mac_addr(qdev, 0);
491 }
492
493 /* Get a specific frame routing value from the CAM.
494  * Used for debug and reg dump.
495  */
496 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
497 {
498         int status = 0;
499
500         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
501         if (status)
502                 goto exit;
503
504         ql_write32(qdev, RT_IDX,
505                    RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
506         status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
507         if (status)
508                 goto exit;
509         *value = ql_read32(qdev, RT_DATA);
510 exit:
511         return status;
512 }
513
514 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
515  * to route different frame types to various inbound queues.  We send broadcast/
516  * multicast/error frames to the default queue for slow handling,
517  * and CAM hit/RSS frames to the fast handling queues.
518  */
519 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
520                               int enable)
521 {
522         int status = -EINVAL; /* Return error if no mask match. */
523         u32 value = 0;
524
525         switch (mask) {
526         case RT_IDX_CAM_HIT:
527                 {
528                         value = RT_IDX_DST_CAM_Q |      /* dest */
529                             RT_IDX_TYPE_NICQ |  /* type */
530                             (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
531                         break;
532                 }
533         case RT_IDX_VALID:      /* Promiscuous Mode frames. */
534                 {
535                         value = RT_IDX_DST_DFLT_Q |     /* dest */
536                             RT_IDX_TYPE_NICQ |  /* type */
537                             (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
538                         break;
539                 }
540         case RT_IDX_ERR:        /* Pass up MAC,IP,TCP/UDP error frames. */
541                 {
542                         value = RT_IDX_DST_DFLT_Q |     /* dest */
543                             RT_IDX_TYPE_NICQ |  /* type */
544                             (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
545                         break;
546                 }
547         case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
548                 {
549                         value = RT_IDX_DST_DFLT_Q | /* dest */
550                                 RT_IDX_TYPE_NICQ | /* type */
551                                 (RT_IDX_IP_CSUM_ERR_SLOT <<
552                                 RT_IDX_IDX_SHIFT); /* index */
553                         break;
554                 }
555         case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
556                 {
557                         value = RT_IDX_DST_DFLT_Q | /* dest */
558                                 RT_IDX_TYPE_NICQ | /* type */
559                                 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
560                                 RT_IDX_IDX_SHIFT); /* index */
561                         break;
562                 }
563         case RT_IDX_BCAST:      /* Pass up Broadcast frames to default Q. */
564                 {
565                         value = RT_IDX_DST_DFLT_Q |     /* dest */
566                             RT_IDX_TYPE_NICQ |  /* type */
567                             (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
568                         break;
569                 }
570         case RT_IDX_MCAST:      /* Pass up All Multicast frames. */
571                 {
572                         value = RT_IDX_DST_DFLT_Q |     /* dest */
573                             RT_IDX_TYPE_NICQ |  /* type */
574                             (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
575                         break;
576                 }
577         case RT_IDX_MCAST_MATCH:        /* Pass up matched Multicast frames. */
578                 {
579                         value = RT_IDX_DST_DFLT_Q |     /* dest */
580                             RT_IDX_TYPE_NICQ |  /* type */
581                             (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
582                         break;
583                 }
584         case RT_IDX_RSS_MATCH:  /* Pass up matched RSS frames. */
585                 {
586                         value = RT_IDX_DST_RSS |        /* dest */
587                             RT_IDX_TYPE_NICQ |  /* type */
588                             (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
589                         break;
590                 }
591         case 0:         /* Clear the E-bit on an entry. */
592                 {
593                         value = RT_IDX_DST_DFLT_Q |     /* dest */
594                             RT_IDX_TYPE_NICQ |  /* type */
595                             (index << RT_IDX_IDX_SHIFT);/* index */
596                         break;
597                 }
598         default:
599                 netif_err(qdev, ifup, qdev->ndev,
600                           "Mask type %d not yet supported.\n", mask);
601                 status = -EPERM;
602                 goto exit;
603         }
604
605         if (value) {
606                 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
607                 if (status)
608                         goto exit;
609                 value |= (enable ? RT_IDX_E : 0);
610                 ql_write32(qdev, RT_IDX, value);
611                 ql_write32(qdev, RT_DATA, enable ? mask : 0);
612         }
613 exit:
614         return status;
615 }
616
617 static void ql_enable_interrupts(struct ql_adapter *qdev)
618 {
619         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
620 }
621
622 static void ql_disable_interrupts(struct ql_adapter *qdev)
623 {
624         ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
625 }
626
627 /* If we're running with multiple MSI-X vectors then we enable on the fly.
628  * Otherwise, we may have multiple outstanding workers and don't want to
629  * enable until the last one finishes. In this case, the irq_cnt gets
630  * incremented every time we queue a worker and decremented every time
631  * a worker finishes.  Once it hits zero we enable the interrupt.
632  */
633 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
634 {
635         u32 var = 0;
636         unsigned long hw_flags = 0;
637         struct intr_context *ctx = qdev->intr_context + intr;
638
639         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
640                 /* Always enable if we're MSIX multi interrupts and
641                  * it's not the default (zeroeth) interrupt.
642                  */
643                 ql_write32(qdev, INTR_EN,
644                            ctx->intr_en_mask);
645                 var = ql_read32(qdev, STS);
646                 return var;
647         }
648
649         spin_lock_irqsave(&qdev->hw_lock, hw_flags);
650         if (atomic_dec_and_test(&ctx->irq_cnt)) {
651                 ql_write32(qdev, INTR_EN,
652                            ctx->intr_en_mask);
653                 var = ql_read32(qdev, STS);
654         }
655         spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
656         return var;
657 }
658
659 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
660 {
661         u32 var = 0;
662         struct intr_context *ctx;
663
664         /* HW disables for us if we're MSIX multi interrupts and
665          * it's not the default (zeroeth) interrupt.
666          */
667         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
668                 return 0;
669
670         ctx = qdev->intr_context + intr;
671         spin_lock(&qdev->hw_lock);
672         if (!atomic_read(&ctx->irq_cnt)) {
673                 ql_write32(qdev, INTR_EN,
674                 ctx->intr_dis_mask);
675                 var = ql_read32(qdev, STS);
676         }
677         atomic_inc(&ctx->irq_cnt);
678         spin_unlock(&qdev->hw_lock);
679         return var;
680 }
681
682 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
683 {
684         int i;
685         for (i = 0; i < qdev->intr_count; i++) {
686                 /* The enable call does a atomic_dec_and_test
687                  * and enables only if the result is zero.
688                  * So we precharge it here.
689                  */
690                 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
691                         i == 0))
692                         atomic_set(&qdev->intr_context[i].irq_cnt, 1);
693                 ql_enable_completion_interrupt(qdev, i);
694         }
695
696 }
697
698 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
699 {
700         int status, i;
701         u16 csum = 0;
702         __le16 *flash = (__le16 *)&qdev->flash;
703
704         status = strncmp((char *)&qdev->flash, str, 4);
705         if (status) {
706                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
707                 return  status;
708         }
709
710         for (i = 0; i < size; i++)
711                 csum += le16_to_cpu(*flash++);
712
713         if (csum)
714                 netif_err(qdev, ifup, qdev->ndev,
715                           "Invalid flash checksum, csum = 0x%.04x.\n", csum);
716
717         return csum;
718 }
719
720 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
721 {
722         int status = 0;
723         /* wait for reg to come ready */
724         status = ql_wait_reg_rdy(qdev,
725                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
726         if (status)
727                 goto exit;
728         /* set up for reg read */
729         ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
730         /* wait for reg to come ready */
731         status = ql_wait_reg_rdy(qdev,
732                         FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
733         if (status)
734                 goto exit;
735          /* This data is stored on flash as an array of
736          * __le32.  Since ql_read32() returns cpu endian
737          * we need to swap it back.
738          */
739         *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
740 exit:
741         return status;
742 }
743
744 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
745 {
746         u32 i, size;
747         int status;
748         __le32 *p = (__le32 *)&qdev->flash;
749         u32 offset;
750         u8 mac_addr[6];
751
752         /* Get flash offset for function and adjust
753          * for dword access.
754          */
755         if (!qdev->port)
756                 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
757         else
758                 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
759
760         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
761                 return -ETIMEDOUT;
762
763         size = sizeof(struct flash_params_8000) / sizeof(u32);
764         for (i = 0; i < size; i++, p++) {
765                 status = ql_read_flash_word(qdev, i+offset, p);
766                 if (status) {
767                         netif_err(qdev, ifup, qdev->ndev,
768                                   "Error reading flash.\n");
769                         goto exit;
770                 }
771         }
772
773         status = ql_validate_flash(qdev,
774                         sizeof(struct flash_params_8000) / sizeof(u16),
775                         "8000");
776         if (status) {
777                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
778                 status = -EINVAL;
779                 goto exit;
780         }
781
782         /* Extract either manufacturer or BOFM modified
783          * MAC address.
784          */
785         if (qdev->flash.flash_params_8000.data_type1 == 2)
786                 memcpy(mac_addr,
787                         qdev->flash.flash_params_8000.mac_addr1,
788                         qdev->ndev->addr_len);
789         else
790                 memcpy(mac_addr,
791                         qdev->flash.flash_params_8000.mac_addr,
792                         qdev->ndev->addr_len);
793
794         if (!is_valid_ether_addr(mac_addr)) {
795                 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
796                 status = -EINVAL;
797                 goto exit;
798         }
799
800         memcpy(qdev->ndev->dev_addr,
801                 mac_addr,
802                 qdev->ndev->addr_len);
803
804 exit:
805         ql_sem_unlock(qdev, SEM_FLASH_MASK);
806         return status;
807 }
808
809 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
810 {
811         int i;
812         int status;
813         __le32 *p = (__le32 *)&qdev->flash;
814         u32 offset = 0;
815         u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
816
817         /* Second function's parameters follow the first
818          * function's.
819          */
820         if (qdev->port)
821                 offset = size;
822
823         if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
824                 return -ETIMEDOUT;
825
826         for (i = 0; i < size; i++, p++) {
827                 status = ql_read_flash_word(qdev, i+offset, p);
828                 if (status) {
829                         netif_err(qdev, ifup, qdev->ndev,
830                                   "Error reading flash.\n");
831                         goto exit;
832                 }
833
834         }
835
836         status = ql_validate_flash(qdev,
837                         sizeof(struct flash_params_8012) / sizeof(u16),
838                         "8012");
839         if (status) {
840                 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
841                 status = -EINVAL;
842                 goto exit;
843         }
844
845         if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
846                 status = -EINVAL;
847                 goto exit;
848         }
849
850         memcpy(qdev->ndev->dev_addr,
851                 qdev->flash.flash_params_8012.mac_addr,
852                 qdev->ndev->addr_len);
853
854 exit:
855         ql_sem_unlock(qdev, SEM_FLASH_MASK);
856         return status;
857 }
858
859 /* xgmac register are located behind the xgmac_addr and xgmac_data
860  * register pair.  Each read/write requires us to wait for the ready
861  * bit before reading/writing the data.
862  */
863 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
864 {
865         int status;
866         /* wait for reg to come ready */
867         status = ql_wait_reg_rdy(qdev,
868                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
869         if (status)
870                 return status;
871         /* write the data to the data reg */
872         ql_write32(qdev, XGMAC_DATA, data);
873         /* trigger the write */
874         ql_write32(qdev, XGMAC_ADDR, reg);
875         return status;
876 }
877
878 /* xgmac register are located behind the xgmac_addr and xgmac_data
879  * register pair.  Each read/write requires us to wait for the ready
880  * bit before reading/writing the data.
881  */
882 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
883 {
884         int status = 0;
885         /* wait for reg to come ready */
886         status = ql_wait_reg_rdy(qdev,
887                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
888         if (status)
889                 goto exit;
890         /* set up for reg read */
891         ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
892         /* wait for reg to come ready */
893         status = ql_wait_reg_rdy(qdev,
894                         XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
895         if (status)
896                 goto exit;
897         /* get the data */
898         *data = ql_read32(qdev, XGMAC_DATA);
899 exit:
900         return status;
901 }
902
903 /* This is used for reading the 64-bit statistics regs. */
904 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
905 {
906         int status = 0;
907         u32 hi = 0;
908         u32 lo = 0;
909
910         status = ql_read_xgmac_reg(qdev, reg, &lo);
911         if (status)
912                 goto exit;
913
914         status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
915         if (status)
916                 goto exit;
917
918         *data = (u64) lo | ((u64) hi << 32);
919
920 exit:
921         return status;
922 }
923
924 static int ql_8000_port_initialize(struct ql_adapter *qdev)
925 {
926         int status;
927         /*
928          * Get MPI firmware version for driver banner
929          * and ethool info.
930          */
931         status = ql_mb_about_fw(qdev);
932         if (status)
933                 goto exit;
934         status = ql_mb_get_fw_state(qdev);
935         if (status)
936                 goto exit;
937         /* Wake up a worker to get/set the TX/RX frame sizes. */
938         queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
939 exit:
940         return status;
941 }
942
943 /* Take the MAC Core out of reset.
944  * Enable statistics counting.
945  * Take the transmitter/receiver out of reset.
946  * This functionality may be done in the MPI firmware at a
947  * later date.
948  */
949 static int ql_8012_port_initialize(struct ql_adapter *qdev)
950 {
951         int status = 0;
952         u32 data;
953
954         if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
955                 /* Another function has the semaphore, so
956                  * wait for the port init bit to come ready.
957                  */
958                 netif_info(qdev, link, qdev->ndev,
959                            "Another function has the semaphore, so wait for the port init bit to come ready.\n");
960                 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
961                 if (status) {
962                         netif_crit(qdev, link, qdev->ndev,
963                                    "Port initialize timed out.\n");
964                 }
965                 return status;
966         }
967
968         netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
969         /* Set the core reset. */
970         status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
971         if (status)
972                 goto end;
973         data |= GLOBAL_CFG_RESET;
974         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
975         if (status)
976                 goto end;
977
978         /* Clear the core reset and turn on jumbo for receiver. */
979         data &= ~GLOBAL_CFG_RESET;      /* Clear core reset. */
980         data |= GLOBAL_CFG_JUMBO;       /* Turn on jumbo. */
981         data |= GLOBAL_CFG_TX_STAT_EN;
982         data |= GLOBAL_CFG_RX_STAT_EN;
983         status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
984         if (status)
985                 goto end;
986
987         /* Enable transmitter, and clear it's reset. */
988         status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
989         if (status)
990                 goto end;
991         data &= ~TX_CFG_RESET;  /* Clear the TX MAC reset. */
992         data |= TX_CFG_EN;      /* Enable the transmitter. */
993         status = ql_write_xgmac_reg(qdev, TX_CFG, data);
994         if (status)
995                 goto end;
996
997         /* Enable receiver and clear it's reset. */
998         status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
999         if (status)
1000                 goto end;
1001         data &= ~RX_CFG_RESET;  /* Clear the RX MAC reset. */
1002         data |= RX_CFG_EN;      /* Enable the receiver. */
1003         status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1004         if (status)
1005                 goto end;
1006
1007         /* Turn on jumbo. */
1008         status =
1009             ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1010         if (status)
1011                 goto end;
1012         status =
1013             ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1014         if (status)
1015                 goto end;
1016
1017         /* Signal to the world that the port is enabled.        */
1018         ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1019 end:
1020         ql_sem_unlock(qdev, qdev->xg_sem_mask);
1021         return status;
1022 }
1023
1024 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1025 {
1026         return PAGE_SIZE << qdev->lbq_buf_order;
1027 }
1028
1029 /* Get the next large buffer. */
1030 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1031 {
1032         struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1033         rx_ring->lbq_curr_idx++;
1034         if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1035                 rx_ring->lbq_curr_idx = 0;
1036         rx_ring->lbq_free_cnt++;
1037         return lbq_desc;
1038 }
1039
1040 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1041                 struct rx_ring *rx_ring)
1042 {
1043         struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1044
1045         pci_dma_sync_single_for_cpu(qdev->pdev,
1046                                         dma_unmap_addr(lbq_desc, mapaddr),
1047                                     rx_ring->lbq_buf_size,
1048                                         PCI_DMA_FROMDEVICE);
1049
1050         /* If it's the last chunk of our master page then
1051          * we unmap it.
1052          */
1053         if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1054                                         == ql_lbq_block_size(qdev))
1055                 pci_unmap_page(qdev->pdev,
1056                                 lbq_desc->p.pg_chunk.map,
1057                                 ql_lbq_block_size(qdev),
1058                                 PCI_DMA_FROMDEVICE);
1059         return lbq_desc;
1060 }
1061
1062 /* Get the next small buffer. */
1063 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1064 {
1065         struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1066         rx_ring->sbq_curr_idx++;
1067         if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1068                 rx_ring->sbq_curr_idx = 0;
1069         rx_ring->sbq_free_cnt++;
1070         return sbq_desc;
1071 }
1072
1073 /* Update an rx ring index. */
1074 static void ql_update_cq(struct rx_ring *rx_ring)
1075 {
1076         rx_ring->cnsmr_idx++;
1077         rx_ring->curr_entry++;
1078         if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1079                 rx_ring->cnsmr_idx = 0;
1080                 rx_ring->curr_entry = rx_ring->cq_base;
1081         }
1082 }
1083
1084 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1085 {
1086         ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1087 }
1088
1089 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1090                                                 struct bq_desc *lbq_desc)
1091 {
1092         if (!rx_ring->pg_chunk.page) {
1093                 u64 map;
1094                 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1095                                                 GFP_ATOMIC,
1096                                                 qdev->lbq_buf_order);
1097                 if (unlikely(!rx_ring->pg_chunk.page)) {
1098                         netif_err(qdev, drv, qdev->ndev,
1099                                   "page allocation failed.\n");
1100                         return -ENOMEM;
1101                 }
1102                 rx_ring->pg_chunk.offset = 0;
1103                 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1104                                         0, ql_lbq_block_size(qdev),
1105                                         PCI_DMA_FROMDEVICE);
1106                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107                         __free_pages(rx_ring->pg_chunk.page,
1108                                         qdev->lbq_buf_order);
1109                         netif_err(qdev, drv, qdev->ndev,
1110                                   "PCI mapping failed.\n");
1111                         return -ENOMEM;
1112                 }
1113                 rx_ring->pg_chunk.map = map;
1114                 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1115         }
1116
1117         /* Copy the current master pg_chunk info
1118          * to the current descriptor.
1119          */
1120         lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1121
1122         /* Adjust the master page chunk for next
1123          * buffer get.
1124          */
1125         rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1126         if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1127                 rx_ring->pg_chunk.page = NULL;
1128                 lbq_desc->p.pg_chunk.last_flag = 1;
1129         } else {
1130                 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1131                 get_page(rx_ring->pg_chunk.page);
1132                 lbq_desc->p.pg_chunk.last_flag = 0;
1133         }
1134         return 0;
1135 }
1136 /* Process (refill) a large buffer queue. */
1137 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1138 {
1139         u32 clean_idx = rx_ring->lbq_clean_idx;
1140         u32 start_idx = clean_idx;
1141         struct bq_desc *lbq_desc;
1142         u64 map;
1143         int i;
1144
1145         while (rx_ring->lbq_free_cnt > 32) {
1146                 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1147                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1148                                      "lbq: try cleaning clean_idx = %d.\n",
1149                                      clean_idx);
1150                         lbq_desc = &rx_ring->lbq[clean_idx];
1151                         if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1152                                 rx_ring->lbq_clean_idx = clean_idx;
1153                                 netif_err(qdev, ifup, qdev->ndev,
1154                                                 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1155                                                 i, clean_idx);
1156                                 return;
1157                         }
1158
1159                         map = lbq_desc->p.pg_chunk.map +
1160                                 lbq_desc->p.pg_chunk.offset;
1161                                 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1162                         dma_unmap_len_set(lbq_desc, maplen,
1163                                         rx_ring->lbq_buf_size);
1164                                 *lbq_desc->addr = cpu_to_le64(map);
1165
1166                         pci_dma_sync_single_for_device(qdev->pdev, map,
1167                                                 rx_ring->lbq_buf_size,
1168                                                 PCI_DMA_FROMDEVICE);
1169                         clean_idx++;
1170                         if (clean_idx == rx_ring->lbq_len)
1171                                 clean_idx = 0;
1172                 }
1173
1174                 rx_ring->lbq_clean_idx = clean_idx;
1175                 rx_ring->lbq_prod_idx += 16;
1176                 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1177                         rx_ring->lbq_prod_idx = 0;
1178                 rx_ring->lbq_free_cnt -= 16;
1179         }
1180
1181         if (start_idx != clean_idx) {
1182                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183                              "lbq: updating prod idx = %d.\n",
1184                              rx_ring->lbq_prod_idx);
1185                 ql_write_db_reg(rx_ring->lbq_prod_idx,
1186                                 rx_ring->lbq_prod_idx_db_reg);
1187         }
1188 }
1189
1190 /* Process (refill) a small buffer queue. */
1191 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1192 {
1193         u32 clean_idx = rx_ring->sbq_clean_idx;
1194         u32 start_idx = clean_idx;
1195         struct bq_desc *sbq_desc;
1196         u64 map;
1197         int i;
1198
1199         while (rx_ring->sbq_free_cnt > 16) {
1200                 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1201                         sbq_desc = &rx_ring->sbq[clean_idx];
1202                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1203                                      "sbq: try cleaning clean_idx = %d.\n",
1204                                      clean_idx);
1205                         if (sbq_desc->p.skb == NULL) {
1206                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1207                                              qdev->ndev,
1208                                              "sbq: getting new skb for index %d.\n",
1209                                              sbq_desc->index);
1210                                 sbq_desc->p.skb =
1211                                     netdev_alloc_skb(qdev->ndev,
1212                                                      SMALL_BUFFER_SIZE);
1213                                 if (sbq_desc->p.skb == NULL) {
1214                                         netif_err(qdev, probe, qdev->ndev,
1215                                                   "Couldn't get an skb.\n");
1216                                         rx_ring->sbq_clean_idx = clean_idx;
1217                                         return;
1218                                 }
1219                                 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220                                 map = pci_map_single(qdev->pdev,
1221                                                      sbq_desc->p.skb->data,
1222                                                      rx_ring->sbq_buf_size,
1223                                                      PCI_DMA_FROMDEVICE);
1224                                 if (pci_dma_mapping_error(qdev->pdev, map)) {
1225                                         netif_err(qdev, ifup, qdev->ndev,
1226                                                   "PCI mapping failed.\n");
1227                                         rx_ring->sbq_clean_idx = clean_idx;
1228                                         dev_kfree_skb_any(sbq_desc->p.skb);
1229                                         sbq_desc->p.skb = NULL;
1230                                         return;
1231                                 }
1232                                 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233                                 dma_unmap_len_set(sbq_desc, maplen,
1234                                                   rx_ring->sbq_buf_size);
1235                                 *sbq_desc->addr = cpu_to_le64(map);
1236                         }
1237
1238                         clean_idx++;
1239                         if (clean_idx == rx_ring->sbq_len)
1240                                 clean_idx = 0;
1241                 }
1242                 rx_ring->sbq_clean_idx = clean_idx;
1243                 rx_ring->sbq_prod_idx += 16;
1244                 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245                         rx_ring->sbq_prod_idx = 0;
1246                 rx_ring->sbq_free_cnt -= 16;
1247         }
1248
1249         if (start_idx != clean_idx) {
1250                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251                              "sbq: updating prod idx = %d.\n",
1252                              rx_ring->sbq_prod_idx);
1253                 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254                                 rx_ring->sbq_prod_idx_db_reg);
1255         }
1256 }
1257
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259                                     struct rx_ring *rx_ring)
1260 {
1261         ql_update_sbq(qdev, rx_ring);
1262         ql_update_lbq(qdev, rx_ring);
1263 }
1264
1265 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1266  * fails at some stage, or from the interrupt when a tx completes.
1267  */
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269                           struct tx_ring_desc *tx_ring_desc, int mapped)
1270 {
1271         int i;
1272         for (i = 0; i < mapped; i++) {
1273                 if (i == 0 || (i == 7 && mapped > 7)) {
1274                         /*
1275                          * Unmap the skb->data area, or the
1276                          * external sglist (AKA the Outbound
1277                          * Address List (OAL)).
1278                          * If its the zeroeth element, then it's
1279                          * the skb->data area.  If it's the 7th
1280                          * element and there is more than 6 frags,
1281                          * then its an OAL.
1282                          */
1283                         if (i == 7) {
1284                                 netif_printk(qdev, tx_done, KERN_DEBUG,
1285                                              qdev->ndev,
1286                                              "unmapping OAL area.\n");
1287                         }
1288                         pci_unmap_single(qdev->pdev,
1289                                          dma_unmap_addr(&tx_ring_desc->map[i],
1290                                                         mapaddr),
1291                                          dma_unmap_len(&tx_ring_desc->map[i],
1292                                                        maplen),
1293                                          PCI_DMA_TODEVICE);
1294                 } else {
1295                         netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296                                      "unmapping frag %d.\n", i);
1297                         pci_unmap_page(qdev->pdev,
1298                                        dma_unmap_addr(&tx_ring_desc->map[i],
1299                                                       mapaddr),
1300                                        dma_unmap_len(&tx_ring_desc->map[i],
1301                                                      maplen), PCI_DMA_TODEVICE);
1302                 }
1303         }
1304
1305 }
1306
1307 /* Map the buffers for this transmit.  This will return
1308  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1309  */
1310 static int ql_map_send(struct ql_adapter *qdev,
1311                        struct ob_mac_iocb_req *mac_iocb_ptr,
1312                        struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1313 {
1314         int len = skb_headlen(skb);
1315         dma_addr_t map;
1316         int frag_idx, err, map_idx = 0;
1317         struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318         int frag_cnt = skb_shinfo(skb)->nr_frags;
1319
1320         if (frag_cnt) {
1321                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322                              "frag_cnt = %d.\n", frag_cnt);
1323         }
1324         /*
1325          * Map the skb buffer first.
1326          */
1327         map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1328
1329         err = pci_dma_mapping_error(qdev->pdev, map);
1330         if (err) {
1331                 netif_err(qdev, tx_queued, qdev->ndev,
1332                           "PCI mapping failed with error: %d\n", err);
1333
1334                 return NETDEV_TX_BUSY;
1335         }
1336
1337         tbd->len = cpu_to_le32(len);
1338         tbd->addr = cpu_to_le64(map);
1339         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1341         map_idx++;
1342
1343         /*
1344          * This loop fills the remainder of the 8 address descriptors
1345          * in the IOCB.  If there are more than 7 fragments, then the
1346          * eighth address desc will point to an external list (OAL).
1347          * When this happens, the remainder of the frags will be stored
1348          * in this list.
1349          */
1350         for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351                 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1352                 tbd++;
1353                 if (frag_idx == 6 && frag_cnt > 7) {
1354                         /* Let's tack on an sglist.
1355                          * Our control block will now
1356                          * look like this:
1357                          * iocb->seg[0] = skb->data
1358                          * iocb->seg[1] = frag[0]
1359                          * iocb->seg[2] = frag[1]
1360                          * iocb->seg[3] = frag[2]
1361                          * iocb->seg[4] = frag[3]
1362                          * iocb->seg[5] = frag[4]
1363                          * iocb->seg[6] = frag[5]
1364                          * iocb->seg[7] = ptr to OAL (external sglist)
1365                          * oal->seg[0] = frag[6]
1366                          * oal->seg[1] = frag[7]
1367                          * oal->seg[2] = frag[8]
1368                          * oal->seg[3] = frag[9]
1369                          * oal->seg[4] = frag[10]
1370                          *      etc...
1371                          */
1372                         /* Tack on the OAL in the eighth segment of IOCB. */
1373                         map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1374                                              sizeof(struct oal),
1375                                              PCI_DMA_TODEVICE);
1376                         err = pci_dma_mapping_error(qdev->pdev, map);
1377                         if (err) {
1378                                 netif_err(qdev, tx_queued, qdev->ndev,
1379                                           "PCI mapping outbound address list with error: %d\n",
1380                                           err);
1381                                 goto map_error;
1382                         }
1383
1384                         tbd->addr = cpu_to_le64(map);
1385                         /*
1386                          * The length is the number of fragments
1387                          * that remain to be mapped times the length
1388                          * of our sglist (OAL).
1389                          */
1390                         tbd->len =
1391                             cpu_to_le32((sizeof(struct tx_buf_desc) *
1392                                          (frag_cnt - frag_idx)) | TX_DESC_C);
1393                         dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1394                                            map);
1395                         dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396                                           sizeof(struct oal));
1397                         tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1398                         map_idx++;
1399                 }
1400
1401                 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1402                                        DMA_TO_DEVICE);
1403
1404                 err = dma_mapping_error(&qdev->pdev->dev, map);
1405                 if (err) {
1406                         netif_err(qdev, tx_queued, qdev->ndev,
1407                                   "PCI mapping frags failed with error: %d.\n",
1408                                   err);
1409                         goto map_error;
1410                 }
1411
1412                 tbd->addr = cpu_to_le64(map);
1413                 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414                 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415                 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416                                   skb_frag_size(frag));
1417
1418         }
1419         /* Save the number of segments we've mapped. */
1420         tx_ring_desc->map_cnt = map_idx;
1421         /* Terminate the last segment. */
1422         tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423         return NETDEV_TX_OK;
1424
1425 map_error:
1426         /*
1427          * If the first frag mapping failed, then i will be zero.
1428          * This causes the unmap of the skb->data area.  Otherwise
1429          * we pass in the number of frags that mapped successfully
1430          * so they can be umapped.
1431          */
1432         ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433         return NETDEV_TX_BUSY;
1434 }
1435
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438                                  struct rx_ring *rx_ring)
1439 {
1440         struct nic_stats *stats = &qdev->nic_stats;
1441
1442         stats->rx_err_count++;
1443         rx_ring->rx_errors++;
1444
1445         switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446         case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447                 stats->rx_code_err++;
1448                 break;
1449         case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450                 stats->rx_oversize_err++;
1451                 break;
1452         case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453                 stats->rx_undersize_err++;
1454                 break;
1455         case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456                 stats->rx_preamble_err++;
1457                 break;
1458         case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459                 stats->rx_frame_len_err++;
1460                 break;
1461         case IB_MAC_IOCB_RSP_ERR_CRC:
1462                 stats->rx_crc_err++;
1463         default:
1464                 break;
1465         }
1466 }
1467
1468 /* Process an inbound completion from an rx ring. */
1469 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1470                                         struct rx_ring *rx_ring,
1471                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1472                                         u32 length,
1473                                         u16 vlan_id)
1474 {
1475         struct sk_buff *skb;
1476         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1477         struct napi_struct *napi = &rx_ring->napi;
1478
1479         /* Frame error, so drop the packet. */
1480         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1481                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1482                 put_page(lbq_desc->p.pg_chunk.page);
1483                 return;
1484         }
1485         napi->dev = qdev->ndev;
1486
1487         skb = napi_get_frags(napi);
1488         if (!skb) {
1489                 netif_err(qdev, drv, qdev->ndev,
1490                           "Couldn't get an skb, exiting.\n");
1491                 rx_ring->rx_dropped++;
1492                 put_page(lbq_desc->p.pg_chunk.page);
1493                 return;
1494         }
1495         prefetch(lbq_desc->p.pg_chunk.va);
1496         __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1497                              lbq_desc->p.pg_chunk.page,
1498                              lbq_desc->p.pg_chunk.offset,
1499                              length);
1500
1501         skb->len += length;
1502         skb->data_len += length;
1503         skb->truesize += length;
1504         skb_shinfo(skb)->nr_frags++;
1505
1506         rx_ring->rx_packets++;
1507         rx_ring->rx_bytes += length;
1508         skb->ip_summed = CHECKSUM_UNNECESSARY;
1509         skb_record_rx_queue(skb, rx_ring->cq_id);
1510         if (vlan_id != 0xffff)
1511                 __vlan_hwaccel_put_tag(skb, vlan_id);
1512         napi_gro_frags(napi);
1513 }
1514
1515 /* Process an inbound completion from an rx ring. */
1516 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1517                                         struct rx_ring *rx_ring,
1518                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1519                                         u32 length,
1520                                         u16 vlan_id)
1521 {
1522         struct net_device *ndev = qdev->ndev;
1523         struct sk_buff *skb = NULL;
1524         void *addr;
1525         struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1526         struct napi_struct *napi = &rx_ring->napi;
1527
1528         skb = netdev_alloc_skb(ndev, length);
1529         if (!skb) {
1530                 netif_err(qdev, drv, qdev->ndev,
1531                           "Couldn't get an skb, need to unwind!.\n");
1532                 rx_ring->rx_dropped++;
1533                 put_page(lbq_desc->p.pg_chunk.page);
1534                 return;
1535         }
1536
1537         addr = lbq_desc->p.pg_chunk.va;
1538         prefetch(addr);
1539
1540         /* Frame error, so drop the packet. */
1541         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1542                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1543                 goto err_out;
1544         }
1545
1546         /* The max framesize filter on this chip is set higher than
1547          * MTU since FCoE uses 2k frames.
1548          */
1549         if (skb->len > ndev->mtu + ETH_HLEN) {
1550                 netif_err(qdev, drv, qdev->ndev,
1551                           "Segment too small, dropping.\n");
1552                 rx_ring->rx_dropped++;
1553                 goto err_out;
1554         }
1555         memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1556         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1557                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1558                      length);
1559         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1560                                 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1561                                 length-ETH_HLEN);
1562         skb->len += length-ETH_HLEN;
1563         skb->data_len += length-ETH_HLEN;
1564         skb->truesize += length-ETH_HLEN;
1565
1566         rx_ring->rx_packets++;
1567         rx_ring->rx_bytes += skb->len;
1568         skb->protocol = eth_type_trans(skb, ndev);
1569         skb_checksum_none_assert(skb);
1570
1571         if ((ndev->features & NETIF_F_RXCSUM) &&
1572                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1573                 /* TCP frame. */
1574                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1575                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1576                                      "TCP checksum done!\n");
1577                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1578                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1579                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1580                         /* Unfragmented ipv4 UDP frame. */
1581                         struct iphdr *iph =
1582                                 (struct iphdr *) ((u8 *)addr + ETH_HLEN);
1583                         if (!(iph->frag_off &
1584                                 htons(IP_MF|IP_OFFSET))) {
1585                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1586                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1587                                              qdev->ndev,
1588                                              "UDP checksum done!\n");
1589                         }
1590                 }
1591         }
1592
1593         skb_record_rx_queue(skb, rx_ring->cq_id);
1594         if (vlan_id != 0xffff)
1595                 __vlan_hwaccel_put_tag(skb, vlan_id);
1596         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1597                 napi_gro_receive(napi, skb);
1598         else
1599                 netif_receive_skb(skb);
1600         return;
1601 err_out:
1602         dev_kfree_skb_any(skb);
1603         put_page(lbq_desc->p.pg_chunk.page);
1604 }
1605
1606 /* Process an inbound completion from an rx ring. */
1607 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1608                                         struct rx_ring *rx_ring,
1609                                         struct ib_mac_iocb_rsp *ib_mac_rsp,
1610                                         u32 length,
1611                                         u16 vlan_id)
1612 {
1613         struct net_device *ndev = qdev->ndev;
1614         struct sk_buff *skb = NULL;
1615         struct sk_buff *new_skb = NULL;
1616         struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1617
1618         skb = sbq_desc->p.skb;
1619         /* Allocate new_skb and copy */
1620         new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1621         if (new_skb == NULL) {
1622                 netif_err(qdev, probe, qdev->ndev,
1623                           "No skb available, drop the packet.\n");
1624                 rx_ring->rx_dropped++;
1625                 return;
1626         }
1627         skb_reserve(new_skb, NET_IP_ALIGN);
1628         memcpy(skb_put(new_skb, length), skb->data, length);
1629         skb = new_skb;
1630
1631         /* Frame error, so drop the packet. */
1632         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1633                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1634                 dev_kfree_skb_any(skb);
1635                 return;
1636         }
1637
1638         /* loopback self test for ethtool */
1639         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1640                 ql_check_lb_frame(qdev, skb);
1641                 dev_kfree_skb_any(skb);
1642                 return;
1643         }
1644
1645         /* The max framesize filter on this chip is set higher than
1646          * MTU since FCoE uses 2k frames.
1647          */
1648         if (skb->len > ndev->mtu + ETH_HLEN) {
1649                 dev_kfree_skb_any(skb);
1650                 rx_ring->rx_dropped++;
1651                 return;
1652         }
1653
1654         prefetch(skb->data);
1655         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1656                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1657                              "%s Multicast.\n",
1658                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1664         }
1665         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1666                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667                              "Promiscuous Packet.\n");
1668
1669         rx_ring->rx_packets++;
1670         rx_ring->rx_bytes += skb->len;
1671         skb->protocol = eth_type_trans(skb, ndev);
1672         skb_checksum_none_assert(skb);
1673
1674         /* If rx checksum is on, and there are no
1675          * csum or frame errors.
1676          */
1677         if ((ndev->features & NETIF_F_RXCSUM) &&
1678                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1679                 /* TCP frame. */
1680                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1681                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682                                      "TCP checksum done!\n");
1683                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1684                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686                         /* Unfragmented ipv4 UDP frame. */
1687                         struct iphdr *iph = (struct iphdr *) skb->data;
1688                         if (!(iph->frag_off &
1689                                 htons(IP_MF|IP_OFFSET))) {
1690                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1691                                 netif_printk(qdev, rx_status, KERN_DEBUG,
1692                                              qdev->ndev,
1693                                              "UDP checksum done!\n");
1694                         }
1695                 }
1696         }
1697
1698         skb_record_rx_queue(skb, rx_ring->cq_id);
1699         if (vlan_id != 0xffff)
1700                 __vlan_hwaccel_put_tag(skb, vlan_id);
1701         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702                 napi_gro_receive(&rx_ring->napi, skb);
1703         else
1704                 netif_receive_skb(skb);
1705 }
1706
1707 static void ql_realign_skb(struct sk_buff *skb, int len)
1708 {
1709         void *temp_addr = skb->data;
1710
1711         /* Undo the skb_reserve(skb,32) we did before
1712          * giving to hardware, and realign data on
1713          * a 2-byte boundary.
1714          */
1715         skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716         skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717         skb_copy_to_linear_data(skb, temp_addr,
1718                 (unsigned int)len);
1719 }
1720
1721 /*
1722  * This function builds an skb for the given inbound
1723  * completion.  It will be rewritten for readability in the near
1724  * future, but for not it works well.
1725  */
1726 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727                                        struct rx_ring *rx_ring,
1728                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
1729 {
1730         struct bq_desc *lbq_desc;
1731         struct bq_desc *sbq_desc;
1732         struct sk_buff *skb = NULL;
1733         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734        u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1735
1736         /*
1737          * Handle the header buffer if present.
1738          */
1739         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740             ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742                              "Header of %d bytes in small buffer.\n", hdr_len);
1743                 /*
1744                  * Headers fit nicely into a small buffer.
1745                  */
1746                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1747                 pci_unmap_single(qdev->pdev,
1748                                 dma_unmap_addr(sbq_desc, mapaddr),
1749                                 dma_unmap_len(sbq_desc, maplen),
1750                                 PCI_DMA_FROMDEVICE);
1751                 skb = sbq_desc->p.skb;
1752                 ql_realign_skb(skb, hdr_len);
1753                 skb_put(skb, hdr_len);
1754                 sbq_desc->p.skb = NULL;
1755         }
1756
1757         /*
1758          * Handle the data buffer(s).
1759          */
1760         if (unlikely(!length)) {        /* Is there data too? */
1761                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762                              "No Data buffer in this packet.\n");
1763                 return skb;
1764         }
1765
1766         if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1768                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769                                      "Headers in small, data of %d bytes in small, combine them.\n",
1770                                      length);
1771                         /*
1772                          * Data is less than small buffer size so it's
1773                          * stuffed in a small buffer.
1774                          * For this case we append the data
1775                          * from the "data" small buffer to the "header" small
1776                          * buffer.
1777                          */
1778                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1779                         pci_dma_sync_single_for_cpu(qdev->pdev,
1780                                                     dma_unmap_addr
1781                                                     (sbq_desc, mapaddr),
1782                                                     dma_unmap_len
1783                                                     (sbq_desc, maplen),
1784                                                     PCI_DMA_FROMDEVICE);
1785                         memcpy(skb_put(skb, length),
1786                                sbq_desc->p.skb->data, length);
1787                         pci_dma_sync_single_for_device(qdev->pdev,
1788                                                        dma_unmap_addr
1789                                                        (sbq_desc,
1790                                                         mapaddr),
1791                                                        dma_unmap_len
1792                                                        (sbq_desc,
1793                                                         maplen),
1794                                                        PCI_DMA_FROMDEVICE);
1795                 } else {
1796                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797                                      "%d bytes in a single small buffer.\n",
1798                                      length);
1799                         sbq_desc = ql_get_curr_sbuf(rx_ring);
1800                         skb = sbq_desc->p.skb;
1801                         ql_realign_skb(skb, length);
1802                         skb_put(skb, length);
1803                         pci_unmap_single(qdev->pdev,
1804                                          dma_unmap_addr(sbq_desc,
1805                                                         mapaddr),
1806                                          dma_unmap_len(sbq_desc,
1807                                                        maplen),
1808                                          PCI_DMA_FROMDEVICE);
1809                         sbq_desc->p.skb = NULL;
1810                 }
1811         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812                 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1813                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814                                      "Header in small, %d bytes in large. Chain large to small!\n",
1815                                      length);
1816                         /*
1817                          * The data is in a single large buffer.  We
1818                          * chain it to the header buffer's skb and let
1819                          * it rip.
1820                          */
1821                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823                                      "Chaining page at offset = %d, for %d bytes  to skb.\n",
1824                                      lbq_desc->p.pg_chunk.offset, length);
1825                         skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826                                                 lbq_desc->p.pg_chunk.offset,
1827                                                 length);
1828                         skb->len += length;
1829                         skb->data_len += length;
1830                         skb->truesize += length;
1831                 } else {
1832                         /*
1833                          * The headers and data are in a single large buffer. We
1834                          * copy it to a new skb and let it go. This can happen with
1835                          * jumbo mtu on a non-TCP/UDP frame.
1836                          */
1837                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838                         skb = netdev_alloc_skb(qdev->ndev, length);
1839                         if (skb == NULL) {
1840                                 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841                                              "No skb available, drop the packet.\n");
1842                                 return NULL;
1843                         }
1844                         pci_unmap_page(qdev->pdev,
1845                                        dma_unmap_addr(lbq_desc,
1846                                                       mapaddr),
1847                                        dma_unmap_len(lbq_desc, maplen),
1848                                        PCI_DMA_FROMDEVICE);
1849                         skb_reserve(skb, NET_IP_ALIGN);
1850                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851                                      "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1852                                      length);
1853                         skb_fill_page_desc(skb, 0,
1854                                                 lbq_desc->p.pg_chunk.page,
1855                                                 lbq_desc->p.pg_chunk.offset,
1856                                                 length);
1857                         skb->len += length;
1858                         skb->data_len += length;
1859                         skb->truesize += length;
1860                         length -= length;
1861                         __pskb_pull_tail(skb,
1862                                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863                                 VLAN_ETH_HLEN : ETH_HLEN);
1864                 }
1865         } else {
1866                 /*
1867                  * The data is in a chain of large buffers
1868                  * pointed to by a small buffer.  We loop
1869                  * thru and chain them to the our small header
1870                  * buffer's skb.
1871                  * frags:  There are 18 max frags and our small
1872                  *         buffer will hold 32 of them. The thing is,
1873                  *         we'll use 3 max for our 9000 byte jumbo
1874                  *         frames.  If the MTU goes up we could
1875                  *          eventually be in trouble.
1876                  */
1877                 int size, i = 0;
1878                 sbq_desc = ql_get_curr_sbuf(rx_ring);
1879                 pci_unmap_single(qdev->pdev,
1880                                  dma_unmap_addr(sbq_desc, mapaddr),
1881                                  dma_unmap_len(sbq_desc, maplen),
1882                                  PCI_DMA_FROMDEVICE);
1883                 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1884                         /*
1885                          * This is an non TCP/UDP IP frame, so
1886                          * the headers aren't split into a small
1887                          * buffer.  We have to use the small buffer
1888                          * that contains our sg list as our skb to
1889                          * send upstairs. Copy the sg list here to
1890                          * a local buffer and use it to find the
1891                          * pages to chain.
1892                          */
1893                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894                                      "%d bytes of headers & data in chain of large.\n",
1895                                      length);
1896                         skb = sbq_desc->p.skb;
1897                         sbq_desc->p.skb = NULL;
1898                         skb_reserve(skb, NET_IP_ALIGN);
1899                 }
1900                 while (length > 0) {
1901                         lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902                         size = (length < rx_ring->lbq_buf_size) ? length :
1903                                 rx_ring->lbq_buf_size;
1904
1905                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906                                      "Adding page %d to skb for %d bytes.\n",
1907                                      i, size);
1908                         skb_fill_page_desc(skb, i,
1909                                                 lbq_desc->p.pg_chunk.page,
1910                                                 lbq_desc->p.pg_chunk.offset,
1911                                                 size);
1912                         skb->len += size;
1913                         skb->data_len += size;
1914                         skb->truesize += size;
1915                         length -= size;
1916                         i++;
1917                 }
1918                 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919                                 VLAN_ETH_HLEN : ETH_HLEN);
1920         }
1921         return skb;
1922 }
1923
1924 /* Process an inbound completion from an rx ring. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1926                                    struct rx_ring *rx_ring,
1927                                    struct ib_mac_iocb_rsp *ib_mac_rsp,
1928                                    u16 vlan_id)
1929 {
1930         struct net_device *ndev = qdev->ndev;
1931         struct sk_buff *skb = NULL;
1932
1933         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1934
1935         skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936         if (unlikely(!skb)) {
1937                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938                              "No skb available, drop packet.\n");
1939                 rx_ring->rx_dropped++;
1940                 return;
1941         }
1942
1943         /* Frame error, so drop the packet. */
1944         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1945                 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1946                 dev_kfree_skb_any(skb);
1947                 return;
1948         }
1949
1950         /* The max framesize filter on this chip is set higher than
1951          * MTU since FCoE uses 2k frames.
1952          */
1953         if (skb->len > ndev->mtu + ETH_HLEN) {
1954                 dev_kfree_skb_any(skb);
1955                 rx_ring->rx_dropped++;
1956                 return;
1957         }
1958
1959         /* loopback self test for ethtool */
1960         if (test_bit(QL_SELFTEST, &qdev->flags)) {
1961                 ql_check_lb_frame(qdev, skb);
1962                 dev_kfree_skb_any(skb);
1963                 return;
1964         }
1965
1966         prefetch(skb->data);
1967         if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1968                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1969                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1970                              IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1971                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1972                              IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1973                              (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1974                              IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1975                 rx_ring->rx_multicast++;
1976         }
1977         if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1978                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1979                              "Promiscuous Packet.\n");
1980         }
1981
1982         skb->protocol = eth_type_trans(skb, ndev);
1983         skb_checksum_none_assert(skb);
1984
1985         /* If rx checksum is on, and there are no
1986          * csum or frame errors.
1987          */
1988         if ((ndev->features & NETIF_F_RXCSUM) &&
1989                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1990                 /* TCP frame. */
1991                 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1992                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993                                      "TCP checksum done!\n");
1994                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1995                 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1996                                 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1997                 /* Unfragmented ipv4 UDP frame. */
1998                         struct iphdr *iph = (struct iphdr *) skb->data;
1999                         if (!(iph->frag_off &
2000                                 htons(IP_MF|IP_OFFSET))) {
2001                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2002                                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2003                                              "TCP checksum done!\n");
2004                         }
2005                 }
2006         }
2007
2008         rx_ring->rx_packets++;
2009         rx_ring->rx_bytes += skb->len;
2010         skb_record_rx_queue(skb, rx_ring->cq_id);
2011         if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2012                 __vlan_hwaccel_put_tag(skb, vlan_id);
2013         if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2014                 napi_gro_receive(&rx_ring->napi, skb);
2015         else
2016                 netif_receive_skb(skb);
2017 }
2018
2019 /* Process an inbound completion from an rx ring. */
2020 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2021                                         struct rx_ring *rx_ring,
2022                                         struct ib_mac_iocb_rsp *ib_mac_rsp)
2023 {
2024         u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2025         u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2026                         ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2027                         IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2028
2029         QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2030
2031         if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2032                 /* The data and headers are split into
2033                  * separate buffers.
2034                  */
2035                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2036                                                 vlan_id);
2037         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2038                 /* The data fit in a single small buffer.
2039                  * Allocate a new skb, copy the data and
2040                  * return the buffer to the free pool.
2041                  */
2042                 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2043                                                 length, vlan_id);
2044         } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2045                 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2046                 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2047                 /* TCP packet in a page chunk that's been checksummed.
2048                  * Tack it on to our GRO skb and let it go.
2049                  */
2050                 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2051                                                 length, vlan_id);
2052         } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2053                 /* Non-TCP packet in a page chunk. Allocate an
2054                  * skb, tack it on frags, and send it up.
2055                  */
2056                 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2057                                                 length, vlan_id);
2058         } else {
2059                 /* Non-TCP/UDP large frames that span multiple buffers
2060                  * can be processed corrrectly by the split frame logic.
2061                  */
2062                 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2063                                                 vlan_id);
2064         }
2065
2066         return (unsigned long)length;
2067 }
2068
2069 /* Process an outbound completion from an rx ring. */
2070 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2071                                    struct ob_mac_iocb_rsp *mac_rsp)
2072 {
2073         struct tx_ring *tx_ring;
2074         struct tx_ring_desc *tx_ring_desc;
2075
2076         QL_DUMP_OB_MAC_RSP(mac_rsp);
2077         tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2078         tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2079         ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2080         tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2081         tx_ring->tx_packets++;
2082         dev_kfree_skb(tx_ring_desc->skb);
2083         tx_ring_desc->skb = NULL;
2084
2085         if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2086                                         OB_MAC_IOCB_RSP_S |
2087                                         OB_MAC_IOCB_RSP_L |
2088                                         OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2089                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2090                         netif_warn(qdev, tx_done, qdev->ndev,
2091                                    "Total descriptor length did not match transfer length.\n");
2092                 }
2093                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2094                         netif_warn(qdev, tx_done, qdev->ndev,
2095                                    "Frame too short to be valid, not sent.\n");
2096                 }
2097                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2098                         netif_warn(qdev, tx_done, qdev->ndev,
2099                                    "Frame too long, but sent anyway.\n");
2100                 }
2101                 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2102                         netif_warn(qdev, tx_done, qdev->ndev,
2103                                    "PCI backplane error. Frame not sent.\n");
2104                 }
2105         }
2106         atomic_inc(&tx_ring->tx_count);
2107 }
2108
2109 /* Fire up a handler to reset the MPI processor. */
2110 void ql_queue_fw_error(struct ql_adapter *qdev)
2111 {
2112         ql_link_off(qdev);
2113         queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2114 }
2115
2116 void ql_queue_asic_error(struct ql_adapter *qdev)
2117 {
2118         ql_link_off(qdev);
2119         ql_disable_interrupts(qdev);
2120         /* Clear adapter up bit to signal the recovery
2121          * process that it shouldn't kill the reset worker
2122          * thread
2123          */
2124         clear_bit(QL_ADAPTER_UP, &qdev->flags);
2125         /* Set asic recovery bit to indicate reset process that we are
2126          * in fatal error recovery process rather than normal close
2127          */
2128         set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2129         queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2130 }
2131
2132 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2133                                     struct ib_ae_iocb_rsp *ib_ae_rsp)
2134 {
2135         switch (ib_ae_rsp->event) {
2136         case MGMT_ERR_EVENT:
2137                 netif_err(qdev, rx_err, qdev->ndev,
2138                           "Management Processor Fatal Error.\n");
2139                 ql_queue_fw_error(qdev);
2140                 return;
2141
2142         case CAM_LOOKUP_ERR_EVENT:
2143                 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2144                 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2145                 ql_queue_asic_error(qdev);
2146                 return;
2147
2148         case SOFT_ECC_ERROR_EVENT:
2149                 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2150                 ql_queue_asic_error(qdev);
2151                 break;
2152
2153         case PCI_ERR_ANON_BUF_RD:
2154                 netdev_err(qdev->ndev, "PCI error occurred when reading "
2155                                         "anonymous buffers from rx_ring %d.\n",
2156                                         ib_ae_rsp->q_id);
2157                 ql_queue_asic_error(qdev);
2158                 break;
2159
2160         default:
2161                 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2162                           ib_ae_rsp->event);
2163                 ql_queue_asic_error(qdev);
2164                 break;
2165         }
2166 }
2167
2168 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2169 {
2170         struct ql_adapter *qdev = rx_ring->qdev;
2171         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2172         struct ob_mac_iocb_rsp *net_rsp = NULL;
2173         int count = 0;
2174
2175         struct tx_ring *tx_ring;
2176         /* While there are entries in the completion queue. */
2177         while (prod != rx_ring->cnsmr_idx) {
2178
2179                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2180                              "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2181                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2182
2183                 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2184                 rmb();
2185                 switch (net_rsp->opcode) {
2186
2187                 case OPCODE_OB_MAC_TSO_IOCB:
2188                 case OPCODE_OB_MAC_IOCB:
2189                         ql_process_mac_tx_intr(qdev, net_rsp);
2190                         break;
2191                 default:
2192                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2193                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2194                                      net_rsp->opcode);
2195                 }
2196                 count++;
2197                 ql_update_cq(rx_ring);
2198                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2199         }
2200         if (!net_rsp)
2201                 return 0;
2202         ql_write_cq_idx(rx_ring);
2203         tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2204         if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2205                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2206                         /*
2207                          * The queue got stopped because the tx_ring was full.
2208                          * Wake it up, because it's now at least 25% empty.
2209                          */
2210                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2211         }
2212
2213         return count;
2214 }
2215
2216 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2217 {
2218         struct ql_adapter *qdev = rx_ring->qdev;
2219         u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2220         struct ql_net_rsp_iocb *net_rsp;
2221         int count = 0;
2222
2223         /* While there are entries in the completion queue. */
2224         while (prod != rx_ring->cnsmr_idx) {
2225
2226                 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2227                              "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2228                              rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2229
2230                 net_rsp = rx_ring->curr_entry;
2231                 rmb();
2232                 switch (net_rsp->opcode) {
2233                 case OPCODE_IB_MAC_IOCB:
2234                         ql_process_mac_rx_intr(qdev, rx_ring,
2235                                                (struct ib_mac_iocb_rsp *)
2236                                                net_rsp);
2237                         break;
2238
2239                 case OPCODE_IB_AE_IOCB:
2240                         ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2241                                                 net_rsp);
2242                         break;
2243                 default:
2244                         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2245                                      "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2246                                      net_rsp->opcode);
2247                         break;
2248                 }
2249                 count++;
2250                 ql_update_cq(rx_ring);
2251                 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2252                 if (count == budget)
2253                         break;
2254         }
2255         ql_update_buffer_queues(qdev, rx_ring);
2256         ql_write_cq_idx(rx_ring);
2257         return count;
2258 }
2259
2260 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2261 {
2262         struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2263         struct ql_adapter *qdev = rx_ring->qdev;
2264         struct rx_ring *trx_ring;
2265         int i, work_done = 0;
2266         struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2267
2268         netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2269                      "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2270
2271         /* Service the TX rings first.  They start
2272          * right after the RSS rings. */
2273         for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2274                 trx_ring = &qdev->rx_ring[i];
2275                 /* If this TX completion ring belongs to this vector and
2276                  * it's not empty then service it.
2277                  */
2278                 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2279                         (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2280                                         trx_ring->cnsmr_idx)) {
2281                         netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2282                                      "%s: Servicing TX completion ring %d.\n",
2283                                      __func__, trx_ring->cq_id);
2284                         ql_clean_outbound_rx_ring(trx_ring);
2285                 }
2286         }
2287
2288         /*
2289          * Now service the RSS ring if it's active.
2290          */
2291         if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2292                                         rx_ring->cnsmr_idx) {
2293                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2294                              "%s: Servicing RX completion ring %d.\n",
2295                              __func__, rx_ring->cq_id);
2296                 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2297         }
2298
2299         if (work_done < budget) {
2300                 napi_complete(napi);
2301                 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2302         }
2303         return work_done;
2304 }
2305
2306 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2307 {
2308         struct ql_adapter *qdev = netdev_priv(ndev);
2309
2310         if (features & NETIF_F_HW_VLAN_RX) {
2311                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2312                                  NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2313         } else {
2314                 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2315         }
2316 }
2317
2318 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2319         netdev_features_t features)
2320 {
2321         /*
2322          * Since there is no support for separate rx/tx vlan accel
2323          * enable/disable make sure tx flag is always in same state as rx.
2324          */
2325         if (features & NETIF_F_HW_VLAN_RX)
2326                 features |= NETIF_F_HW_VLAN_TX;
2327         else
2328                 features &= ~NETIF_F_HW_VLAN_TX;
2329
2330         return features;
2331 }
2332
2333 static int qlge_set_features(struct net_device *ndev,
2334         netdev_features_t features)
2335 {
2336         netdev_features_t changed = ndev->features ^ features;
2337
2338         if (changed & NETIF_F_HW_VLAN_RX)
2339                 qlge_vlan_mode(ndev, features);
2340
2341         return 0;
2342 }
2343
2344 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2345 {
2346         u32 enable_bit = MAC_ADDR_E;
2347         int err;
2348
2349         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2350                                   MAC_ADDR_TYPE_VLAN, vid);
2351         if (err)
2352                 netif_err(qdev, ifup, qdev->ndev,
2353                           "Failed to init vlan address.\n");
2354         return err;
2355 }
2356
2357 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2358 {
2359         struct ql_adapter *qdev = netdev_priv(ndev);
2360         int status;
2361         int err;
2362
2363         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2364         if (status)
2365                 return status;
2366
2367         err = __qlge_vlan_rx_add_vid(qdev, vid);
2368         set_bit(vid, qdev->active_vlans);
2369
2370         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2371
2372         return err;
2373 }
2374
2375 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2376 {
2377         u32 enable_bit = 0;
2378         int err;
2379
2380         err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2381                                   MAC_ADDR_TYPE_VLAN, vid);
2382         if (err)
2383                 netif_err(qdev, ifup, qdev->ndev,
2384                           "Failed to clear vlan address.\n");
2385         return err;
2386 }
2387
2388 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2389 {
2390         struct ql_adapter *qdev = netdev_priv(ndev);
2391         int status;
2392         int err;
2393
2394         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2395         if (status)
2396                 return status;
2397
2398         err = __qlge_vlan_rx_kill_vid(qdev, vid);
2399         clear_bit(vid, qdev->active_vlans);
2400
2401         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2402
2403         return err;
2404 }
2405
2406 static void qlge_restore_vlan(struct ql_adapter *qdev)
2407 {
2408         int status;
2409         u16 vid;
2410
2411         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2412         if (status)
2413                 return;
2414
2415         for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2416                 __qlge_vlan_rx_add_vid(qdev, vid);
2417
2418         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2419 }
2420
2421 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2422 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2423 {
2424         struct rx_ring *rx_ring = dev_id;
2425         napi_schedule(&rx_ring->napi);
2426         return IRQ_HANDLED;
2427 }
2428
2429 /* This handles a fatal error, MPI activity, and the default
2430  * rx_ring in an MSI-X multiple vector environment.
2431  * In MSI/Legacy environment it also process the rest of
2432  * the rx_rings.
2433  */
2434 static irqreturn_t qlge_isr(int irq, void *dev_id)
2435 {
2436         struct rx_ring *rx_ring = dev_id;
2437         struct ql_adapter *qdev = rx_ring->qdev;
2438         struct intr_context *intr_context = &qdev->intr_context[0];
2439         u32 var;
2440         int work_done = 0;
2441
2442         spin_lock(&qdev->hw_lock);
2443         if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2444                 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2445                              "Shared Interrupt, Not ours!\n");
2446                 spin_unlock(&qdev->hw_lock);
2447                 return IRQ_NONE;
2448         }
2449         spin_unlock(&qdev->hw_lock);
2450
2451         var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2452
2453         /*
2454          * Check for fatal error.
2455          */
2456         if (var & STS_FE) {
2457                 ql_queue_asic_error(qdev);
2458                 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2459                 var = ql_read32(qdev, ERR_STS);
2460                 netdev_err(qdev->ndev, "Resetting chip. "
2461                                         "Error Status Register = 0x%x\n", var);
2462                 return IRQ_HANDLED;
2463         }
2464
2465         /*
2466          * Check MPI processor activity.
2467          */
2468         if ((var & STS_PI) &&
2469                 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2470                 /*
2471                  * We've got an async event or mailbox completion.
2472                  * Handle it and clear the source of the interrupt.
2473                  */
2474                 netif_err(qdev, intr, qdev->ndev,
2475                           "Got MPI processor interrupt.\n");
2476                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2477                 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2478                 queue_delayed_work_on(smp_processor_id(),
2479                                 qdev->workqueue, &qdev->mpi_work, 0);
2480                 work_done++;
2481         }
2482
2483         /*
2484          * Get the bit-mask that shows the active queues for this
2485          * pass.  Compare it to the queues that this irq services
2486          * and call napi if there's a match.
2487          */
2488         var = ql_read32(qdev, ISR1);
2489         if (var & intr_context->irq_mask) {
2490                 netif_info(qdev, intr, qdev->ndev,
2491                            "Waking handler for rx_ring[0].\n");
2492                 ql_disable_completion_interrupt(qdev, intr_context->intr);
2493                 napi_schedule(&rx_ring->napi);
2494                 work_done++;
2495         }
2496         ql_enable_completion_interrupt(qdev, intr_context->intr);
2497         return work_done ? IRQ_HANDLED : IRQ_NONE;
2498 }
2499
2500 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2501 {
2502
2503         if (skb_is_gso(skb)) {
2504                 int err;
2505                 if (skb_header_cloned(skb)) {
2506                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2507                         if (err)
2508                                 return err;
2509                 }
2510
2511                 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2512                 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2513                 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2514                 mac_iocb_ptr->total_hdrs_len =
2515                     cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2516                 mac_iocb_ptr->net_trans_offset =
2517                     cpu_to_le16(skb_network_offset(skb) |
2518                                 skb_transport_offset(skb)
2519                                 << OB_MAC_TRANSPORT_HDR_SHIFT);
2520                 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2521                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2522                 if (likely(skb->protocol == htons(ETH_P_IP))) {
2523                         struct iphdr *iph = ip_hdr(skb);
2524                         iph->check = 0;
2525                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2526                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2527                                                                  iph->daddr, 0,
2528                                                                  IPPROTO_TCP,
2529                                                                  0);
2530                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2531                         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2532                         tcp_hdr(skb)->check =
2533                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2534                                              &ipv6_hdr(skb)->daddr,
2535                                              0, IPPROTO_TCP, 0);
2536                 }
2537                 return 1;
2538         }
2539         return 0;
2540 }
2541
2542 static void ql_hw_csum_setup(struct sk_buff *skb,
2543                              struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2544 {
2545         int len;
2546         struct iphdr *iph = ip_hdr(skb);
2547         __sum16 *check;
2548         mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2549         mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2550         mac_iocb_ptr->net_trans_offset =
2551                 cpu_to_le16(skb_network_offset(skb) |
2552                 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2553
2554         mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2555         len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2556         if (likely(iph->protocol == IPPROTO_TCP)) {
2557                 check = &(tcp_hdr(skb)->check);
2558                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2559                 mac_iocb_ptr->total_hdrs_len =
2560                     cpu_to_le16(skb_transport_offset(skb) +
2561                                 (tcp_hdr(skb)->doff << 2));
2562         } else {
2563                 check = &(udp_hdr(skb)->check);
2564                 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2565                 mac_iocb_ptr->total_hdrs_len =
2566                     cpu_to_le16(skb_transport_offset(skb) +
2567                                 sizeof(struct udphdr));
2568         }
2569         *check = ~csum_tcpudp_magic(iph->saddr,
2570                                     iph->daddr, len, iph->protocol, 0);
2571 }
2572
2573 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2574 {
2575         struct tx_ring_desc *tx_ring_desc;
2576         struct ob_mac_iocb_req *mac_iocb_ptr;
2577         struct ql_adapter *qdev = netdev_priv(ndev);
2578         int tso;
2579         struct tx_ring *tx_ring;
2580         u32 tx_ring_idx = (u32) skb->queue_mapping;
2581
2582         tx_ring = &qdev->tx_ring[tx_ring_idx];
2583
2584         if (skb_padto(skb, ETH_ZLEN))
2585                 return NETDEV_TX_OK;
2586
2587         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2588                 netif_info(qdev, tx_queued, qdev->ndev,
2589                            "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2590                            __func__, tx_ring_idx);
2591                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2592                 tx_ring->tx_errors++;
2593                 return NETDEV_TX_BUSY;
2594         }
2595         tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2596         mac_iocb_ptr = tx_ring_desc->queue_entry;
2597         memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2598
2599         mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2600         mac_iocb_ptr->tid = tx_ring_desc->index;
2601         /* We use the upper 32-bits to store the tx queue for this IO.
2602          * When we get the completion we can use it to establish the context.
2603          */
2604         mac_iocb_ptr->txq_idx = tx_ring_idx;
2605         tx_ring_desc->skb = skb;
2606
2607         mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2608
2609         if (vlan_tx_tag_present(skb)) {
2610                 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2611                              "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2612                 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2613                 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2614         }
2615         tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2616         if (tso < 0) {
2617                 dev_kfree_skb_any(skb);
2618                 return NETDEV_TX_OK;
2619         } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2620                 ql_hw_csum_setup(skb,
2621                                  (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2622         }
2623         if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2624                         NETDEV_TX_OK) {
2625                 netif_err(qdev, tx_queued, qdev->ndev,
2626                           "Could not map the segments.\n");
2627                 tx_ring->tx_errors++;
2628                 return NETDEV_TX_BUSY;
2629         }
2630         QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2631         tx_ring->prod_idx++;
2632         if (tx_ring->prod_idx == tx_ring->wq_len)
2633                 tx_ring->prod_idx = 0;
2634         wmb();
2635
2636         ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2637         netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2638                      "tx queued, slot %d, len %d\n",
2639                      tx_ring->prod_idx, skb->len);
2640
2641         atomic_dec(&tx_ring->tx_count);
2642
2643         if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2644                 netif_stop_subqueue(ndev, tx_ring->wq_id);
2645                 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2646                         /*
2647                          * The queue got stopped because the tx_ring was full.
2648                          * Wake it up, because it's now at least 25% empty.
2649                          */
2650                         netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2651         }
2652         return NETDEV_TX_OK;
2653 }
2654
2655
2656 static void ql_free_shadow_space(struct ql_adapter *qdev)
2657 {
2658         if (qdev->rx_ring_shadow_reg_area) {
2659                 pci_free_consistent(qdev->pdev,
2660                                     PAGE_SIZE,
2661                                     qdev->rx_ring_shadow_reg_area,
2662                                     qdev->rx_ring_shadow_reg_dma);
2663                 qdev->rx_ring_shadow_reg_area = NULL;
2664         }
2665         if (qdev->tx_ring_shadow_reg_area) {
2666                 pci_free_consistent(qdev->pdev,
2667                                     PAGE_SIZE,
2668                                     qdev->tx_ring_shadow_reg_area,
2669                                     qdev->tx_ring_shadow_reg_dma);
2670                 qdev->tx_ring_shadow_reg_area = NULL;
2671         }
2672 }
2673
2674 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2675 {
2676         qdev->rx_ring_shadow_reg_area =
2677             pci_alloc_consistent(qdev->pdev,
2678                                  PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2679         if (qdev->rx_ring_shadow_reg_area == NULL) {
2680                 netif_err(qdev, ifup, qdev->ndev,
2681                           "Allocation of RX shadow space failed.\n");
2682                 return -ENOMEM;
2683         }
2684         memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2685         qdev->tx_ring_shadow_reg_area =
2686             pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2687                                  &qdev->tx_ring_shadow_reg_dma);
2688         if (qdev->tx_ring_shadow_reg_area == NULL) {
2689                 netif_err(qdev, ifup, qdev->ndev,
2690                           "Allocation of TX shadow space failed.\n");
2691                 goto err_wqp_sh_area;
2692         }
2693         memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2694         return 0;
2695
2696 err_wqp_sh_area:
2697         pci_free_consistent(qdev->pdev,
2698                             PAGE_SIZE,
2699                             qdev->rx_ring_shadow_reg_area,
2700                             qdev->rx_ring_shadow_reg_dma);
2701         return -ENOMEM;
2702 }
2703
2704 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2705 {
2706         struct tx_ring_desc *tx_ring_desc;
2707         int i;
2708         struct ob_mac_iocb_req *mac_iocb_ptr;
2709
2710         mac_iocb_ptr = tx_ring->wq_base;
2711         tx_ring_desc = tx_ring->q;
2712         for (i = 0; i < tx_ring->wq_len; i++) {
2713                 tx_ring_desc->index = i;
2714                 tx_ring_desc->skb = NULL;
2715                 tx_ring_desc->queue_entry = mac_iocb_ptr;
2716                 mac_iocb_ptr++;
2717                 tx_ring_desc++;
2718         }
2719         atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2720 }
2721
2722 static void ql_free_tx_resources(struct ql_adapter *qdev,
2723                                  struct tx_ring *tx_ring)
2724 {
2725         if (tx_ring->wq_base) {
2726                 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2727                                     tx_ring->wq_base, tx_ring->wq_base_dma);
2728                 tx_ring->wq_base = NULL;
2729         }
2730         kfree(tx_ring->q);
2731         tx_ring->q = NULL;
2732 }
2733
2734 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2735                                  struct tx_ring *tx_ring)
2736 {
2737         tx_ring->wq_base =
2738             pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2739                                  &tx_ring->wq_base_dma);
2740
2741         if ((tx_ring->wq_base == NULL) ||
2742             tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2743                 goto pci_alloc_err;
2744
2745         tx_ring->q =
2746             kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2747         if (tx_ring->q == NULL)
2748                 goto err;
2749
2750         return 0;
2751 err:
2752         pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2753                             tx_ring->wq_base, tx_ring->wq_base_dma);
2754         tx_ring->wq_base = NULL;
2755 pci_alloc_err:
2756         netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2757         return -ENOMEM;
2758 }
2759
2760 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2761 {
2762         struct bq_desc *lbq_desc;
2763
2764         uint32_t  curr_idx, clean_idx;
2765
2766         curr_idx = rx_ring->lbq_curr_idx;
2767         clean_idx = rx_ring->lbq_clean_idx;
2768         while (curr_idx != clean_idx) {
2769                 lbq_desc = &rx_ring->lbq[curr_idx];
2770
2771                 if (lbq_desc->p.pg_chunk.last_flag) {
2772                         pci_unmap_page(qdev->pdev,
2773                                 lbq_desc->p.pg_chunk.map,
2774                                 ql_lbq_block_size(qdev),
2775                                        PCI_DMA_FROMDEVICE);
2776                         lbq_desc->p.pg_chunk.last_flag = 0;
2777                 }
2778
2779                 put_page(lbq_desc->p.pg_chunk.page);
2780                 lbq_desc->p.pg_chunk.page = NULL;
2781
2782                 if (++curr_idx == rx_ring->lbq_len)
2783                         curr_idx = 0;
2784
2785         }
2786 }
2787
2788 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2789 {
2790         int i;
2791         struct bq_desc *sbq_desc;
2792
2793         for (i = 0; i < rx_ring->sbq_len; i++) {
2794                 sbq_desc = &rx_ring->sbq[i];
2795                 if (sbq_desc == NULL) {
2796                         netif_err(qdev, ifup, qdev->ndev,
2797                                   "sbq_desc %d is NULL.\n", i);
2798                         return;
2799                 }
2800                 if (sbq_desc->p.skb) {
2801                         pci_unmap_single(qdev->pdev,
2802                                          dma_unmap_addr(sbq_desc, mapaddr),
2803                                          dma_unmap_len(sbq_desc, maplen),
2804                                          PCI_DMA_FROMDEVICE);
2805                         dev_kfree_skb(sbq_desc->p.skb);
2806                         sbq_desc->p.skb = NULL;
2807                 }
2808         }
2809 }
2810
2811 /* Free all large and small rx buffers associated
2812  * with the completion queues for this device.
2813  */
2814 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2815 {
2816         int i;
2817         struct rx_ring *rx_ring;
2818
2819         for (i = 0; i < qdev->rx_ring_count; i++) {
2820                 rx_ring = &qdev->rx_ring[i];
2821                 if (rx_ring->lbq)
2822                         ql_free_lbq_buffers(qdev, rx_ring);
2823                 if (rx_ring->sbq)
2824                         ql_free_sbq_buffers(qdev, rx_ring);
2825         }
2826 }
2827
2828 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2829 {
2830         struct rx_ring *rx_ring;
2831         int i;
2832
2833         for (i = 0; i < qdev->rx_ring_count; i++) {
2834                 rx_ring = &qdev->rx_ring[i];
2835                 if (rx_ring->type != TX_Q)
2836                         ql_update_buffer_queues(qdev, rx_ring);
2837         }
2838 }
2839
2840 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2841                                 struct rx_ring *rx_ring)
2842 {
2843         int i;
2844         struct bq_desc *lbq_desc;
2845         __le64 *bq = rx_ring->lbq_base;
2846
2847         memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2848         for (i = 0; i < rx_ring->lbq_len; i++) {
2849                 lbq_desc = &rx_ring->lbq[i];
2850                 memset(lbq_desc, 0, sizeof(*lbq_desc));
2851                 lbq_desc->index = i;
2852                 lbq_desc->addr = bq;
2853                 bq++;
2854         }
2855 }
2856
2857 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2858                                 struct rx_ring *rx_ring)
2859 {
2860         int i;
2861         struct bq_desc *sbq_desc;
2862         __le64 *bq = rx_ring->sbq_base;
2863
2864         memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2865         for (i = 0; i < rx_ring->sbq_len; i++) {
2866                 sbq_desc = &rx_ring->sbq[i];
2867                 memset(sbq_desc, 0, sizeof(*sbq_desc));
2868                 sbq_desc->index = i;
2869                 sbq_desc->addr = bq;
2870                 bq++;
2871         }
2872 }
2873
2874 static void ql_free_rx_resources(struct ql_adapter *qdev,
2875                                  struct rx_ring *rx_ring)
2876 {
2877         /* Free the small buffer queue. */
2878         if (rx_ring->sbq_base) {
2879                 pci_free_consistent(qdev->pdev,
2880                                     rx_ring->sbq_size,
2881                                     rx_ring->sbq_base, rx_ring->sbq_base_dma);
2882                 rx_ring->sbq_base = NULL;
2883         }
2884
2885         /* Free the small buffer queue control blocks. */
2886         kfree(rx_ring->sbq);
2887         rx_ring->sbq = NULL;
2888
2889         /* Free the large buffer queue. */
2890         if (rx_ring->lbq_base) {
2891                 pci_free_consistent(qdev->pdev,
2892                                     rx_ring->lbq_size,
2893                                     rx_ring->lbq_base, rx_ring->lbq_base_dma);
2894                 rx_ring->lbq_base = NULL;
2895         }
2896
2897         /* Free the large buffer queue control blocks. */
2898         kfree(rx_ring->lbq);
2899         rx_ring->lbq = NULL;
2900
2901         /* Free the rx queue. */
2902         if (rx_ring->cq_base) {
2903                 pci_free_consistent(qdev->pdev,
2904                                     rx_ring->cq_size,
2905                                     rx_ring->cq_base, rx_ring->cq_base_dma);
2906                 rx_ring->cq_base = NULL;
2907         }
2908 }
2909
2910 /* Allocate queues and buffers for this completions queue based
2911  * on the values in the parameter structure. */
2912 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2913                                  struct rx_ring *rx_ring)
2914 {
2915
2916         /*
2917          * Allocate the completion queue for this rx_ring.
2918          */
2919         rx_ring->cq_base =
2920             pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2921                                  &rx_ring->cq_base_dma);
2922
2923         if (rx_ring->cq_base == NULL) {
2924                 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2925                 return -ENOMEM;
2926         }
2927
2928         if (rx_ring->sbq_len) {
2929                 /*
2930                  * Allocate small buffer queue.
2931                  */
2932                 rx_ring->sbq_base =
2933                     pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2934                                          &rx_ring->sbq_base_dma);
2935
2936                 if (rx_ring->sbq_base == NULL) {
2937                         netif_err(qdev, ifup, qdev->ndev,
2938                                   "Small buffer queue allocation failed.\n");
2939                         goto err_mem;
2940                 }
2941
2942                 /*
2943                  * Allocate small buffer queue control blocks.
2944                  */
2945                 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
2946                                              sizeof(struct bq_desc),
2947                                              GFP_KERNEL);
2948                 if (rx_ring->sbq == NULL)
2949                         goto err_mem;
2950
2951                 ql_init_sbq_ring(qdev, rx_ring);
2952         }
2953
2954         if (rx_ring->lbq_len) {
2955                 /*
2956                  * Allocate large buffer queue.
2957                  */
2958                 rx_ring->lbq_base =
2959                     pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2960                                          &rx_ring->lbq_base_dma);
2961
2962                 if (rx_ring->lbq_base == NULL) {
2963                         netif_err(qdev, ifup, qdev->ndev,
2964                                   "Large buffer queue allocation failed.\n");
2965                         goto err_mem;
2966                 }
2967                 /*
2968                  * Allocate large buffer queue control blocks.
2969                  */
2970                 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
2971                                              sizeof(struct bq_desc),
2972                                              GFP_KERNEL);
2973                 if (rx_ring->lbq == NULL)
2974                         goto err_mem;
2975
2976                 ql_init_lbq_ring(qdev, rx_ring);
2977         }
2978
2979         return 0;
2980
2981 err_mem:
2982         ql_free_rx_resources(qdev, rx_ring);
2983         return -ENOMEM;
2984 }
2985
2986 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2987 {
2988         struct tx_ring *tx_ring;
2989         struct tx_ring_desc *tx_ring_desc;
2990         int i, j;
2991
2992         /*
2993          * Loop through all queues and free
2994          * any resources.
2995          */
2996         for (j = 0; j < qdev->tx_ring_count; j++) {
2997                 tx_ring = &qdev->tx_ring[j];
2998                 for (i = 0; i < tx_ring->wq_len; i++) {
2999                         tx_ring_desc = &tx_ring->q[i];
3000                         if (tx_ring_desc && tx_ring_desc->skb) {
3001                                 netif_err(qdev, ifdown, qdev->ndev,
3002                                           "Freeing lost SKB %p, from queue %d, index %d.\n",
3003                                           tx_ring_desc->skb, j,
3004                                           tx_ring_desc->index);
3005                                 ql_unmap_send(qdev, tx_ring_desc,
3006                                               tx_ring_desc->map_cnt);
3007                                 dev_kfree_skb(tx_ring_desc->skb);
3008                                 tx_ring_desc->skb = NULL;
3009                         }
3010                 }
3011         }
3012 }
3013
3014 static void ql_free_mem_resources(struct ql_adapter *qdev)
3015 {
3016         int i;
3017
3018         for (i = 0; i < qdev->tx_ring_count; i++)
3019                 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3020         for (i = 0; i < qdev->rx_ring_count; i++)
3021                 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3022         ql_free_shadow_space(qdev);
3023 }
3024
3025 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3026 {
3027         int i;
3028
3029         /* Allocate space for our shadow registers and such. */
3030         if (ql_alloc_shadow_space(qdev))
3031                 return -ENOMEM;
3032
3033         for (i = 0; i < qdev->rx_ring_count; i++) {
3034                 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3035                         netif_err(qdev, ifup, qdev->ndev,
3036                                   "RX resource allocation failed.\n");
3037                         goto err_mem;
3038                 }
3039         }
3040         /* Allocate tx queue resources */
3041         for (i = 0; i < qdev->tx_ring_count; i++) {
3042                 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3043                         netif_err(qdev, ifup, qdev->ndev,
3044                                   "TX resource allocation failed.\n");
3045                         goto err_mem;
3046                 }
3047         }
3048         return 0;
3049
3050 err_mem:
3051         ql_free_mem_resources(qdev);
3052         return -ENOMEM;
3053 }
3054
3055 /* Set up the rx ring control block and pass it to the chip.
3056  * The control block is defined as
3057  * "Completion Queue Initialization Control Block", or cqicb.
3058  */
3059 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3060 {
3061         struct cqicb *cqicb = &rx_ring->cqicb;
3062         void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3063                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3064         u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3065                 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3066         void __iomem *doorbell_area =
3067             qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3068         int err = 0;
3069         u16 bq_len;
3070         u64 tmp;
3071         __le64 *base_indirect_ptr;
3072         int page_entries;
3073
3074         /* Set up the shadow registers for this ring. */
3075         rx_ring->prod_idx_sh_reg = shadow_reg;
3076         rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3077         *rx_ring->prod_idx_sh_reg = 0;
3078         shadow_reg += sizeof(u64);
3079         shadow_reg_dma += sizeof(u64);
3080         rx_ring->lbq_base_indirect = shadow_reg;
3081         rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3082         shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3083         shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3084         rx_ring->sbq_base_indirect = shadow_reg;
3085         rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3086
3087         /* PCI doorbell mem area + 0x00 for consumer index register */
3088         rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3089         rx_ring->cnsmr_idx = 0;
3090         rx_ring->curr_entry = rx_ring->cq_base;
3091
3092         /* PCI doorbell mem area + 0x04 for valid register */
3093         rx_ring->valid_db_reg = doorbell_area + 0x04;
3094
3095         /* PCI doorbell mem area + 0x18 for large buffer consumer */
3096         rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3097
3098         /* PCI doorbell mem area + 0x1c */
3099         rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3100
3101         memset((void *)cqicb, 0, sizeof(struct cqicb));
3102         cqicb->msix_vect = rx_ring->irq;
3103
3104         bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3105         cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3106
3107         cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3108
3109         cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3110
3111         /*
3112          * Set up the control block load flags.
3113          */
3114         cqicb->flags = FLAGS_LC |       /* Load queue base address */
3115             FLAGS_LV |          /* Load MSI-X vector */
3116             FLAGS_LI;           /* Load irq delay values */
3117         if (rx_ring->lbq_len) {
3118                 cqicb->flags |= FLAGS_LL;       /* Load lbq values */
3119                 tmp = (u64)rx_ring->lbq_base_dma;
3120                 base_indirect_ptr = rx_ring->lbq_base_indirect;
3121                 page_entries = 0;
3122                 do {
3123                         *base_indirect_ptr = cpu_to_le64(tmp);
3124                         tmp += DB_PAGE_SIZE;
3125                         base_indirect_ptr++;
3126                         page_entries++;
3127                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3128                 cqicb->lbq_addr =
3129                     cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3130                 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3131                         (u16) rx_ring->lbq_buf_size;
3132                 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3133                 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3134                         (u16) rx_ring->lbq_len;
3135                 cqicb->lbq_len = cpu_to_le16(bq_len);
3136                 rx_ring->lbq_prod_idx = 0;
3137                 rx_ring->lbq_curr_idx = 0;
3138                 rx_ring->lbq_clean_idx = 0;
3139                 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3140         }
3141         if (rx_ring->sbq_len) {
3142                 cqicb->flags |= FLAGS_LS;       /* Load sbq values */
3143                 tmp = (u64)rx_ring->sbq_base_dma;
3144                 base_indirect_ptr = rx_ring->sbq_base_indirect;
3145                 page_entries = 0;
3146                 do {
3147                         *base_indirect_ptr = cpu_to_le64(tmp);
3148                         tmp += DB_PAGE_SIZE;
3149                         base_indirect_ptr++;
3150                         page_entries++;
3151                 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3152                 cqicb->sbq_addr =
3153                     cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3154                 cqicb->sbq_buf_size =
3155                     cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3156                 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3157                         (u16) rx_ring->sbq_len;
3158                 cqicb->sbq_len = cpu_to_le16(bq_len);
3159                 rx_ring->sbq_prod_idx = 0;
3160                 rx_ring->sbq_curr_idx = 0;
3161                 rx_ring->sbq_clean_idx = 0;
3162                 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3163         }
3164         switch (rx_ring->type) {
3165         case TX_Q:
3166                 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3167                 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3168                 break;
3169         case RX_Q:
3170                 /* Inbound completion handling rx_rings run in
3171                  * separate NAPI contexts.
3172                  */
3173                 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3174                                64);
3175                 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3176                 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3177                 break;
3178         default:
3179                 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3180                              "Invalid rx_ring->type = %d.\n", rx_ring->type);
3181         }
3182         err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3183                            CFG_LCQ, rx_ring->cq_id);
3184         if (err) {
3185                 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3186                 return err;
3187         }
3188         return err;
3189 }
3190
3191 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3192 {
3193         struct wqicb *wqicb = (struct wqicb *)tx_ring;
3194         void __iomem *doorbell_area =
3195             qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3196         void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3197             (tx_ring->wq_id * sizeof(u64));
3198         u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3199             (tx_ring->wq_id * sizeof(u64));
3200         int err = 0;
3201
3202         /*
3203          * Assign doorbell registers for this tx_ring.
3204          */
3205         /* TX PCI doorbell mem area for tx producer index */
3206         tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3207         tx_ring->prod_idx = 0;
3208         /* TX PCI doorbell mem area + 0x04 */
3209         tx_ring->valid_db_reg = doorbell_area + 0x04;
3210
3211         /*
3212          * Assign shadow registers for this tx_ring.
3213          */
3214         tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3215         tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3216
3217         wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3218         wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3219                                    Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3220         wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3221         wqicb->rid = 0;
3222         wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3223
3224         wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3225
3226         ql_init_tx_ring(qdev, tx_ring);
3227
3228         err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3229                            (u16) tx_ring->wq_id);
3230         if (err) {
3231                 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3232                 return err;
3233         }
3234         return err;
3235 }
3236
3237 static void ql_disable_msix(struct ql_adapter *qdev)
3238 {
3239         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3240                 pci_disable_msix(qdev->pdev);
3241                 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3242                 kfree(qdev->msi_x_entry);
3243                 qdev->msi_x_entry = NULL;
3244         } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3245                 pci_disable_msi(qdev->pdev);
3246                 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3247         }
3248 }
3249
3250 /* We start by trying to get the number of vectors
3251  * stored in qdev->intr_count. If we don't get that
3252  * many then we reduce the count and try again.
3253  */
3254 static void ql_enable_msix(struct ql_adapter *qdev)
3255 {
3256         int i, err;
3257
3258         /* Get the MSIX vectors. */
3259         if (qlge_irq_type == MSIX_IRQ) {
3260                 /* Try to alloc space for the msix struct,
3261                  * if it fails then go to MSI/legacy.
3262                  */
3263                 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3264                                             sizeof(struct msix_entry),
3265                                             GFP_KERNEL);
3266                 if (!qdev->msi_x_entry) {
3267                         qlge_irq_type = MSI_IRQ;
3268                         goto msi;
3269                 }
3270
3271                 for (i = 0; i < qdev->intr_count; i++)
3272                         qdev->msi_x_entry[i].entry = i;
3273
3274                 /* Loop to get our vectors.  We start with
3275                  * what we want and settle for what we get.
3276                  */
3277                 do {
3278                         err = pci_enable_msix(qdev->pdev,
3279                                 qdev->msi_x_entry, qdev->intr_count);
3280                         if (err > 0)
3281                                 qdev->intr_count = err;
3282                 } while (err > 0);
3283
3284                 if (err < 0) {
3285                         kfree(qdev->msi_x_entry);
3286                         qdev->msi_x_entry = NULL;
3287                         netif_warn(qdev, ifup, qdev->ndev,
3288                                    "MSI-X Enable failed, trying MSI.\n");
3289                         qdev->intr_count = 1;
3290                         qlge_irq_type = MSI_IRQ;
3291                 } else if (err == 0) {
3292                         set_bit(QL_MSIX_ENABLED, &qdev->flags);
3293                         netif_info(qdev, ifup, qdev->ndev,
3294                                    "MSI-X Enabled, got %d vectors.\n",
3295                                    qdev->intr_count);
3296                         return;
3297                 }
3298         }
3299 msi:
3300         qdev->intr_count = 1;
3301         if (qlge_irq_type == MSI_IRQ) {
3302                 if (!pci_enable_msi(qdev->pdev)) {
3303                         set_bit(QL_MSI_ENABLED, &qdev->flags);
3304                         netif_info(qdev, ifup, qdev->ndev,
3305                                    "Running with MSI interrupts.\n");
3306                         return;
3307                 }
3308         }
3309         qlge_irq_type = LEG_IRQ;
3310         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3311                      "Running with legacy interrupts.\n");
3312 }
3313
3314 /* Each vector services 1 RSS ring and and 1 or more
3315  * TX completion rings.  This function loops through
3316  * the TX completion rings and assigns the vector that
3317  * will service it.  An example would be if there are
3318  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3319  * This would mean that vector 0 would service RSS ring 0
3320  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3321  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3322  */
3323 static void ql_set_tx_vect(struct ql_adapter *qdev)
3324 {
3325         int i, j, vect;
3326         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3327
3328         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3329                 /* Assign irq vectors to TX rx_rings.*/
3330                 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3331                                          i < qdev->rx_ring_count; i++) {
3332                         if (j == tx_rings_per_vector) {
3333                                 vect++;
3334                                 j = 0;
3335                         }
3336                         qdev->rx_ring[i].irq = vect;
3337                         j++;
3338                 }
3339         } else {
3340                 /* For single vector all rings have an irq
3341                  * of zero.
3342                  */
3343                 for (i = 0; i < qdev->rx_ring_count; i++)
3344                         qdev->rx_ring[i].irq = 0;
3345         }
3346 }
3347
3348 /* Set the interrupt mask for this vector.  Each vector
3349  * will service 1 RSS ring and 1 or more TX completion
3350  * rings.  This function sets up a bit mask per vector
3351  * that indicates which rings it services.
3352  */
3353 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3354 {
3355         int j, vect = ctx->intr;
3356         u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3357
3358         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3359                 /* Add the RSS ring serviced by this vector
3360                  * to the mask.
3361                  */
3362                 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3363                 /* Add the TX ring(s) serviced by this vector
3364                  * to the mask. */
3365                 for (j = 0; j < tx_rings_per_vector; j++) {
3366                         ctx->irq_mask |=
3367                         (1 << qdev->rx_ring[qdev->rss_ring_count +
3368                         (vect * tx_rings_per_vector) + j].cq_id);
3369                 }
3370         } else {
3371                 /* For single vector we just shift each queue's
3372                  * ID into the mask.
3373                  */
3374                 for (j = 0; j < qdev->rx_ring_count; j++)
3375                         ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3376         }
3377 }
3378
3379 /*
3380  * Here we build the intr_context structures based on
3381  * our rx_ring count and intr vector count.
3382  * The intr_context structure is used to hook each vector
3383  * to possibly different handlers.
3384  */
3385 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3386 {
3387         int i = 0;
3388         struct intr_context *intr_context = &qdev->intr_context[0];
3389
3390         if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3391                 /* Each rx_ring has it's
3392                  * own intr_context since we have separate
3393                  * vectors for each queue.
3394                  */
3395                 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3396                         qdev->rx_ring[i].irq = i;
3397                         intr_context->intr = i;
3398                         intr_context->qdev = qdev;
3399                         /* Set up this vector's bit-mask that indicates
3400                          * which queues it services.
3401                          */
3402                         ql_set_irq_mask(qdev, intr_context);
3403                         /*
3404                          * We set up each vectors enable/disable/read bits so
3405                          * there's no bit/mask calculations in the critical path.
3406                          */
3407                         intr_context->intr_en_mask =
3408                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3409                             INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3410                             | i;
3411                         intr_context->intr_dis_mask =
3412                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3413                             INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3414                             INTR_EN_IHD | i;
3415                         intr_context->intr_read_mask =
3416                             INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3417                             INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3418                             i;
3419                         if (i == 0) {
3420                                 /* The first vector/queue handles
3421                                  * broadcast/multicast, fatal errors,
3422                                  * and firmware events.  This in addition
3423                                  * to normal inbound NAPI processing.
3424                                  */
3425                                 intr_context->handler = qlge_isr;
3426                                 sprintf(intr_context->name, "%s-rx-%d",
3427                                         qdev->ndev->name, i);
3428                         } else {
3429                                 /*
3430                                  * Inbound queues handle unicast frames only.
3431                                  */
3432                                 intr_context->handler = qlge_msix_rx_isr;
3433                                 sprintf(intr_context->name, "%s-rx-%d",
3434                                         qdev->ndev->name, i);
3435                         }
3436                 }
3437         } else {
3438                 /*
3439                  * All rx_rings use the same intr_context since
3440                  * there is only one vector.
3441                  */
3442                 intr_context->intr = 0;
3443                 intr_context->qdev = qdev;
3444                 /*
3445                  * We set up each vectors enable/disable/read bits so
3446                  * there's no bit/mask calculations in the critical path.
3447                  */
3448                 intr_context->intr_en_mask =
3449                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3450                 intr_context->intr_dis_mask =
3451                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3452                     INTR_EN_TYPE_DISABLE;
3453                 intr_context->intr_read_mask =
3454                     INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3455                 /*
3456                  * Single interrupt means one handler for all rings.
3457                  */
3458                 intr_context->handler = qlge_isr;
3459                 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3460                 /* Set up this vector's bit-mask that indicates
3461                  * which queues it services. In this case there is
3462                  * a single vector so it will service all RSS and
3463                  * TX completion rings.
3464                  */
3465                 ql_set_irq_mask(qdev, intr_context);
3466         }
3467         /* Tell the TX completion rings which MSIx vector
3468          * they will be using.
3469          */
3470         ql_set_tx_vect(qdev);
3471 }
3472
3473 static void ql_free_irq(struct ql_adapter *qdev)
3474 {
3475         int i;
3476         struct intr_context *intr_context = &qdev->intr_context[0];
3477
3478         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3479                 if (intr_context->hooked) {
3480                         if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3481                                 free_irq(qdev->msi_x_entry[i].vector,
3482                                          &qdev->rx_ring[i]);
3483                         } else {
3484                                 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3485                         }
3486                 }
3487         }
3488         ql_disable_msix(qdev);
3489 }
3490
3491 static int ql_request_irq(struct ql_adapter *qdev)
3492 {
3493         int i;
3494         int status = 0;
3495         struct pci_dev *pdev = qdev->pdev;
3496         struct intr_context *intr_context = &qdev->intr_context[0];
3497
3498         ql_resolve_queues_to_irqs(qdev);
3499
3500         for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3501                 atomic_set(&intr_context->irq_cnt, 0);
3502                 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3503                         status = request_irq(qdev->msi_x_entry[i].vector,
3504                                              intr_context->handler,
3505                                              0,
3506                                              intr_context->name,
3507                                              &qdev->rx_ring[i]);
3508                         if (status) {
3509                                 netif_err(qdev, ifup, qdev->ndev,
3510                                           "Failed request for MSIX interrupt %d.\n",
3511                                           i);
3512                                 goto err_irq;
3513                         }
3514                 } else {
3515                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3516                                      "trying msi or legacy interrupts.\n");
3517                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3518                                      "%s: irq = %d.\n", __func__, pdev->irq);
3519                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3520                                      "%s: context->name = %s.\n", __func__,
3521                                      intr_context->name);
3522                         netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3523                                      "%s: dev_id = 0x%p.\n", __func__,
3524                                      &qdev->rx_ring[0]);
3525                         status =
3526                             request_irq(pdev->irq, qlge_isr,
3527                                         test_bit(QL_MSI_ENABLED,
3528                                                  &qdev->
3529                                                  flags) ? 0 : IRQF_SHARED,
3530                                         intr_context->name, &qdev->rx_ring[0]);
3531                         if (status)
3532                                 goto err_irq;
3533
3534                         netif_err(qdev, ifup, qdev->ndev,
3535                                   "Hooked intr %d, queue type %s, with name %s.\n",
3536                                   i,
3537                                   qdev->rx_ring[0].type == DEFAULT_Q ?
3538                                   "DEFAULT_Q" :
3539                                   qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3540                                   qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3541                                   intr_context->name);
3542                 }
3543                 intr_context->hooked = 1;
3544         }
3545         return status;
3546 err_irq:
3547         netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
3548         ql_free_irq(qdev);
3549         return status;
3550 }
3551
3552 static int ql_start_rss(struct ql_adapter *qdev)
3553 {
3554         static const u8 init_hash_seed[] = {
3555                 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3556                 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3557                 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3558                 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3559                 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3560         };
3561         struct ricb *ricb = &qdev->ricb;
3562         int status = 0;
3563         int i;
3564         u8 *hash_id = (u8 *) ricb->hash_cq_id;
3565
3566         memset((void *)ricb, 0, sizeof(*ricb));
3567
3568         ricb->base_cq = RSS_L4K;
3569         ricb->flags =
3570                 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3571         ricb->mask = cpu_to_le16((u16)(0x3ff));
3572
3573         /*
3574          * Fill out the Indirection Table.
3575          */
3576         for (i = 0; i < 1024; i++)
3577                 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3578
3579         memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3580         memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3581
3582         status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3583         if (status) {
3584                 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3585                 return status;
3586         }
3587         return status;
3588 }
3589
3590 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3591 {
3592         int i, status = 0;
3593
3594         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3595         if (status)
3596                 return status;
3597         /* Clear all the entries in the routing table. */
3598         for (i = 0; i < 16; i++) {
3599                 status = ql_set_routing_reg(qdev, i, 0, 0);
3600                 if (status) {
3601                         netif_err(qdev, ifup, qdev->ndev,
3602                                   "Failed to init routing register for CAM packets.\n");
3603                         break;
3604                 }
3605         }
3606         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3607         return status;
3608 }
3609
3610 /* Initialize the frame-to-queue routing. */
3611 static int ql_route_initialize(struct ql_adapter *qdev)
3612 {
3613         int status = 0;
3614
3615         /* Clear all the entries in the routing table. */
3616         status = ql_clear_routing_entries(qdev);
3617         if (status)
3618                 return status;
3619
3620         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3621         if (status)
3622                 return status;
3623
3624         status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3625                                                 RT_IDX_IP_CSUM_ERR, 1);
3626         if (status) {
3627                 netif_err(qdev, ifup, qdev->ndev,
3628                         "Failed to init routing register "
3629                         "for IP CSUM error packets.\n");
3630                 goto exit;
3631         }
3632         status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3633                                                 RT_IDX_TU_CSUM_ERR, 1);
3634         if (status) {
3635                 netif_err(qdev, ifup, qdev->ndev,
3636                         "Failed to init routing register "
3637                         "for TCP/UDP CSUM error packets.\n");
3638                 goto exit;
3639         }
3640         status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3641         if (status) {
3642                 netif_err(qdev, ifup, qdev->ndev,
3643                           "Failed to init routing register for broadcast packets.\n");
3644                 goto exit;
3645         }
3646         /* If we have more than one inbound queue, then turn on RSS in the
3647          * routing block.
3648          */
3649         if (qdev->rss_ring_count > 1) {
3650                 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3651                                         RT_IDX_RSS_MATCH, 1);
3652                 if (status) {
3653                         netif_err(qdev, ifup, qdev->ndev,
3654                                   "Failed to init routing register for MATCH RSS packets.\n");
3655                         goto exit;
3656                 }
3657         }
3658
3659         status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3660                                     RT_IDX_CAM_HIT, 1);
3661         if (status)
3662                 netif_err(qdev, ifup, qdev->ndev,
3663                           "Failed to init routing register for CAM packets.\n");
3664 exit:
3665         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3666         return status;
3667 }
3668
3669 int ql_cam_route_initialize(struct ql_adapter *qdev)
3670 {
3671         int status, set;
3672
3673         /* If check if the link is up and use to
3674          * determine if we are setting or clearing
3675          * the MAC address in the CAM.
3676          */
3677         set = ql_read32(qdev, STS);
3678         set &= qdev->port_link_up;
3679         status = ql_set_mac_addr(qdev, set);
3680         if (status) {
3681                 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3682                 return status;
3683         }
3684
3685         status = ql_route_initialize(qdev);
3686         if (status)
3687                 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3688
3689         return status;
3690 }
3691
3692 static int ql_adapter_initialize(struct ql_adapter *qdev)
3693 {
3694         u32 value, mask;
3695         int i;
3696         int status = 0;
3697
3698         /*
3699          * Set up the System register to halt on errors.
3700          */
3701         value = SYS_EFE | SYS_FAE;
3702         mask = value << 16;
3703         ql_write32(qdev, SYS, mask | value);
3704
3705         /* Set the default queue, and VLAN behavior. */
3706         value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3707         mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3708         ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3709
3710         /* Set the MPI interrupt to enabled. */
3711         ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3712
3713         /* Enable the function, set pagesize, enable error checking. */
3714         value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3715             FSC_EC | FSC_VM_PAGE_4K;
3716         value |= SPLT_SETTING;
3717
3718         /* Set/clear header splitting. */
3719         mask = FSC_VM_PAGESIZE_MASK |
3720             FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3721         ql_write32(qdev, FSC, mask | value);
3722
3723         ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3724
3725         /* Set RX packet routing to use port/pci function on which the
3726          * packet arrived on in addition to usual frame routing.
3727          * This is helpful on bonding where both interfaces can have
3728          * the same MAC address.
3729          */
3730         ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3731         /* Reroute all packets to our Interface.
3732          * They may have been routed to MPI firmware
3733          * due to WOL.
3734          */
3735         value = ql_read32(qdev, MGMT_RCV_CFG);
3736         value &= ~MGMT_RCV_CFG_RM;
3737         mask = 0xffff0000;
3738
3739         /* Sticky reg needs clearing due to WOL. */
3740         ql_write32(qdev, MGMT_RCV_CFG, mask);
3741         ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3742
3743         /* Default WOL is enable on Mezz cards */
3744         if (qdev->pdev->subsystem_device == 0x0068 ||
3745                         qdev->pdev->subsystem_device == 0x0180)
3746                 qdev->wol = WAKE_MAGIC;
3747
3748         /* Start up the rx queues. */
3749         for (i = 0; i < qdev->rx_ring_count; i++) {
3750                 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3751                 if (status) {
3752                         netif_err(qdev, ifup, qdev->ndev,
3753                                   "Failed to start rx ring[%d].\n", i);
3754                         return status;
3755                 }
3756         }
3757
3758         /* If there is more than one inbound completion queue
3759          * then download a RICB to configure RSS.
3760          */
3761         if (qdev->rss_ring_count > 1) {
3762                 status = ql_start_rss(qdev);
3763                 if (status) {
3764                         netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3765                         return status;
3766                 }
3767         }
3768
3769         /* Start up the tx queues. */
3770         for (i = 0; i < qdev->tx_ring_count; i++) {
3771                 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3772                 if (status) {
3773                         netif_err(qdev, ifup, qdev->ndev,
3774                                   "Failed to start tx ring[%d].\n", i);
3775                         return status;
3776                 }
3777         }
3778
3779         /* Initialize the port and set the max framesize. */
3780         status = qdev->nic_ops->port_initialize(qdev);
3781         if (status)
3782                 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3783
3784         /* Set up the MAC address and frame routing filter. */
3785         status = ql_cam_route_initialize(qdev);
3786         if (status) {
3787                 netif_err(qdev, ifup, qdev->ndev,
3788                           "Failed to init CAM/Routing tables.\n");
3789                 return status;
3790         }
3791
3792         /* Start NAPI for the RSS queues. */
3793         for (i = 0; i < qdev->rss_ring_count; i++)
3794                 napi_enable(&qdev->rx_ring[i].napi);
3795
3796         return status;
3797 }
3798
3799 /* Issue soft reset to chip. */
3800 static int ql_adapter_reset(struct ql_adapter *qdev)
3801 {
3802         u32 value;
3803         int status = 0;
3804         unsigned long end_jiffies;
3805
3806         /* Clear all the entries in the routing table. */
3807         status = ql_clear_routing_entries(qdev);
3808         if (status) {
3809                 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3810                 return status;
3811         }
3812
3813         end_jiffies = jiffies +
3814                 max((unsigned long)1, usecs_to_jiffies(30));
3815
3816         /* Check if bit is set then skip the mailbox command and
3817          * clear the bit, else we are in normal reset process.
3818          */
3819         if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3820                 /* Stop management traffic. */
3821                 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3822
3823                 /* Wait for the NIC and MGMNT FIFOs to empty. */
3824                 ql_wait_fifo_empty(qdev);
3825         } else
3826                 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3827
3828         ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3829
3830         do {
3831                 value = ql_read32(qdev, RST_FO);
3832                 if ((value & RST_FO_FR) == 0)
3833                         break;
3834                 cpu_relax();
3835         } while (time_before(jiffies, end_jiffies));
3836
3837         if (value & RST_FO_FR) {
3838                 netif_err(qdev, ifdown, qdev->ndev,
3839                           "ETIMEDOUT!!! errored out of resetting the chip!\n");
3840                 status = -ETIMEDOUT;
3841         }
3842
3843         /* Resume management traffic. */
3844         ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3845         return status;
3846 }
3847
3848 static void ql_display_dev_info(struct net_device *ndev)
3849 {
3850         struct ql_adapter *qdev = netdev_priv(ndev);
3851
3852         netif_info(qdev, probe, qdev->ndev,
3853                    "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3854                    "XG Roll = %d, XG Rev = %d.\n",
3855                    qdev->func,
3856                    qdev->port,
3857                    qdev->chip_rev_id & 0x0000000f,
3858                    qdev->chip_rev_id >> 4 & 0x0000000f,
3859                    qdev->chip_rev_id >> 8 & 0x0000000f,
3860                    qdev->chip_rev_id >> 12 & 0x0000000f);
3861         netif_info(qdev, probe, qdev->ndev,
3862                    "MAC address %pM\n", ndev->dev_addr);
3863 }
3864
3865 static int ql_wol(struct ql_adapter *qdev)
3866 {
3867         int status = 0;
3868         u32 wol = MB_WOL_DISABLE;
3869
3870         /* The CAM is still intact after a reset, but if we
3871          * are doing WOL, then we may need to program the
3872          * routing regs. We would also need to issue the mailbox
3873          * commands to instruct the MPI what to do per the ethtool
3874          * settings.
3875          */
3876
3877         if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3878                         WAKE_MCAST | WAKE_BCAST)) {
3879                 netif_err(qdev, ifdown, qdev->ndev,
3880                           "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3881                           qdev->wol);
3882                 return -EINVAL;
3883         }
3884
3885         if (qdev->wol & WAKE_MAGIC) {
3886                 status = ql_mb_wol_set_magic(qdev, 1);
3887                 if (status) {
3888                         netif_err(qdev, ifdown, qdev->ndev,
3889                                   "Failed to set magic packet on %s.\n",
3890                                   qdev->ndev->name);
3891                         return status;
3892                 } else
3893                         netif_info(qdev, drv, qdev->ndev,
3894                                    "Enabled magic packet successfully on %s.\n",
3895                                    qdev->ndev->name);
3896
3897                 wol |= MB_WOL_MAGIC_PKT;
3898         }
3899
3900         if (qdev->wol) {
3901                 wol |= MB_WOL_MODE_ON;
3902                 status = ql_mb_wol_mode(qdev, wol);
3903                 netif_err(qdev, drv, qdev->ndev,
3904                           "WOL %s (wol code 0x%x) on %s\n",
3905                           (status == 0) ? "Successfully set" : "Failed",
3906                           wol, qdev->ndev->name);
3907         }
3908
3909         return status;
3910 }
3911
3912 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3913 {
3914
3915         /* Don't kill the reset worker thread if we
3916          * are in the process of recovery.
3917          */
3918         if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3919                 cancel_delayed_work_sync(&qdev->asic_reset_work);
3920         cancel_delayed_work_sync(&qdev->mpi_reset_work);
3921         cancel_delayed_work_sync(&qdev->mpi_work);
3922         cancel_delayed_work_sync(&qdev->mpi_idc_work);
3923         cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3924         cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3925 }
3926
3927 static int ql_adapter_down(struct ql_adapter *qdev)
3928 {
3929         int i, status = 0;
3930
3931         ql_link_off(qdev);
3932
3933         ql_cancel_all_work_sync(qdev);
3934
3935         for (i = 0; i < qdev->rss_ring_count; i++)
3936                 napi_disable(&qdev->rx_ring[i].napi);
3937
3938         clear_bit(QL_ADAPTER_UP, &qdev->flags);
3939
3940         ql_disable_interrupts(qdev);
3941
3942         ql_tx_ring_clean(qdev);
3943
3944         /* Call netif_napi_del() from common point.
3945          */
3946         for (i = 0; i < qdev->rss_ring_count; i++)
3947                 netif_napi_del(&qdev->rx_ring[i].napi);
3948
3949         status = ql_adapter_reset(qdev);
3950         if (status)
3951                 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3952                           qdev->func);
3953         ql_free_rx_buffers(qdev);
3954
3955         return status;
3956 }
3957
3958 static int ql_adapter_up(struct ql_adapter *qdev)
3959 {
3960         int err = 0;
3961
3962         err = ql_adapter_initialize(qdev);
3963         if (err) {
3964                 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3965                 goto err_init;
3966         }
3967         set_bit(QL_ADAPTER_UP, &qdev->flags);
3968         ql_alloc_rx_buffers(qdev);
3969         /* If the port is initialized and the
3970          * link is up the turn on the carrier.
3971          */
3972         if ((ql_read32(qdev, STS) & qdev->port_init) &&
3973                         (ql_read32(qdev, STS) & qdev->port_link_up))
3974                 ql_link_on(qdev);
3975         /* Restore rx mode. */
3976         clear_bit(QL_ALLMULTI, &qdev->flags);
3977         clear_bit(QL_PROMISCUOUS, &qdev->flags);
3978         qlge_set_multicast_list(qdev->ndev);
3979
3980         /* Restore vlan setting. */
3981         qlge_restore_vlan(qdev);
3982
3983         ql_enable_interrupts(qdev);
3984         ql_enable_all_completion_interrupts(qdev);
3985         netif_tx_start_all_queues(qdev->ndev);
3986
3987         return 0;
3988 err_init:
3989         ql_adapter_reset(qdev);
3990         return err;
3991 }
3992
3993 static void ql_release_adapter_resources(struct ql_adapter *qdev)
3994 {
3995         ql_free_mem_resources(qdev);
3996         ql_free_irq(qdev);
3997 }
3998
3999 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4000 {
4001         int status = 0;
4002
4003         if (ql_alloc_mem_resources(qdev)) {
4004                 netif_err(qdev, ifup, qdev->ndev, "Unable to  allocate memory.\n");
4005                 return -ENOMEM;
4006         }
4007         status = ql_request_irq(qdev);
4008         return status;
4009 }
4010
4011 static int qlge_close(struct net_device *ndev)
4012 {
4013         struct ql_adapter *qdev = netdev_priv(ndev);
4014
4015         /* If we hit pci_channel_io_perm_failure
4016          * failure condition, then we already
4017          * brought the adapter down.
4018          */
4019         if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4020                 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4021                 clear_bit(QL_EEH_FATAL, &qdev->flags);
4022                 return 0;
4023         }
4024
4025         /*
4026          * Wait for device to recover from a reset.
4027          * (Rarely happens, but possible.)
4028          */
4029         while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4030                 msleep(1);
4031         ql_adapter_down(qdev);
4032         ql_release_adapter_resources(qdev);
4033         return 0;
4034 }
4035
4036 static int ql_configure_rings(struct ql_adapter *qdev)
4037 {
4038         int i;
4039         struct rx_ring *rx_ring;
4040         struct tx_ring *tx_ring;
4041         int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4042         unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4043                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4044
4045         qdev->lbq_buf_order = get_order(lbq_buf_len);
4046
4047         /* In a perfect world we have one RSS ring for each CPU
4048          * and each has it's own vector.  To do that we ask for
4049          * cpu_cnt vectors.  ql_enable_msix() will adjust the
4050          * vector count to what we actually get.  We then
4051          * allocate an RSS ring for each.
4052          * Essentially, we are doing min(cpu_count, msix_vector_count).
4053          */
4054         qdev->intr_count = cpu_cnt;
4055         ql_enable_msix(qdev);
4056         /* Adjust the RSS ring count to the actual vector count. */
4057         qdev->rss_ring_count = qdev->intr_count;
4058         qdev->tx_ring_count = cpu_cnt;
4059         qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4060
4061         for (i = 0; i < qdev->tx_ring_count; i++) {
4062                 tx_ring = &qdev->tx_ring[i];
4063                 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4064                 tx_ring->qdev = qdev;
4065                 tx_ring->wq_id = i;
4066                 tx_ring->wq_len = qdev->tx_ring_size;
4067                 tx_ring->wq_size =
4068                     tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4069
4070                 /*
4071                  * The completion queue ID for the tx rings start
4072                  * immediately after the rss rings.
4073                  */
4074                 tx_ring->cq_id = qdev->rss_ring_count + i;
4075         }
4076
4077         for (i = 0; i < qdev->rx_ring_count; i++) {
4078                 rx_ring = &qdev->rx_ring[i];
4079                 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4080                 rx_ring->qdev = qdev;
4081                 rx_ring->cq_id = i;
4082                 rx_ring->cpu = i % cpu_cnt;     /* CPU to run handler on. */
4083                 if (i < qdev->rss_ring_count) {
4084                         /*
4085                          * Inbound (RSS) queues.
4086                          */
4087                         rx_ring->cq_len = qdev->rx_ring_size;
4088                         rx_ring->cq_size =
4089                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4090                         rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4091                         rx_ring->lbq_size =
4092                             rx_ring->lbq_len * sizeof(__le64);
4093                         rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4094                         rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4095                         rx_ring->sbq_size =
4096                             rx_ring->sbq_len * sizeof(__le64);
4097                         rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4098                         rx_ring->type = RX_Q;
4099                 } else {
4100                         /*
4101                          * Outbound queue handles outbound completions only.
4102                          */
4103                         /* outbound cq is same size as tx_ring it services. */
4104                         rx_ring->cq_len = qdev->tx_ring_size;
4105                         rx_ring->cq_size =
4106                             rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4107                         rx_ring->lbq_len = 0;
4108                         rx_ring->lbq_size = 0;
4109                         rx_ring->lbq_buf_size = 0;
4110                         rx_ring->sbq_len = 0;
4111                         rx_ring->sbq_size = 0;
4112                         rx_ring->sbq_buf_size = 0;
4113                         rx_ring->type = TX_Q;
4114                 }
4115         }
4116         return 0;
4117 }
4118
4119 static int qlge_open(struct net_device *ndev)
4120 {
4121         int err = 0;
4122         struct ql_adapter *qdev = netdev_priv(ndev);
4123
4124         err = ql_adapter_reset(qdev);
4125         if (err)
4126                 return err;
4127
4128         err = ql_configure_rings(qdev);
4129         if (err)
4130                 return err;
4131
4132         err = ql_get_adapter_resources(qdev);
4133         if (err)
4134                 goto error_up;
4135
4136         err = ql_adapter_up(qdev);
4137         if (err)
4138                 goto error_up;
4139
4140         return err;
4141
4142 error_up:
4143         ql_release_adapter_resources(qdev);
4144         return err;
4145 }
4146
4147 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4148 {
4149         struct rx_ring *rx_ring;
4150         int i, status;
4151         u32 lbq_buf_len;
4152
4153         /* Wait for an outstanding reset to complete. */
4154         if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4155                 int i = 3;
4156                 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4157                         netif_err(qdev, ifup, qdev->ndev,
4158                                   "Waiting for adapter UP...\n");
4159                         ssleep(1);
4160                 }
4161
4162                 if (!i) {
4163                         netif_err(qdev, ifup, qdev->ndev,
4164                                   "Timed out waiting for adapter UP\n");
4165                         return -ETIMEDOUT;
4166                 }
4167         }
4168
4169         status = ql_adapter_down(qdev);
4170         if (status)
4171                 goto error;
4172
4173         /* Get the new rx buffer size. */
4174         lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4175                 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4176         qdev->lbq_buf_order = get_order(lbq_buf_len);
4177
4178         for (i = 0; i < qdev->rss_ring_count; i++) {
4179                 rx_ring = &qdev->rx_ring[i];
4180                 /* Set the new size. */
4181                 rx_ring->lbq_buf_size = lbq_buf_len;
4182         }
4183
4184         status = ql_adapter_up(qdev);
4185         if (status)
4186                 goto error;
4187
4188         return status;
4189 error:
4190         netif_alert(qdev, ifup, qdev->ndev,
4191                     "Driver up/down cycle failed, closing device.\n");
4192         set_bit(QL_ADAPTER_UP, &qdev->flags);
4193         dev_close(qdev->ndev);
4194         return status;
4195 }
4196
4197 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4198 {
4199         struct ql_adapter *qdev = netdev_priv(ndev);
4200         int status;
4201
4202         if (ndev->mtu == 1500 && new_mtu == 9000) {
4203                 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4204         } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4205                 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4206         } else
4207                 return -EINVAL;
4208
4209         queue_delayed_work(qdev->workqueue,
4210                         &qdev->mpi_port_cfg_work, 3*HZ);
4211
4212         ndev->mtu = new_mtu;
4213
4214         if (!netif_running(qdev->ndev)) {
4215                 return 0;
4216         }
4217
4218         status = ql_change_rx_buffers(qdev);
4219         if (status) {
4220                 netif_err(qdev, ifup, qdev->ndev,
4221                           "Changing MTU failed.\n");
4222         }
4223
4224         return status;
4225 }
4226
4227 static struct net_device_stats *qlge_get_stats(struct net_device
4228                                                *ndev)
4229 {
4230         struct ql_adapter *qdev = netdev_priv(ndev);
4231         struct rx_ring *rx_ring = &qdev->rx_ring[0];
4232         struct tx_ring *tx_ring = &qdev->tx_ring[0];
4233         unsigned long pkts, mcast, dropped, errors, bytes;
4234         int i;
4235
4236         /* Get RX stats. */
4237         pkts = mcast = dropped = errors = bytes = 0;
4238         for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4239                         pkts += rx_ring->rx_packets;
4240                         bytes += rx_ring->rx_bytes;
4241                         dropped += rx_ring->rx_dropped;
4242                         errors += rx_ring->rx_errors;
4243                         mcast += rx_ring->rx_multicast;
4244         }
4245         ndev->stats.rx_packets = pkts;
4246         ndev->stats.rx_bytes = bytes;
4247         ndev->stats.rx_dropped = dropped;
4248         ndev->stats.rx_errors = errors;
4249         ndev->stats.multicast = mcast;
4250
4251         /* Get TX stats. */
4252         pkts = errors = bytes = 0;
4253         for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4254                         pkts += tx_ring->tx_packets;
4255                         bytes += tx_ring->tx_bytes;
4256                         errors += tx_ring->tx_errors;
4257         }
4258         ndev->stats.tx_packets = pkts;
4259         ndev->stats.tx_bytes = bytes;
4260         ndev->stats.tx_errors = errors;
4261         return &ndev->stats;
4262 }
4263
4264 static void qlge_set_multicast_list(struct net_device *ndev)
4265 {
4266         struct ql_adapter *qdev = netdev_priv(ndev);
4267         struct netdev_hw_addr *ha;
4268         int i, status;
4269
4270         status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4271         if (status)
4272                 return;
4273         /*
4274          * Set or clear promiscuous mode if a
4275          * transition is taking place.
4276          */
4277         if (ndev->flags & IFF_PROMISC) {
4278                 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4279                         if (ql_set_routing_reg
4280                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4281                                 netif_err(qdev, hw, qdev->ndev,
4282                                           "Failed to set promiscuous mode.\n");
4283                         } else {
4284                                 set_bit(QL_PROMISCUOUS, &qdev->flags);
4285                         }
4286                 }
4287         } else {
4288                 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4289                         if (ql_set_routing_reg
4290                             (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4291                                 netif_err(qdev, hw, qdev->ndev,
4292                                           "Failed to clear promiscuous mode.\n");
4293                         } else {
4294                                 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4295                         }
4296                 }
4297         }
4298
4299         /*
4300          * Set or clear all multicast mode if a
4301          * transition is taking place.
4302          */
4303         if ((ndev->flags & IFF_ALLMULTI) ||
4304             (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4305                 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4306                         if (ql_set_routing_reg
4307                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4308                                 netif_err(qdev, hw, qdev->ndev,
4309                                           "Failed to set all-multi mode.\n");
4310                         } else {
4311                                 set_bit(QL_ALLMULTI, &qdev->flags);
4312                         }
4313                 }
4314         } else {
4315                 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4316                         if (ql_set_routing_reg
4317                             (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4318                                 netif_err(qdev, hw, qdev->ndev,
4319                                           "Failed to clear all-multi mode.\n");
4320                         } else {
4321                                 clear_bit(QL_ALLMULTI, &qdev->flags);
4322                         }
4323                 }
4324         }
4325
4326         if (!netdev_mc_empty(ndev)) {
4327                 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4328                 if (status)
4329                         goto exit;
4330                 i = 0;
4331                 netdev_for_each_mc_addr(ha, ndev) {
4332                         if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4333                                                 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4334                                 netif_err(qdev, hw, qdev->ndev,
4335                                           "Failed to loadmulticast address.\n");
4336                                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4337                                 goto exit;
4338                         }
4339                         i++;
4340                 }
4341                 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4342                 if (ql_set_routing_reg
4343                     (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4344                         netif_err(qdev, hw, qdev->ndev,
4345                                   "Failed to set multicast match mode.\n");
4346                 } else {
4347                         set_bit(QL_ALLMULTI, &qdev->flags);
4348                 }
4349         }
4350 exit:
4351         ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4352 }
4353
4354 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4355 {
4356         struct ql_adapter *qdev = netdev_priv(ndev);
4357         struct sockaddr *addr = p;
4358         int status;
4359
4360         if (!is_valid_ether_addr(addr->sa_data))
4361                 return -EADDRNOTAVAIL;
4362         memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4363         /* Update local copy of current mac address. */
4364         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4365
4366         status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4367         if (status)
4368                 return status;
4369         status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4370                         MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4371         if (status)
4372                 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4373         ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4374         return status;
4375 }
4376
4377 static void qlge_tx_timeout(struct net_device *ndev)
4378 {
4379         struct ql_adapter *qdev = netdev_priv(ndev);
4380         ql_queue_asic_error(qdev);
4381 }
4382
4383 static void ql_asic_reset_work(struct work_struct *work)
4384 {
4385         struct ql_adapter *qdev =
4386             container_of(work, struct ql_adapter, asic_reset_work.work);
4387         int status;
4388         rtnl_lock();
4389         status = ql_adapter_down(qdev);
4390         if (status)
4391                 goto error;
4392
4393         status = ql_adapter_up(qdev);
4394         if (status)
4395                 goto error;
4396
4397         /* Restore rx mode. */
4398         clear_bit(QL_ALLMULTI, &qdev->flags);
4399         clear_bit(QL_PROMISCUOUS, &qdev->flags);
4400         qlge_set_multicast_list(qdev->ndev);
4401
4402         rtnl_unlock();
4403         return;
4404 error:
4405         netif_alert(qdev, ifup, qdev->ndev,
4406                     "Driver up/down cycle failed, closing device\n");
4407
4408         set_bit(QL_ADAPTER_UP, &qdev->flags);
4409         dev_close(qdev->ndev);
4410         rtnl_unlock();
4411 }
4412
4413 static const struct nic_operations qla8012_nic_ops = {
4414         .get_flash              = ql_get_8012_flash_params,
4415         .port_initialize        = ql_8012_port_initialize,
4416 };
4417
4418 static const struct nic_operations qla8000_nic_ops = {
4419         .get_flash              = ql_get_8000_flash_params,
4420         .port_initialize        = ql_8000_port_initialize,
4421 };
4422
4423 /* Find the pcie function number for the other NIC
4424  * on this chip.  Since both NIC functions share a
4425  * common firmware we have the lowest enabled function
4426  * do any common work.  Examples would be resetting
4427  * after a fatal firmware error, or doing a firmware
4428  * coredump.
4429  */
4430 static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4431 {
4432         int status = 0;
4433         u32 temp;
4434         u32 nic_func1, nic_func2;
4435
4436         status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4437                         &temp);
4438         if (status)
4439                 return status;
4440
4441         nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4442                         MPI_TEST_NIC_FUNC_MASK);
4443         nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4444                         MPI_TEST_NIC_FUNC_MASK);
4445
4446         if (qdev->func == nic_func1)
4447                 qdev->alt_func = nic_func2;
4448         else if (qdev->func == nic_func2)
4449                 qdev->alt_func = nic_func1;
4450         else
4451                 status = -EIO;
4452
4453         return status;
4454 }
4455
4456 static int ql_get_board_info(struct ql_adapter *qdev)
4457 {
4458         int status;
4459         qdev->func =
4460             (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4461         if (qdev->func > 3)
4462                 return -EIO;
4463
4464         status = ql_get_alt_pcie_func(qdev);
4465         if (status)
4466                 return status;
4467
4468         qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4469         if (qdev->port) {
4470                 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4471                 qdev->port_link_up = STS_PL1;
4472                 qdev->port_init = STS_PI1;
4473                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4474                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4475         } else {
4476                 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4477                 qdev->port_link_up = STS_PL0;
4478                 qdev->port_init = STS_PI0;
4479                 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4480                 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4481         }
4482         qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4483         qdev->device_id = qdev->pdev->device;
4484         if (qdev->device_id == QLGE_DEVICE_ID_8012)
4485                 qdev->nic_ops = &qla8012_nic_ops;
4486         else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4487                 qdev->nic_ops = &qla8000_nic_ops;
4488         return status;
4489 }
4490
4491 static void ql_release_all(struct pci_dev *pdev)
4492 {
4493         struct net_device *ndev = pci_get_drvdata(pdev);
4494         struct ql_adapter *qdev = netdev_priv(ndev);
4495
4496         if (qdev->workqueue) {
4497                 destroy_workqueue(qdev->workqueue);
4498                 qdev->workqueue = NULL;
4499         }
4500
4501         if (qdev->reg_base)
4502                 iounmap(qdev->reg_base);
4503         if (qdev->doorbell_area)
4504                 iounmap(qdev->doorbell_area);
4505         vfree(qdev->mpi_coredump);
4506         pci_release_regions(pdev);
4507         pci_set_drvdata(pdev, NULL);
4508 }
4509
4510 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4511                           int cards_found)
4512 {
4513         struct ql_adapter *qdev = netdev_priv(ndev);
4514         int err = 0;
4515
4516         memset((void *)qdev, 0, sizeof(*qdev));
4517         err = pci_enable_device(pdev);
4518         if (err) {
4519                 dev_err(&pdev->dev, "PCI device enable failed.\n");
4520                 return err;
4521         }
4522
4523         qdev->ndev = ndev;
4524         qdev->pdev = pdev;
4525         pci_set_drvdata(pdev, ndev);
4526
4527         /* Set PCIe read request size */
4528         err = pcie_set_readrq(pdev, 4096);
4529         if (err) {
4530                 dev_err(&pdev->dev, "Set readrq failed.\n");
4531                 goto err_out1;
4532         }
4533
4534         err = pci_request_regions(pdev, DRV_NAME);
4535         if (err) {
4536                 dev_err(&pdev->dev, "PCI region request failed.\n");
4537                 return err;
4538         }
4539
4540         pci_set_master(pdev);
4541         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4542                 set_bit(QL_DMA64, &qdev->flags);
4543                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4544         } else {
4545                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4546                 if (!err)
4547                        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4548         }
4549
4550         if (err) {
4551                 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4552                 goto err_out2;
4553         }
4554
4555         /* Set PCIe reset type for EEH to fundamental. */
4556         pdev->needs_freset = 1;
4557         pci_save_state(pdev);
4558         qdev->reg_base =
4559             ioremap_nocache(pci_resource_start(pdev, 1),
4560                             pci_resource_len(pdev, 1));
4561         if (!qdev->reg_base) {
4562                 dev_err(&pdev->dev, "Register mapping failed.\n");
4563                 err = -ENOMEM;
4564                 goto err_out2;
4565         }
4566
4567         qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4568         qdev->doorbell_area =
4569             ioremap_nocache(pci_resource_start(pdev, 3),
4570                             pci_resource_len(pdev, 3));
4571         if (!qdev->doorbell_area) {
4572                 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4573                 err = -ENOMEM;
4574                 goto err_out2;
4575         }
4576
4577         err = ql_get_board_info(qdev);
4578         if (err) {
4579                 dev_err(&pdev->dev, "Register access failed.\n");
4580                 err = -EIO;
4581                 goto err_out2;
4582         }
4583         qdev->msg_enable = netif_msg_init(debug, default_msg);
4584         spin_lock_init(&qdev->hw_lock);
4585         spin_lock_init(&qdev->stats_lock);
4586
4587         if (qlge_mpi_coredump) {
4588                 qdev->mpi_coredump =
4589                         vmalloc(sizeof(struct ql_mpi_coredump));
4590                 if (qdev->mpi_coredump == NULL) {
4591                         err = -ENOMEM;
4592                         goto err_out2;
4593                 }
4594                 if (qlge_force_coredump)
4595                         set_bit(QL_FRC_COREDUMP, &qdev->flags);
4596         }
4597         /* make sure the EEPROM is good */
4598         err = qdev->nic_ops->get_flash(qdev);
4599         if (err) {
4600                 dev_err(&pdev->dev, "Invalid FLASH.\n");
4601                 goto err_out2;
4602         }
4603
4604         /* Keep local copy of current mac address. */
4605         memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4606
4607         /* Set up the default ring sizes. */
4608         qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4609         qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4610
4611         /* Set up the coalescing parameters. */
4612         qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4613         qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4614         qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4615         qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4616
4617         /*
4618          * Set up the operating parameters.
4619          */
4620         qdev->workqueue = create_singlethread_workqueue(ndev->name);
4621         INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4622         INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4623         INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4624         INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4625         INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4626         INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4627         init_completion(&qdev->ide_completion);
4628         mutex_init(&qdev->mpi_mutex);
4629
4630         if (!cards_found) {
4631                 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4632                 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4633                          DRV_NAME, DRV_VERSION);
4634         }
4635         return 0;
4636 err_out2:
4637         ql_release_all(pdev);
4638 err_out1:
4639         pci_disable_device(pdev);
4640         return err;
4641 }
4642
4643 static const struct net_device_ops qlge_netdev_ops = {
4644         .ndo_open               = qlge_open,
4645         .ndo_stop               = qlge_close,
4646         .ndo_start_xmit         = qlge_send,
4647         .ndo_change_mtu         = qlge_change_mtu,
4648         .ndo_get_stats          = qlge_get_stats,
4649         .ndo_set_rx_mode        = qlge_set_multicast_list,
4650         .ndo_set_mac_address    = qlge_set_mac_address,
4651         .ndo_validate_addr      = eth_validate_addr,
4652         .ndo_tx_timeout         = qlge_tx_timeout,
4653         .ndo_fix_features       = qlge_fix_features,
4654         .ndo_set_features       = qlge_set_features,
4655         .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
4656         .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
4657 };
4658
4659 static void ql_timer(unsigned long data)
4660 {
4661         struct ql_adapter *qdev = (struct ql_adapter *)data;
4662         u32 var = 0;
4663
4664         var = ql_read32(qdev, STS);
4665         if (pci_channel_offline(qdev->pdev)) {
4666                 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4667                 return;
4668         }
4669
4670         mod_timer(&qdev->timer, jiffies + (5*HZ));
4671 }
4672
4673 static int qlge_probe(struct pci_dev *pdev,
4674                       const struct pci_device_id *pci_entry)
4675 {
4676         struct net_device *ndev = NULL;
4677         struct ql_adapter *qdev = NULL;
4678         static int cards_found = 0;
4679         int err = 0;
4680
4681         ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4682                         min(MAX_CPUS, netif_get_num_default_rss_queues()));
4683         if (!ndev)
4684                 return -ENOMEM;
4685
4686         err = ql_init_device(pdev, ndev, cards_found);
4687         if (err < 0) {
4688                 free_netdev(ndev);
4689                 return err;
4690         }
4691
4692         qdev = netdev_priv(ndev);
4693         SET_NETDEV_DEV(ndev, &pdev->dev);
4694         ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4695                 NETIF_F_TSO | NETIF_F_TSO_ECN |
4696                 NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4697         ndev->features = ndev->hw_features |
4698                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4699         ndev->vlan_features = ndev->hw_features;
4700
4701         if (test_bit(QL_DMA64, &qdev->flags))
4702                 ndev->features |= NETIF_F_HIGHDMA;
4703
4704         /*
4705          * Set up net_device structure.
4706          */
4707         ndev->tx_queue_len = qdev->tx_ring_size;
4708         ndev->irq = pdev->irq;
4709
4710         ndev->netdev_ops = &qlge_netdev_ops;
4711         SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4712         ndev->watchdog_timeo = 10 * HZ;
4713
4714         err = register_netdev(ndev);
4715         if (err) {
4716                 dev_err(&pdev->dev, "net device registration failed.\n");
4717                 ql_release_all(pdev);
4718                 pci_disable_device(pdev);
4719                 return err;
4720         }
4721         /* Start up the timer to trigger EEH if
4722          * the bus goes dead
4723          */
4724         init_timer_deferrable(&qdev->timer);
4725         qdev->timer.data = (unsigned long)qdev;
4726         qdev->timer.function = ql_timer;
4727         qdev->timer.expires = jiffies + (5*HZ);
4728         add_timer(&qdev->timer);
4729         ql_link_off(qdev);
4730         ql_display_dev_info(ndev);
4731         atomic_set(&qdev->lb_count, 0);
4732         cards_found++;
4733         return 0;
4734 }
4735
4736 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4737 {
4738         return qlge_send(skb, ndev);
4739 }
4740
4741 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4742 {
4743         return ql_clean_inbound_rx_ring(rx_ring, budget);
4744 }
4745
4746 static void qlge_remove(struct pci_dev *pdev)
4747 {
4748         struct net_device *ndev = pci_get_drvdata(pdev);
4749         struct ql_adapter *qdev = netdev_priv(ndev);
4750         del_timer_sync(&qdev->timer);
4751         ql_cancel_all_work_sync(qdev);
4752         unregister_netdev(ndev);
4753         ql_release_all(pdev);
4754         pci_disable_device(pdev);
4755         free_netdev(ndev);
4756 }
4757
4758 /* Clean up resources without touching hardware. */
4759 static void ql_eeh_close(struct net_device *ndev)
4760 {
4761         int i;
4762         struct ql_adapter *qdev = netdev_priv(ndev);
4763
4764         if (netif_carrier_ok(ndev)) {
4765                 netif_carrier_off(ndev);
4766                 netif_stop_queue(ndev);
4767         }
4768
4769         /* Disabling the timer */
4770         del_timer_sync(&qdev->timer);
4771         ql_cancel_all_work_sync(qdev);
4772
4773         for (i = 0; i < qdev->rss_ring_count; i++)
4774                 netif_napi_del(&qdev->rx_ring[i].napi);
4775
4776         clear_bit(QL_ADAPTER_UP, &qdev->flags);
4777         ql_tx_ring_clean(qdev);
4778         ql_free_rx_buffers(qdev);
4779         ql_release_adapter_resources(qdev);
4780 }
4781
4782 /*
4783  * This callback is called by the PCI subsystem whenever
4784  * a PCI bus error is detected.
4785  */
4786 static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
4787                                                enum pci_channel_state state)
4788 {
4789         struct net_device *ndev = pci_get_drvdata(pdev);
4790         struct ql_adapter *qdev = netdev_priv(ndev);
4791
4792         switch (state) {
4793         case pci_channel_io_normal:
4794                 return PCI_ERS_RESULT_CAN_RECOVER;
4795         case pci_channel_io_frozen:
4796                 netif_device_detach(ndev);
4797                 if (netif_running(ndev))
4798                         ql_eeh_close(ndev);
4799                 pci_disable_device(pdev);
4800                 return PCI_ERS_RESULT_NEED_RESET;
4801         case pci_channel_io_perm_failure:
4802                 dev_err(&pdev->dev,
4803                         "%s: pci_channel_io_perm_failure.\n", __func__);
4804                 ql_eeh_close(ndev);
4805                 set_bit(QL_EEH_FATAL, &qdev->flags);
4806                 return PCI_ERS_RESULT_DISCONNECT;
4807         }
4808
4809         /* Request a slot reset. */
4810         return PCI_ERS_RESULT_NEED_RESET;
4811 }
4812
4813 /*
4814  * This callback is called after the PCI buss has been reset.
4815  * Basically, this tries to restart the card from scratch.
4816  * This is a shortened version of the device probe/discovery code,
4817  * it resembles the first-half of the () routine.
4818  */
4819 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4820 {
4821         struct net_device *ndev = pci_get_drvdata(pdev);
4822         struct ql_adapter *qdev = netdev_priv(ndev);
4823
4824         pdev->error_state = pci_channel_io_normal;
4825
4826         pci_restore_state(pdev);
4827         if (pci_enable_device(pdev)) {
4828                 netif_err(qdev, ifup, qdev->ndev,
4829                           "Cannot re-enable PCI device after reset.\n");
4830                 return PCI_ERS_RESULT_DISCONNECT;
4831         }
4832         pci_set_master(pdev);
4833
4834         if (ql_adapter_reset(qdev)) {
4835                 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4836                 set_bit(QL_EEH_FATAL, &qdev->flags);
4837                 return PCI_ERS_RESULT_DISCONNECT;
4838         }
4839
4840         return PCI_ERS_RESULT_RECOVERED;
4841 }
4842
4843 static void qlge_io_resume(struct pci_dev *pdev)
4844 {
4845         struct net_device *ndev = pci_get_drvdata(pdev);
4846         struct ql_adapter *qdev = netdev_priv(ndev);
4847         int err = 0;
4848
4849         if (netif_running(ndev)) {
4850                 err = qlge_open(ndev);
4851                 if (err) {
4852                         netif_err(qdev, ifup, qdev->ndev,
4853                                   "Device initialization failed after reset.\n");
4854                         return;
4855                 }
4856         } else {
4857                 netif_err(qdev, ifup, qdev->ndev,
4858                           "Device was not running prior to EEH.\n");
4859         }
4860         mod_timer(&qdev->timer, jiffies + (5*HZ));
4861         netif_device_attach(ndev);
4862 }
4863
4864 static const struct pci_error_handlers qlge_err_handler = {
4865         .error_detected = qlge_io_error_detected,
4866         .slot_reset = qlge_io_slot_reset,
4867         .resume = qlge_io_resume,
4868 };
4869
4870 static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4871 {
4872         struct net_device *ndev = pci_get_drvdata(pdev);
4873         struct ql_adapter *qdev = netdev_priv(ndev);
4874         int err;
4875
4876         netif_device_detach(ndev);
4877         del_timer_sync(&qdev->timer);
4878
4879         if (netif_running(ndev)) {
4880                 err = ql_adapter_down(qdev);
4881                 if (!err)
4882                         return err;
4883         }
4884
4885         ql_wol(qdev);
4886         err = pci_save_state(pdev);
4887         if (err)
4888                 return err;
4889
4890         pci_disable_device(pdev);
4891
4892         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4893
4894         return 0;
4895 }
4896
4897 #ifdef CONFIG_PM
4898 static int qlge_resume(struct pci_dev *pdev)
4899 {
4900         struct net_device *ndev = pci_get_drvdata(pdev);
4901         struct ql_adapter *qdev = netdev_priv(ndev);
4902         int err;
4903
4904         pci_set_power_state(pdev, PCI_D0);
4905         pci_restore_state(pdev);
4906         err = pci_enable_device(pdev);
4907         if (err) {
4908                 netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4909                 return err;
4910         }
4911         pci_set_master(pdev);
4912
4913         pci_enable_wake(pdev, PCI_D3hot, 0);
4914         pci_enable_wake(pdev, PCI_D3cold, 0);
4915
4916         if (netif_running(ndev)) {
4917                 err = ql_adapter_up(qdev);
4918                 if (err)
4919                         return err;
4920         }
4921
4922         mod_timer(&qdev->timer, jiffies + (5*HZ));
4923         netif_device_attach(ndev);
4924
4925         return 0;
4926 }
4927 #endif /* CONFIG_PM */
4928
4929 static void qlge_shutdown(struct pci_dev *pdev)
4930 {
4931         qlge_suspend(pdev, PMSG_SUSPEND);
4932 }
4933
4934 static struct pci_driver qlge_driver = {
4935         .name = DRV_NAME,
4936         .id_table = qlge_pci_tbl,
4937         .probe = qlge_probe,
4938         .remove = qlge_remove,
4939 #ifdef CONFIG_PM
4940         .suspend = qlge_suspend,
4941         .resume = qlge_resume,
4942 #endif
4943         .shutdown = qlge_shutdown,
4944         .err_handler = &qlge_err_handler
4945 };
4946
4947 static int __init qlge_init_module(void)
4948 {
4949         return pci_register_driver(&qlge_driver);
4950 }
4951
4952 static void __exit qlge_exit(void)
4953 {
4954         pci_unregister_driver(&qlge_driver);
4955 }
4956
4957 module_init(qlge_init_module);
4958 module_exit(qlge_exit);