2 * QLogic qlge NIC HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 * See LICENSE.qlge for copyright and licensing details.
5 * Author: Linux qlge network device driver by
6 * Ron Mercer <ron.mercer@qlogic.com>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <linux/types.h>
11 #include <linux/module.h>
12 #include <linux/list.h>
13 #include <linux/pci.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/pagemap.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/dmapool.h>
19 #include <linux/mempool.h>
20 #include <linux/spinlock.h>
21 #include <linux/kthread.h>
22 #include <linux/interrupt.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
27 #include <linux/ipv6.h>
29 #include <linux/tcp.h>
30 #include <linux/udp.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/ethtool.h>
36 #include <linux/if_vlan.h>
37 #include <linux/skbuff.h>
38 #include <linux/delay.h>
40 #include <linux/vmalloc.h>
41 #include <linux/prefetch.h>
42 #include <net/ip6_checksum.h>
46 char qlge_driver_name[] = DRV_NAME;
47 const char qlge_driver_version[] = DRV_VERSION;
49 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
50 MODULE_DESCRIPTION(DRV_STRING " ");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION(DRV_VERSION);
54 static const u32 default_msg =
55 NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
56 /* NETIF_MSG_TIMER | */
61 /* NETIF_MSG_TX_QUEUED | */
62 /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
63 /* NETIF_MSG_PKTDATA | */
64 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66 static int debug = -1; /* defaults above */
67 module_param(debug, int, 0664);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
73 static int qlge_irq_type = MSIX_IRQ;
74 module_param(qlge_irq_type, int, 0664);
75 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
77 static int qlge_mpi_coredump;
78 module_param(qlge_mpi_coredump, int, 0);
79 MODULE_PARM_DESC(qlge_mpi_coredump,
80 "Option to enable MPI firmware dump. "
81 "Default is OFF - Do Not allocate memory. ");
83 static int qlge_force_coredump;
84 module_param(qlge_force_coredump, int, 0);
85 MODULE_PARM_DESC(qlge_force_coredump,
86 "Option to allow force of firmware core dump. "
87 "Default is OFF - Do not allow.");
89 static const struct pci_device_id qlge_pci_tbl[] = {
90 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
91 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
92 /* required last entry */
96 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98 static int ql_wol(struct ql_adapter *);
99 static void qlge_set_multicast_list(struct net_device *);
100 static int ql_adapter_down(struct ql_adapter *);
101 static int ql_adapter_up(struct ql_adapter *);
103 /* This hardware semaphore provides exclusive access to
104 * resources shared between the NIC driver, MPI firmware,
105 * FCOE firmware and the FC driver.
107 static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
112 case SEM_XGMAC0_MASK:
113 sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
115 case SEM_XGMAC1_MASK:
116 sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
119 sem_bits = SEM_SET << SEM_ICB_SHIFT;
121 case SEM_MAC_ADDR_MASK:
122 sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
125 sem_bits = SEM_SET << SEM_FLASH_SHIFT;
128 sem_bits = SEM_SET << SEM_PROBE_SHIFT;
130 case SEM_RT_IDX_MASK:
131 sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
133 case SEM_PROC_REG_MASK:
134 sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
137 netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
141 ql_write32(qdev, SEM, sem_bits | sem_mask);
142 return !(ql_read32(qdev, SEM) & sem_bits);
145 int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
147 unsigned int wait_count = 30;
149 if (!ql_sem_trylock(qdev, sem_mask))
152 } while (--wait_count);
156 void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
158 ql_write32(qdev, SEM, sem_mask);
159 ql_read32(qdev, SEM); /* flush */
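/* Typical locking pattern used by callers in this file (illustrative):
 *
 *   status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
 *   if (status)
 *           return status;
 *   ...touch the shared resource...
 *   ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 *
 * ql_sem_trylock() is the non-blocking variant for callers that can fall
 * back to polling a status bit instead (see ql_8012_port_initialize()).
 */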
162 /* This function waits for a specific bit to come ready
163 * in a given register. It is used mostly by the initialization
164 * process, but is also called from kernel-thread contexts such as
165 * netdev->set_multi, netdev->set_mac_address and netdev->vlan_rx_add_vid.
167 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
170 int count = UDELAY_COUNT;
173 temp = ql_read32(qdev, reg);
175 /* check for errors */
176 if (temp & err_bit) {
177 netif_alert(qdev, probe, qdev->ndev,
178 "register 0x%.08x access error, value = 0x%.08x!.\n",
181 } else if (temp & bit)
183 udelay(UDELAY_DELAY);
186 netif_alert(qdev, probe, qdev->ndev,
187 "Timed out waiting for reg %x to come ready.\n", reg);
191 /* The CFG register is used to download TX and RX control blocks
192 * to the chip. This function waits for an operation to complete.
194 static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
196 int count = UDELAY_COUNT;
200 temp = ql_read32(qdev, CFG);
205 udelay(UDELAY_DELAY);
212 /* Used to issue init control blocks to hw. Maps control block,
213 * sets address, triggers download, waits for completion.
215 int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
225 (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
228 map = pci_map_single(qdev->pdev, ptr, size, direction);
229 if (pci_dma_mapping_error(qdev->pdev, map)) {
230 netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
234 status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
238 status = ql_wait_cfg(qdev, bit);
240 netif_err(qdev, ifup, qdev->ndev,
241 "Timed out waiting for CFG to come ready.\n");
245 ql_write32(qdev, ICB_L, (u32) map);
246 ql_write32(qdev, ICB_H, (u32) (map >> 32));
248 mask = CFG_Q_MASK | (bit << 16);
249 value = bit | (q_id << CFG_Q_SHIFT);
250 ql_write32(qdev, CFG, (mask | value));
253 * Wait for the bit to clear after signaling hw.
255 status = ql_wait_cfg(qdev, bit);
257 ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
258 pci_unmap_single(qdev->pdev, map, size, direction);
262 /* Get a specific MAC address from the CAM. Used for debug and reg dump. */
263 int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
270 case MAC_ADDR_TYPE_MULTI_MAC:
271 case MAC_ADDR_TYPE_CAM_MAC:
274 ql_wait_reg_rdy(qdev,
275 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
278 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
279 (index << MAC_ADDR_IDX_SHIFT) | /* index */
280 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
282 ql_wait_reg_rdy(qdev,
283 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
286 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
288 ql_wait_reg_rdy(qdev,
289 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
292 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
293 (index << MAC_ADDR_IDX_SHIFT) | /* index */
294 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
296 ql_wait_reg_rdy(qdev,
297 MAC_ADDR_IDX, MAC_ADDR_MR, 0);
300 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
301 if (type == MAC_ADDR_TYPE_CAM_MAC) {
303 ql_wait_reg_rdy(qdev,
304 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
307 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
308 (index << MAC_ADDR_IDX_SHIFT) | /* index */
309 MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
311 ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
315 *value++ = ql_read32(qdev, MAC_ADDR_DATA);
319 case MAC_ADDR_TYPE_VLAN:
320 case MAC_ADDR_TYPE_MULTI_FLTR:
322 netif_crit(qdev, ifup, qdev->ndev,
323 "Address type %d not yet supported.\n", type);
330 /* Set up a MAC, multicast or VLAN address for the
331 * inbound frame matching.
333 static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
340 case MAC_ADDR_TYPE_MULTI_MAC:
342 u32 upper = (addr[0] << 8) | addr[1];
343 u32 lower = (addr[2] << 24) | (addr[3] << 16) |
344 (addr[4] << 8) | (addr[5]);
347 ql_wait_reg_rdy(qdev,
348 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
351 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
352 (index << MAC_ADDR_IDX_SHIFT) |
354 ql_write32(qdev, MAC_ADDR_DATA, lower);
356 ql_wait_reg_rdy(qdev,
357 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
360 ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
361 (index << MAC_ADDR_IDX_SHIFT) |
364 ql_write32(qdev, MAC_ADDR_DATA, upper);
366 ql_wait_reg_rdy(qdev,
367 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
372 case MAC_ADDR_TYPE_CAM_MAC:
375 u32 upper = (addr[0] << 8) | addr[1];
377 (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
380 ql_wait_reg_rdy(qdev,
381 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
384 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
385 (index << MAC_ADDR_IDX_SHIFT) | /* index */
387 ql_write32(qdev, MAC_ADDR_DATA, lower);
389 ql_wait_reg_rdy(qdev,
390 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
393 ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
394 (index << MAC_ADDR_IDX_SHIFT) | /* index */
396 ql_write32(qdev, MAC_ADDR_DATA, upper);
398 ql_wait_reg_rdy(qdev,
399 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
402 ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
403 (index << MAC_ADDR_IDX_SHIFT) | /* index */
405 /* This field should also include the queue id
406 and possibly the function id. Right now we hardcode
407 the route field to NIC core.
409 cam_output = (CAM_OUT_ROUTE_NIC |
411 func << CAM_OUT_FUNC_SHIFT) |
412 (0 << CAM_OUT_CQ_ID_SHIFT));
413 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
414 cam_output |= CAM_OUT_RV;
415 /* route to NIC core */
416 ql_write32(qdev, MAC_ADDR_DATA, cam_output);
419 case MAC_ADDR_TYPE_VLAN:
421 u32 enable_bit = *((u32 *) &addr[0]);
422 /* For VLAN, the addr actually holds a bit that
423 * either enables or disables the vlan id we are
424 * addressing. It's either MAC_ADDR_E on or off.
425 * That's bit-27 we're talking about.
428 ql_wait_reg_rdy(qdev,
429 MAC_ADDR_IDX, MAC_ADDR_MW, 0);
432 ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
433 (index << MAC_ADDR_IDX_SHIFT) | /* index */
435 enable_bit); /* enable/disable */
438 case MAC_ADDR_TYPE_MULTI_FLTR:
440 netif_crit(qdev, ifup, qdev->ndev,
441 "Address type %d not yet supported.\n", type);
448 /* Set or clear MAC address in hardware. We sometimes
449 * have to clear it to prevent wrong frame routing
450 * especially in a bonding environment.
452 static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
455 char zero_mac_addr[ETH_ALEN];
459 addr = &qdev->current_mac_addr[0];
460 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
461 "Set Mac addr %pM\n", addr);
463 eth_zero_addr(zero_mac_addr);
464 addr = &zero_mac_addr[0];
465 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
466 "Clearing MAC address\n");
468 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
471 status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
472 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
473 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
475 netif_err(qdev, ifup, qdev->ndev,
476 "Failed to init mac address.\n");
480 void ql_link_on(struct ql_adapter *qdev)
482 netif_err(qdev, link, qdev->ndev, "Link is up.\n");
483 netif_carrier_on(qdev->ndev);
484 ql_set_mac_addr(qdev, 1);
487 void ql_link_off(struct ql_adapter *qdev)
489 netif_err(qdev, link, qdev->ndev, "Link is down.\n");
490 netif_carrier_off(qdev->ndev);
491 ql_set_mac_addr(qdev, 0);
494 /* Get a specific frame routing value from the CAM.
495 * Used for debug and reg dump.
497 int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
501 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
505 ql_write32(qdev, RT_IDX,
506 RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
507 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
510 *value = ql_read32(qdev, RT_DATA);
515 /* The NIC function for this chip has 16 routing indexes. Each one can be used
516 * to route different frame types to various inbound queues. We send broadcast/
517 * multicast/error frames to the default queue for slow handling,
518 * and CAM hit/RSS frames to the fast handling queues.
520 static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
523 int status = -EINVAL; /* Return error if no mask match. */
529 value = RT_IDX_DST_CAM_Q | /* dest */
530 RT_IDX_TYPE_NICQ | /* type */
531 (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
534 case RT_IDX_VALID: /* Promiscuous Mode frames. */
536 value = RT_IDX_DST_DFLT_Q | /* dest */
537 RT_IDX_TYPE_NICQ | /* type */
538 (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
541 case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
543 value = RT_IDX_DST_DFLT_Q | /* dest */
544 RT_IDX_TYPE_NICQ | /* type */
545 (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
548 case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
550 value = RT_IDX_DST_DFLT_Q | /* dest */
551 RT_IDX_TYPE_NICQ | /* type */
552 (RT_IDX_IP_CSUM_ERR_SLOT <<
553 RT_IDX_IDX_SHIFT); /* index */
556 case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
558 value = RT_IDX_DST_DFLT_Q | /* dest */
559 RT_IDX_TYPE_NICQ | /* type */
560 (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
561 RT_IDX_IDX_SHIFT); /* index */
564 case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
566 value = RT_IDX_DST_DFLT_Q | /* dest */
567 RT_IDX_TYPE_NICQ | /* type */
568 (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
571 case RT_IDX_MCAST: /* Pass up All Multicast frames. */
573 value = RT_IDX_DST_DFLT_Q | /* dest */
574 RT_IDX_TYPE_NICQ | /* type */
575 (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
578 case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
580 value = RT_IDX_DST_DFLT_Q | /* dest */
581 RT_IDX_TYPE_NICQ | /* type */
582 (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
585 case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
587 value = RT_IDX_DST_RSS | /* dest */
588 RT_IDX_TYPE_NICQ | /* type */
589 (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
592 case 0: /* Clear the E-bit on an entry. */
594 value = RT_IDX_DST_DFLT_Q | /* dest */
595 RT_IDX_TYPE_NICQ | /* type */
596 (index << RT_IDX_IDX_SHIFT);/* index */
600 netif_err(qdev, ifup, qdev->ndev,
601 "Mask type %d not yet supported.\n", mask);
607 status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
610 value |= (enable ? RT_IDX_E : 0);
611 ql_write32(qdev, RT_IDX, value);
612 ql_write32(qdev, RT_DATA, enable ? mask : 0);
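/* Routing entries are typically programmed during route initialization with
 * calls such as (illustrative):
 *
 *   status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *
 * Passing enable == 0 clears the entry's E-bit and writes 0 to RT_DATA.
 */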
618 static void ql_enable_interrupts(struct ql_adapter *qdev)
620 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
623 static void ql_disable_interrupts(struct ql_adapter *qdev)
625 ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
628 /* If we're running with multiple MSI-X vectors then we enable on the fly.
629 * Otherwise, we may have multiple outstanding workers and don't want to
630 * enable until the last one finishes. In this case, the irq_cnt gets
631 * incremented every time we queue a worker and decremented every time
632 * a worker finishes. Once it hits zero we enable the interrupt.
634 u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
637 unsigned long hw_flags = 0;
638 struct intr_context *ctx = qdev->intr_context + intr;
640 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
641 /* Always enable if we're MSIX multi interrupts and
642 * it's not the default (zeroeth) interrupt.
644 ql_write32(qdev, INTR_EN,
646 var = ql_read32(qdev, STS);
650 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
651 if (atomic_dec_and_test(&ctx->irq_cnt)) {
652 ql_write32(qdev, INTR_EN,
654 var = ql_read32(qdev, STS);
656 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
660 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
663 struct intr_context *ctx;
665 /* HW disables for us if we're MSIX multi interrupts and
666 * it's not the default (zeroeth) interrupt.
668 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
671 ctx = qdev->intr_context + intr;
672 spin_lock(&qdev->hw_lock);
673 if (!atomic_read(&ctx->irq_cnt)) {
674 ql_write32(qdev, INTR_EN,
676 var = ql_read32(qdev, STS);
678 atomic_inc(&ctx->irq_cnt);
679 spin_unlock(&qdev->hw_lock);
683 static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
686 for (i = 0; i < qdev->intr_count; i++) {
687 /* The enable call does an atomic_dec_and_test
688 * and enables only if the result is zero.
689 * So we precharge it here.
691 if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
693 atomic_set(&qdev->intr_context[i].irq_cnt, 1);
694 ql_enable_completion_interrupt(qdev, i);
699 static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
703 __le16 *flash = (__le16 *)&qdev->flash;
705 status = strncmp((char *)&qdev->flash, str, 4);
707 netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
711 for (i = 0; i < size; i++)
712 csum += le16_to_cpu(*flash++);
715 netif_err(qdev, ifup, qdev->ndev,
716 "Invalid flash checksum, csum = 0x%.04x.\n", csum);
721 static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
724 /* wait for reg to come ready */
725 status = ql_wait_reg_rdy(qdev,
726 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
729 /* set up for reg read */
730 ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
731 /* wait for reg to come ready */
732 status = ql_wait_reg_rdy(qdev,
733 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
736 /* This data is stored on flash as an array of
737 * __le32. Since ql_read32() returns cpu endian
738 * we need to swap it back.
740 *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
745 static int ql_get_8000_flash_params(struct ql_adapter *qdev)
749 __le32 *p = (__le32 *)&qdev->flash;
753 /* Get flash offset for function and adjust
757 offset = FUNC0_FLASH_OFFSET / sizeof(u32);
759 offset = FUNC1_FLASH_OFFSET / sizeof(u32);
761 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
764 size = sizeof(struct flash_params_8000) / sizeof(u32);
765 for (i = 0; i < size; i++, p++) {
766 status = ql_read_flash_word(qdev, i+offset, p);
768 netif_err(qdev, ifup, qdev->ndev,
769 "Error reading flash.\n");
774 status = ql_validate_flash(qdev,
775 sizeof(struct flash_params_8000) / sizeof(u16),
778 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
783 /* Extract either manufacturer or BOFM modified
786 if (qdev->flash.flash_params_8000.data_type1 == 2)
788 qdev->flash.flash_params_8000.mac_addr1,
789 qdev->ndev->addr_len);
792 qdev->flash.flash_params_8000.mac_addr,
793 qdev->ndev->addr_len);
795 if (!is_valid_ether_addr(mac_addr)) {
796 netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
801 memcpy(qdev->ndev->dev_addr,
803 qdev->ndev->addr_len);
806 ql_sem_unlock(qdev, SEM_FLASH_MASK);
810 static int ql_get_8012_flash_params(struct ql_adapter *qdev)
814 __le32 *p = (__le32 *)&qdev->flash;
816 u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
818 /* Second function's parameters follow the first
824 if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
827 for (i = 0; i < size; i++, p++) {
828 status = ql_read_flash_word(qdev, i+offset, p);
830 netif_err(qdev, ifup, qdev->ndev,
831 "Error reading flash.\n");
837 status = ql_validate_flash(qdev,
838 sizeof(struct flash_params_8012) / sizeof(u16),
841 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
846 if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
851 memcpy(qdev->ndev->dev_addr,
852 qdev->flash.flash_params_8012.mac_addr,
853 qdev->ndev->addr_len);
856 ql_sem_unlock(qdev, SEM_FLASH_MASK);
860 /* xgmac registers are located behind the xgmac_addr and xgmac_data
861 * register pair. Each read/write requires us to wait for the ready
862 * bit before reading/writing the data.
864 static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
867 /* wait for reg to come ready */
868 status = ql_wait_reg_rdy(qdev,
869 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
872 /* write the data to the data reg */
873 ql_write32(qdev, XGMAC_DATA, data);
874 /* trigger the write */
875 ql_write32(qdev, XGMAC_ADDR, reg);
879 /* xgmac registers are located behind the xgmac_addr and xgmac_data
880 * register pair. Each read/write requires us to wait for the ready
881 * bit before reading/writing the data.
883 int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
886 /* wait for reg to come ready */
887 status = ql_wait_reg_rdy(qdev,
888 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
891 /* set up for reg read */
892 ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
893 /* wait for reg to come ready */
894 status = ql_wait_reg_rdy(qdev,
895 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
899 *data = ql_read32(qdev, XGMAC_DATA);
904 /* This is used for reading the 64-bit statistics regs. */
905 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
911 status = ql_read_xgmac_reg(qdev, reg, &lo);
915 status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
919 *data = (u64) lo | ((u64) hi << 32);
925 static int ql_8000_port_initialize(struct ql_adapter *qdev)
929 * Get MPI firmware version for driver banner
932 status = ql_mb_about_fw(qdev);
935 status = ql_mb_get_fw_state(qdev);
938 /* Wake up a worker to get/set the TX/RX frame sizes. */
939 queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
944 /* Take the MAC Core out of reset.
945 * Enable statistics counting.
946 * Take the transmitter/receiver out of reset.
947 * This functionality may be done in the MPI firmware at a
950 static int ql_8012_port_initialize(struct ql_adapter *qdev)
955 if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
956 /* Another function has the semaphore, so
957 * wait for the port init bit to come ready.
959 netif_info(qdev, link, qdev->ndev,
960 "Another function has the semaphore, so wait for the port init bit to come ready.\n");
961 status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
963 netif_crit(qdev, link, qdev->ndev,
964 "Port initialize timed out.\n");
969 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
970 /* Set the core reset. */
971 status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
974 data |= GLOBAL_CFG_RESET;
975 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
979 /* Clear the core reset and turn on jumbo for receiver. */
980 data &= ~GLOBAL_CFG_RESET; /* Clear core reset. */
981 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. */
982 data |= GLOBAL_CFG_TX_STAT_EN;
983 data |= GLOBAL_CFG_RX_STAT_EN;
984 status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
988 /* Enable transmitter, and clear its reset. */
989 status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
992 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */
993 data |= TX_CFG_EN; /* Enable the transmitter. */
994 status = ql_write_xgmac_reg(qdev, TX_CFG, data);
998 /* Enable receiver and clear its reset. */
999 status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1002 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */
1003 data |= RX_CFG_EN; /* Enable the receiver. */
1004 status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1008 /* Turn on jumbo. */
1010 ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1014 ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1018 /* Signal to the world that the port is enabled. */
1019 ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1021 ql_sem_unlock(qdev, qdev->xg_sem_mask);
1025 static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1027 return PAGE_SIZE << qdev->lbq_buf_order;
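/* Example: with 4 KB pages and lbq_buf_order == 1 (an assumed value for
 * illustration) the "master" page is 8 KB and is handed to the large buffer
 * queue in lbq_buf_size sized chunks by ql_get_next_chunk() below.
 */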
1030 /* Get the next large buffer. */
1031 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1034 rx_ring->lbq_curr_idx++;
1035 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1036 rx_ring->lbq_curr_idx = 0;
1037 rx_ring->lbq_free_cnt++;
1041 static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1042 struct rx_ring *rx_ring)
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1046 pci_dma_sync_single_for_cpu(qdev->pdev,
1047 dma_unmap_addr(lbq_desc, mapaddr),
1048 rx_ring->lbq_buf_size,
1049 PCI_DMA_FROMDEVICE);
1051 /* If it's the last chunk of our master page then
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1055 == ql_lbq_block_size(qdev))
1056 pci_unmap_page(qdev->pdev,
1057 lbq_desc->p.pg_chunk.map,
1058 ql_lbq_block_size(qdev),
1059 PCI_DMA_FROMDEVICE);
1063 /* Get the next small buffer. */
1064 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1066 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1067 rx_ring->sbq_curr_idx++;
1068 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1069 rx_ring->sbq_curr_idx = 0;
1070 rx_ring->sbq_free_cnt++;
1074 /* Update an rx ring index. */
1075 static void ql_update_cq(struct rx_ring *rx_ring)
1077 rx_ring->cnsmr_idx++;
1078 rx_ring->curr_entry++;
1079 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1080 rx_ring->cnsmr_idx = 0;
1081 rx_ring->curr_entry = rx_ring->cq_base;
1085 static void ql_write_cq_idx(struct rx_ring *rx_ring)
1087 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1090 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1091 struct bq_desc *lbq_desc)
1093 if (!rx_ring->pg_chunk.page) {
1095 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1097 qdev->lbq_buf_order);
1098 if (unlikely(!rx_ring->pg_chunk.page)) {
1099 netif_err(qdev, drv, qdev->ndev,
1100 "page allocation failed.\n");
1103 rx_ring->pg_chunk.offset = 0;
1104 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1105 0, ql_lbq_block_size(qdev),
1106 PCI_DMA_FROMDEVICE);
1107 if (pci_dma_mapping_error(qdev->pdev, map)) {
1108 __free_pages(rx_ring->pg_chunk.page,
1109 qdev->lbq_buf_order);
1110 rx_ring->pg_chunk.page = NULL;
1111 netif_err(qdev, drv, qdev->ndev,
1112 "PCI mapping failed.\n");
1115 rx_ring->pg_chunk.map = map;
1116 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1119 /* Copy the current master pg_chunk info
1120 * to the current descriptor.
1122 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1124 /* Adjust the master page chunk for next
1127 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1128 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1129 rx_ring->pg_chunk.page = NULL;
1130 lbq_desc->p.pg_chunk.last_flag = 1;
1132 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1133 get_page(rx_ring->pg_chunk.page);
1134 lbq_desc->p.pg_chunk.last_flag = 0;
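/* Page refcounting note: every chunk except the last one carved from the
 * master page takes an extra reference via get_page(), so the page is only
 * freed after the last consumer of a chunk drops its reference with
 * put_page().
 */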
1138 /* Process (refill) a large buffer queue. */
1139 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1141 u32 clean_idx = rx_ring->lbq_clean_idx;
1142 u32 start_idx = clean_idx;
1143 struct bq_desc *lbq_desc;
1147 while (rx_ring->lbq_free_cnt > 32) {
1148 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
1149 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1150 "lbq: try cleaning clean_idx = %d.\n",
1152 lbq_desc = &rx_ring->lbq[clean_idx];
1153 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1154 rx_ring->lbq_clean_idx = clean_idx;
1155 netif_err(qdev, ifup, qdev->ndev,
1156 "Could not get a page chunk, i=%d, clean_idx =%d .\n",
1161 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset;
1163 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1164 dma_unmap_len_set(lbq_desc, maplen,
1165 rx_ring->lbq_buf_size);
1166 *lbq_desc->addr = cpu_to_le64(map);
1168 pci_dma_sync_single_for_device(qdev->pdev, map,
1169 rx_ring->lbq_buf_size,
1170 PCI_DMA_FROMDEVICE);
1172 if (clean_idx == rx_ring->lbq_len)
1176 rx_ring->lbq_clean_idx = clean_idx;
1177 rx_ring->lbq_prod_idx += 16;
1178 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1179 rx_ring->lbq_prod_idx = 0;
1180 rx_ring->lbq_free_cnt -= 16;
1183 if (start_idx != clean_idx) {
1184 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1185 "lbq: updating prod idx = %d.\n",
1186 rx_ring->lbq_prod_idx);
1187 ql_write_db_reg(rx_ring->lbq_prod_idx,
1188 rx_ring->lbq_prod_idx_db_reg);
1192 /* Process (refill) a small buffer queue. */
1193 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1195 u32 clean_idx = rx_ring->sbq_clean_idx;
1196 u32 start_idx = clean_idx;
1197 struct bq_desc *sbq_desc;
1201 while (rx_ring->sbq_free_cnt > 16) {
1202 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
1203 sbq_desc = &rx_ring->sbq[clean_idx];
1204 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1205 "sbq: try cleaning clean_idx = %d.\n",
1207 if (sbq_desc->p.skb == NULL) {
1208 netif_printk(qdev, rx_status, KERN_DEBUG,
1210 "sbq: getting new skb for index %d.\n",
1213 netdev_alloc_skb(qdev->ndev,
1215 if (sbq_desc->p.skb == NULL) {
1216 rx_ring->sbq_clean_idx = clean_idx;
1219 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1220 map = pci_map_single(qdev->pdev,
1221 sbq_desc->p.skb->data,
1222 rx_ring->sbq_buf_size,
1223 PCI_DMA_FROMDEVICE);
1224 if (pci_dma_mapping_error(qdev->pdev, map)) {
1225 netif_err(qdev, ifup, qdev->ndev,
1226 "PCI mapping failed.\n");
1227 rx_ring->sbq_clean_idx = clean_idx;
1228 dev_kfree_skb_any(sbq_desc->p.skb);
1229 sbq_desc->p.skb = NULL;
1232 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1233 dma_unmap_len_set(sbq_desc, maplen,
1234 rx_ring->sbq_buf_size);
1235 *sbq_desc->addr = cpu_to_le64(map);
1239 if (clean_idx == rx_ring->sbq_len)
1242 rx_ring->sbq_clean_idx = clean_idx;
1243 rx_ring->sbq_prod_idx += 16;
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1245 rx_ring->sbq_prod_idx = 0;
1246 rx_ring->sbq_free_cnt -= 16;
1249 if (start_idx != clean_idx) {
1250 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1251 "sbq: updating prod idx = %d.\n",
1252 rx_ring->sbq_prod_idx);
1253 ql_write_db_reg(rx_ring->sbq_prod_idx,
1254 rx_ring->sbq_prod_idx_db_reg);
1258 static void ql_update_buffer_queues(struct ql_adapter *qdev,
1259 struct rx_ring *rx_ring)
1261 ql_update_sbq(qdev, rx_ring);
1262 ql_update_lbq(qdev, rx_ring);
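/* Refill behavior: the small buffer queue is topped up once more than 16
 * free descriptors have accumulated and the large buffer queue once more
 * than 32 have; both producer indexes advance in batches of 16 before the
 * doorbell registers are written.
 */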
1265 /* Unmaps tx buffers. Can be called from send() if a pci mapping
1266 * fails at some stage, or from the interrupt when a tx completes.
1268 static void ql_unmap_send(struct ql_adapter *qdev,
1269 struct tx_ring_desc *tx_ring_desc, int mapped)
1272 for (i = 0; i < mapped; i++) {
1273 if (i == 0 || (i == 7 && mapped > 7)) {
1275 * Unmap the skb->data area, or the
1276 * external sglist (AKA the Outbound
1277 * Address List (OAL)).
1278 * If it's the zeroeth element, then it's
1279 * the skb->data area. If it's the 7th
1280 * element and there are more than 6 frags,
1284 netif_printk(qdev, tx_done, KERN_DEBUG,
1286 "unmapping OAL area.\n");
1288 pci_unmap_single(qdev->pdev,
1289 dma_unmap_addr(&tx_ring_desc->map[i],
1291 dma_unmap_len(&tx_ring_desc->map[i],
1295 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1296 "unmapping frag %d.\n", i);
1297 pci_unmap_page(qdev->pdev,
1298 dma_unmap_addr(&tx_ring_desc->map[i],
1300 dma_unmap_len(&tx_ring_desc->map[i],
1301 maplen), PCI_DMA_TODEVICE);
1307 /* Map the buffers for this transmit. This will return
1308 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1310 static int ql_map_send(struct ql_adapter *qdev,
1311 struct ob_mac_iocb_req *mac_iocb_ptr,
1312 struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1314 int len = skb_headlen(skb);
1316 int frag_idx, err, map_idx = 0;
1317 struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1318 int frag_cnt = skb_shinfo(skb)->nr_frags;
1321 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1322 "frag_cnt = %d.\n", frag_cnt);
1325 * Map the skb buffer first.
1327 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1329 err = pci_dma_mapping_error(qdev->pdev, map);
1331 netif_err(qdev, tx_queued, qdev->ndev,
1332 "PCI mapping failed with error: %d\n", err);
1334 return NETDEV_TX_BUSY;
1337 tbd->len = cpu_to_le32(len);
1338 tbd->addr = cpu_to_le64(map);
1339 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1340 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1344 * This loop fills the remainder of the 8 address descriptors
1345 * in the IOCB. If there are more than 7 fragments, then the
1346 * eighth address desc will point to an external list (OAL).
1347 * When this happens, the remainder of the frags will be stored
1350 for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1351 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1353 if (frag_idx == 6 && frag_cnt > 7) {
1354 /* Let's tack on an sglist.
1355 * Our control block will now
1357 * iocb->seg[0] = skb->data
1358 * iocb->seg[1] = frag[0]
1359 * iocb->seg[2] = frag[1]
1360 * iocb->seg[3] = frag[2]
1361 * iocb->seg[4] = frag[3]
1362 * iocb->seg[5] = frag[4]
1363 * iocb->seg[6] = frag[5]
1364 * iocb->seg[7] = ptr to OAL (external sglist)
1365 * oal->seg[0] = frag[6]
1366 * oal->seg[1] = frag[7]
1367 * oal->seg[2] = frag[8]
1368 * oal->seg[3] = frag[9]
1369 * oal->seg[4] = frag[10]
1372 /* Tack on the OAL in the eighth segment of IOCB. */
1373 map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1376 err = pci_dma_mapping_error(qdev->pdev, map);
1378 netif_err(qdev, tx_queued, qdev->ndev,
1379 "PCI mapping outbound address list with error: %d\n",
1384 tbd->addr = cpu_to_le64(map);
1386 * The length is the number of fragments
1387 * that remain to be mapped times the length
1388 * of our sglist (OAL).
1391 cpu_to_le32((sizeof(struct tx_buf_desc) *
1392 (frag_cnt - frag_idx)) | TX_DESC_C);
1393 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1395 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1396 sizeof(struct oal));
1397 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1401 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1404 err = dma_mapping_error(&qdev->pdev->dev, map);
1406 netif_err(qdev, tx_queued, qdev->ndev,
1407 "PCI mapping frags failed with error: %d.\n",
1412 tbd->addr = cpu_to_le64(map);
1413 tbd->len = cpu_to_le32(skb_frag_size(frag));
1414 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1415 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1416 skb_frag_size(frag));
1419 /* Save the number of segments we've mapped. */
1420 tx_ring_desc->map_cnt = map_idx;
1421 /* Terminate the last segment. */
1422 tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1423 return NETDEV_TX_OK;
1427 * If the first frag mapping failed, then i will be zero.
1428 * This causes the unmap of the skb->data area. Otherwise
1429 * we pass in the number of frags that mapped successfully
1430 * so they can be unmapped.
1432 ql_unmap_send(qdev, tx_ring_desc, map_idx);
1433 return NETDEV_TX_BUSY;
1436 /* Categorizing receive firmware frame errors */
1437 static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
1438 struct rx_ring *rx_ring)
1440 struct nic_stats *stats = &qdev->nic_stats;
1442 stats->rx_err_count++;
1443 rx_ring->rx_errors++;
1445 switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
1446 case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
1447 stats->rx_code_err++;
1449 case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
1450 stats->rx_oversize_err++;
1452 case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
1453 stats->rx_undersize_err++;
1455 case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
1456 stats->rx_preamble_err++;
1458 case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
1459 stats->rx_frame_len_err++;
1461 case IB_MAC_IOCB_RSP_ERR_CRC:
1462 stats->rx_crc_err++;
1469 * ql_update_mac_hdr_len - helper routine to update the mac header length
1470 * based on vlan tags if present
1472 static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
1473 struct ib_mac_iocb_rsp *ib_mac_rsp,
1474 void *page, size_t *len)
1478 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1480 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
1482 /* Look for stacked vlan tags in ethertype field */
1483 if (tags[6] == ETH_P_8021Q &&
1484 tags[8] == ETH_P_8021Q)
1485 *len += 2 * VLAN_HLEN;
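/* A rough sketch of the resulting header length when hardware VLAN tag
 * stripping (NETIF_F_HW_VLAN_CTAG_RX) is disabled, assuming standard 802.1Q
 * framing:
 *
 *   untagged frame       -> *len stays at ETH_HLEN
 *   stacked (QinQ) tags  -> *len += 2 * VLAN_HLEN
 *
 * Callers initialize *len to ETH_HLEN; this helper only ever adds to it.
 */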
1491 /* Process an inbound completion from an rx ring. */
1492 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1493 struct rx_ring *rx_ring,
1494 struct ib_mac_iocb_rsp *ib_mac_rsp,
1498 struct sk_buff *skb;
1499 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1500 struct napi_struct *napi = &rx_ring->napi;
1502 /* Frame error, so drop the packet. */
1503 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1504 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1505 put_page(lbq_desc->p.pg_chunk.page);
1508 napi->dev = qdev->ndev;
1510 skb = napi_get_frags(napi);
1512 netif_err(qdev, drv, qdev->ndev,
1513 "Couldn't get an skb, exiting.\n");
1514 rx_ring->rx_dropped++;
1515 put_page(lbq_desc->p.pg_chunk.page);
1518 prefetch(lbq_desc->p.pg_chunk.va);
1519 __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1520 lbq_desc->p.pg_chunk.page,
1521 lbq_desc->p.pg_chunk.offset,
1525 skb->data_len += length;
1526 skb->truesize += length;
1527 skb_shinfo(skb)->nr_frags++;
1529 rx_ring->rx_packets++;
1530 rx_ring->rx_bytes += length;
1531 skb->ip_summed = CHECKSUM_UNNECESSARY;
1532 skb_record_rx_queue(skb, rx_ring->cq_id);
1533 if (vlan_id != 0xffff)
1534 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1535 napi_gro_frags(napi);
1538 /* Process an inbound completion from an rx ring. */
1539 static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1540 struct rx_ring *rx_ring,
1541 struct ib_mac_iocb_rsp *ib_mac_rsp,
1545 struct net_device *ndev = qdev->ndev;
1546 struct sk_buff *skb = NULL;
1548 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1549 struct napi_struct *napi = &rx_ring->napi;
1550 size_t hlen = ETH_HLEN;
1552 skb = netdev_alloc_skb(ndev, length);
1554 rx_ring->rx_dropped++;
1555 put_page(lbq_desc->p.pg_chunk.page);
1559 addr = lbq_desc->p.pg_chunk.va;
1562 /* Frame error, so drop the packet. */
1563 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1564 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1568 /* Update the MAC header length*/
1569 ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
1571 /* The max framesize filter on this chip is set higher than
1572 * MTU since FCoE uses 2k frames.
1574 if (skb->len > ndev->mtu + hlen) {
1575 netif_err(qdev, drv, qdev->ndev,
1576 "Segment too small, dropping.\n");
1577 rx_ring->rx_dropped++;
1580 memcpy(skb_put(skb, hlen), addr, hlen);
1581 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1582 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1584 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1585 lbq_desc->p.pg_chunk.offset + hlen,
1587 skb->len += length - hlen;
1588 skb->data_len += length - hlen;
1589 skb->truesize += length - hlen;
1591 rx_ring->rx_packets++;
1592 rx_ring->rx_bytes += skb->len;
1593 skb->protocol = eth_type_trans(skb, ndev);
1594 skb_checksum_none_assert(skb);
1596 if ((ndev->features & NETIF_F_RXCSUM) &&
1597 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1599 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1600 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1601 "TCP checksum done!\n");
1602 skb->ip_summed = CHECKSUM_UNNECESSARY;
1603 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1604 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1605 /* Unfragmented ipv4 UDP frame. */
1607 (struct iphdr *)((u8 *)addr + hlen);
1608 if (!(iph->frag_off &
1609 htons(IP_MF|IP_OFFSET))) {
1610 skb->ip_summed = CHECKSUM_UNNECESSARY;
1611 netif_printk(qdev, rx_status, KERN_DEBUG,
1613 "UDP checksum done!\n");
1618 skb_record_rx_queue(skb, rx_ring->cq_id);
1619 if (vlan_id != 0xffff)
1620 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1621 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1622 napi_gro_receive(napi, skb);
1624 netif_receive_skb(skb);
1627 dev_kfree_skb_any(skb);
1628 put_page(lbq_desc->p.pg_chunk.page);
1631 /* Process an inbound completion from an rx ring. */
1632 static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1633 struct rx_ring *rx_ring,
1634 struct ib_mac_iocb_rsp *ib_mac_rsp,
1638 struct net_device *ndev = qdev->ndev;
1639 struct sk_buff *skb = NULL;
1640 struct sk_buff *new_skb = NULL;
1641 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1643 skb = sbq_desc->p.skb;
1644 /* Allocate new_skb and copy */
1645 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1646 if (new_skb == NULL) {
1647 rx_ring->rx_dropped++;
1650 skb_reserve(new_skb, NET_IP_ALIGN);
1651 memcpy(skb_put(new_skb, length), skb->data, length);
1654 /* Frame error, so drop the packet. */
1655 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1656 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1657 dev_kfree_skb_any(skb);
1661 /* loopback self test for ethtool */
1662 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1663 ql_check_lb_frame(qdev, skb);
1664 dev_kfree_skb_any(skb);
1668 /* The max framesize filter on this chip is set higher than
1669 * MTU since FCoE uses 2k frames.
1671 if (skb->len > ndev->mtu + ETH_HLEN) {
1672 dev_kfree_skb_any(skb);
1673 rx_ring->rx_dropped++;
1677 prefetch(skb->data);
1678 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1679 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1681 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1682 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1683 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1684 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1685 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1686 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1688 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1689 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1690 "Promiscuous Packet.\n");
1692 rx_ring->rx_packets++;
1693 rx_ring->rx_bytes += skb->len;
1694 skb->protocol = eth_type_trans(skb, ndev);
1695 skb_checksum_none_assert(skb);
1697 /* If rx checksum is on, and there are no
1698 * csum or frame errors.
1700 if ((ndev->features & NETIF_F_RXCSUM) &&
1701 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1703 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1704 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1705 "TCP checksum done!\n");
1706 skb->ip_summed = CHECKSUM_UNNECESSARY;
1707 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1708 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1709 /* Unfragmented ipv4 UDP frame. */
1710 struct iphdr *iph = (struct iphdr *) skb->data;
1711 if (!(iph->frag_off &
1712 htons(IP_MF|IP_OFFSET))) {
1713 skb->ip_summed = CHECKSUM_UNNECESSARY;
1714 netif_printk(qdev, rx_status, KERN_DEBUG,
1716 "UDP checksum done!\n");
1721 skb_record_rx_queue(skb, rx_ring->cq_id);
1722 if (vlan_id != 0xffff)
1723 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
1724 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1725 napi_gro_receive(&rx_ring->napi, skb);
1727 netif_receive_skb(skb);
1730 static void ql_realign_skb(struct sk_buff *skb, int len)
1732 void *temp_addr = skb->data;
1734 /* Undo the skb_reserve(skb,32) we did before
1735 * giving to hardware, and realign data on
1736 * a 2-byte boundary.
1738 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1739 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1740 skb_copy_to_linear_data(skb, temp_addr,
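/* Illustrative arithmetic, assuming QLGE_SB_PAD == 32 and NET_IP_ALIGN == 2
 * (the default on most architectures): data and tail move back by 30 bytes,
 * leaving a 2-byte offset so that the 14-byte Ethernet header ends on a
 * 4-byte boundary and the IP header behind it is 4-byte aligned.
 */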
1745 * This function builds an skb for the given inbound
1746 * completion. It will be rewritten for readability in the near
1747 * future, but for now it works well.
1749 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1750 struct rx_ring *rx_ring,
1751 struct ib_mac_iocb_rsp *ib_mac_rsp)
1753 struct bq_desc *lbq_desc;
1754 struct bq_desc *sbq_desc;
1755 struct sk_buff *skb = NULL;
1756 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1757 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1758 size_t hlen = ETH_HLEN;
1761 * Handle the header buffer if present.
1763 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1764 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1765 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1766 "Header of %d bytes in small buffer.\n", hdr_len);
1768 * Headers fit nicely into a small buffer.
1770 sbq_desc = ql_get_curr_sbuf(rx_ring);
1771 pci_unmap_single(qdev->pdev,
1772 dma_unmap_addr(sbq_desc, mapaddr),
1773 dma_unmap_len(sbq_desc, maplen),
1774 PCI_DMA_FROMDEVICE);
1775 skb = sbq_desc->p.skb;
1776 ql_realign_skb(skb, hdr_len);
1777 skb_put(skb, hdr_len);
1778 sbq_desc->p.skb = NULL;
1782 * Handle the data buffer(s).
1784 if (unlikely(!length)) { /* Is there data too? */
1785 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1786 "No Data buffer in this packet.\n");
1790 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1791 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1792 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1793 "Headers in small, data of %d bytes in small, combine them.\n",
1796 * Data is less than small buffer size so it's
1797 * stuffed in a small buffer.
1798 * For this case we append the data
1799 * from the "data" small buffer to the "header" small
1802 sbq_desc = ql_get_curr_sbuf(rx_ring);
1803 pci_dma_sync_single_for_cpu(qdev->pdev,
1805 (sbq_desc, mapaddr),
1808 PCI_DMA_FROMDEVICE);
1809 memcpy(skb_put(skb, length),
1810 sbq_desc->p.skb->data, length);
1811 pci_dma_sync_single_for_device(qdev->pdev,
1818 PCI_DMA_FROMDEVICE);
1820 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1821 "%d bytes in a single small buffer.\n",
1823 sbq_desc = ql_get_curr_sbuf(rx_ring);
1824 skb = sbq_desc->p.skb;
1825 ql_realign_skb(skb, length);
1826 skb_put(skb, length);
1827 pci_unmap_single(qdev->pdev,
1828 dma_unmap_addr(sbq_desc,
1830 dma_unmap_len(sbq_desc,
1832 PCI_DMA_FROMDEVICE);
1833 sbq_desc->p.skb = NULL;
1835 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1836 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1837 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1838 "Header in small, %d bytes in large. Chain large to small!\n",
1841 * The data is in a single large buffer. We
1842 * chain it to the header buffer's skb and let
1845 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1846 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1847 "Chaining page at offset = %d, for %d bytes to skb.\n",
1848 lbq_desc->p.pg_chunk.offset, length);
1849 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1850 lbq_desc->p.pg_chunk.offset,
1853 skb->data_len += length;
1854 skb->truesize += length;
1857 * The headers and data are in a single large buffer. We
1858 * copy it to a new skb and let it go. This can happen with
1859 * jumbo mtu on a non-TCP/UDP frame.
1861 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1862 skb = netdev_alloc_skb(qdev->ndev, length);
1864 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1865 "No skb available, drop the packet.\n");
1868 pci_unmap_page(qdev->pdev,
1869 dma_unmap_addr(lbq_desc,
1871 dma_unmap_len(lbq_desc, maplen),
1872 PCI_DMA_FROMDEVICE);
1873 skb_reserve(skb, NET_IP_ALIGN);
1874 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1875 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1877 skb_fill_page_desc(skb, 0,
1878 lbq_desc->p.pg_chunk.page,
1879 lbq_desc->p.pg_chunk.offset,
1882 skb->data_len += length;
1883 skb->truesize += length;
1885 ql_update_mac_hdr_len(qdev, ib_mac_rsp,
1886 lbq_desc->p.pg_chunk.va,
1888 __pskb_pull_tail(skb, hlen);
1892 * The data is in a chain of large buffers
1893 * pointed to by a small buffer. We loop
1894 * through and chain them to our small header
1896 * frags: There are 18 max frags and our small
1897 * buffer will hold 32 of them. The thing is,
1898 * we'll use 3 max for our 9000 byte jumbo
1899 * frames. If the MTU goes up we could
1900 * eventually be in trouble.
1903 sbq_desc = ql_get_curr_sbuf(rx_ring);
1904 pci_unmap_single(qdev->pdev,
1905 dma_unmap_addr(sbq_desc, mapaddr),
1906 dma_unmap_len(sbq_desc, maplen),
1907 PCI_DMA_FROMDEVICE);
1908 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1910 * This is a non-TCP/UDP IP frame, so
1911 * the headers aren't split into a small
1912 * buffer. We have to use the small buffer
1913 * that contains our sg list as our skb to
1914 * send upstairs. Copy the sg list here to
1915 * a local buffer and use it to find the
1918 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1919 "%d bytes of headers & data in chain of large.\n",
1921 skb = sbq_desc->p.skb;
1922 sbq_desc->p.skb = NULL;
1923 skb_reserve(skb, NET_IP_ALIGN);
1926 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1927 size = (length < rx_ring->lbq_buf_size) ? length :
1928 rx_ring->lbq_buf_size;
1930 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1931 "Adding page %d to skb for %d bytes.\n",
1933 skb_fill_page_desc(skb, i,
1934 lbq_desc->p.pg_chunk.page,
1935 lbq_desc->p.pg_chunk.offset,
1938 skb->data_len += size;
1939 skb->truesize += size;
1942 } while (length > 0);
1943 ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
1945 __pskb_pull_tail(skb, hlen);
1950 /* Process an inbound completion from an rx ring. */
1951 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1952 struct rx_ring *rx_ring,
1953 struct ib_mac_iocb_rsp *ib_mac_rsp,
1956 struct net_device *ndev = qdev->ndev;
1957 struct sk_buff *skb = NULL;
1959 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1961 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1962 if (unlikely(!skb)) {
1963 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1964 "No skb available, drop packet.\n");
1965 rx_ring->rx_dropped++;
1969 /* Frame error, so drop the packet. */
1970 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1971 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
1972 dev_kfree_skb_any(skb);
1976 /* The max framesize filter on this chip is set higher than
1977 * MTU since FCoE uses 2k frames.
1979 if (skb->len > ndev->mtu + ETH_HLEN) {
1980 dev_kfree_skb_any(skb);
1981 rx_ring->rx_dropped++;
1985 /* loopback self test for ethtool */
1986 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1987 ql_check_lb_frame(qdev, skb);
1988 dev_kfree_skb_any(skb);
1992 prefetch(skb->data);
1993 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1994 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1995 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1996 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1997 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1998 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1999 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
2000 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
2001 rx_ring->rx_multicast++;
2003 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
2004 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2005 "Promiscuous Packet.\n");
2008 skb->protocol = eth_type_trans(skb, ndev);
2009 skb_checksum_none_assert(skb);
2011 /* If rx checksum is on, and there are no
2012 * csum or frame errors.
2014 if ((ndev->features & NETIF_F_RXCSUM) &&
2015 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
2017 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
2018 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2019 "TCP checksum done!\n");
2020 skb->ip_summed = CHECKSUM_UNNECESSARY;
2021 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
2022 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2023 /* Unfragmented ipv4 UDP frame. */
2024 struct iphdr *iph = (struct iphdr *) skb->data;
2025 if (!(iph->frag_off &
2026 htons(IP_MF|IP_OFFSET))) {
2027 skb->ip_summed = CHECKSUM_UNNECESSARY;
2028 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2029 "TCP checksum done!\n");
2034 rx_ring->rx_packets++;
2035 rx_ring->rx_bytes += skb->len;
2036 skb_record_rx_queue(skb, rx_ring->cq_id);
2037 if (vlan_id != 0xffff)
2038 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
2039 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2040 napi_gro_receive(&rx_ring->napi, skb);
2042 netif_receive_skb(skb);
2045 /* Process an inbound completion from an rx ring. */
2046 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2047 struct rx_ring *rx_ring,
2048 struct ib_mac_iocb_rsp *ib_mac_rsp)
2050 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2051 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2052 (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
2053 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2054 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2056 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2058 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2059 /* The data and headers are split into
2062 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2064 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2065 /* The data fit in a single small buffer.
2066 * Allocate a new skb, copy the data and
2067 * return the buffer to the free pool.
2069 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2071 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2072 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2073 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2074 /* TCP packet in a page chunk that's been checksummed.
2075 * Tack it on to our GRO skb and let it go.
2077 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2079 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2080 /* Non-TCP packet in a page chunk. Allocate an
2081 * skb, tack it on frags, and send it up.
2083 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2086 /* Non-TCP/UDP large frames that span multiple buffers
2087 * can be processed correctly by the split frame logic.
2089 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2093 return (unsigned long)length;
2096 /* Process an outbound completion from an rx ring. */
2097 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2098 struct ob_mac_iocb_rsp *mac_rsp)
2100 struct tx_ring *tx_ring;
2101 struct tx_ring_desc *tx_ring_desc;
2103 QL_DUMP_OB_MAC_RSP(mac_rsp);
2104 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2105 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2106 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2107 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2108 tx_ring->tx_packets++;
2109 dev_kfree_skb(tx_ring_desc->skb);
2110 tx_ring_desc->skb = NULL;
2112 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2115 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2116 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2117 netif_warn(qdev, tx_done, qdev->ndev,
2118 "Total descriptor length did not match transfer length.\n");
2120 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2121 netif_warn(qdev, tx_done, qdev->ndev,
2122 "Frame too short to be valid, not sent.\n");
2124 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2125 netif_warn(qdev, tx_done, qdev->ndev,
2126 "Frame too long, but sent anyway.\n");
2128 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2129 netif_warn(qdev, tx_done, qdev->ndev,
2130 "PCI backplane error. Frame not sent.\n");
2133 atomic_inc(&tx_ring->tx_count);
2136 /* Fire up a handler to reset the MPI processor. */
2137 void ql_queue_fw_error(struct ql_adapter *qdev)
2140 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2143 void ql_queue_asic_error(struct ql_adapter *qdev)
2146 ql_disable_interrupts(qdev);
2147 /* Clear adapter up bit to signal the recovery
2148 * process that it shouldn't kill the reset worker
2151 clear_bit(QL_ADAPTER_UP, &qdev->flags);
2152 /* Set asic recovery bit to indicate to the reset process that we are
2153 * in fatal error recovery rather than a normal close
2155 set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2156 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2159 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2160 struct ib_ae_iocb_rsp *ib_ae_rsp)
2162 switch (ib_ae_rsp->event) {
2163 case MGMT_ERR_EVENT:
2164 netif_err(qdev, rx_err, qdev->ndev,
2165 "Management Processor Fatal Error.\n");
2166 ql_queue_fw_error(qdev);
2169 case CAM_LOOKUP_ERR_EVENT:
2170 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2171 netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2172 ql_queue_asic_error(qdev);
2175 case SOFT_ECC_ERROR_EVENT:
2176 netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2177 ql_queue_asic_error(qdev);
2180 case PCI_ERR_ANON_BUF_RD:
2181 netdev_err(qdev->ndev, "PCI error occurred when reading "
2182 "anonymous buffers from rx_ring %d.\n",
2184 ql_queue_asic_error(qdev);
2188 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2190 ql_queue_asic_error(qdev);
2195 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2197 struct ql_adapter *qdev = rx_ring->qdev;
2198 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2199 struct ob_mac_iocb_rsp *net_rsp = NULL;
2202 struct tx_ring *tx_ring;
2203 /* While there are entries in the completion queue. */
2204 while (prod != rx_ring->cnsmr_idx) {
2206 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2207 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2208 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2210 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2212 switch (net_rsp->opcode) {
2214 case OPCODE_OB_MAC_TSO_IOCB:
2215 case OPCODE_OB_MAC_IOCB:
2216 ql_process_mac_tx_intr(qdev, net_rsp);
2219 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2220 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2224 ql_update_cq(rx_ring);
2225 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2229 ql_write_cq_idx(rx_ring);
2230 tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2231 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2232 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2234 * The queue got stopped because the tx_ring was full.
2235 * Wake it up, because it's now at least 25% empty.
2237 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2243 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2245 struct ql_adapter *qdev = rx_ring->qdev;
2246 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2247 struct ql_net_rsp_iocb *net_rsp;
2250 /* While there are entries in the completion queue. */
2251 while (prod != rx_ring->cnsmr_idx) {
2253 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2254 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2255 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2257 net_rsp = rx_ring->curr_entry;
2259 switch (net_rsp->opcode) {
2260 case OPCODE_IB_MAC_IOCB:
2261 ql_process_mac_rx_intr(qdev, rx_ring,
2262 (struct ib_mac_iocb_rsp *)
2266 case OPCODE_IB_AE_IOCB:
2267 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2271 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2272 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2277 ql_update_cq(rx_ring);
2278 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2279 if (count == budget)
2282 ql_update_buffer_queues(qdev, rx_ring);
2283 ql_write_cq_idx(rx_ring);
2287 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2289 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2290 struct ql_adapter *qdev = rx_ring->qdev;
2291 struct rx_ring *trx_ring;
2292 int i, work_done = 0;
2293 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2295 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2296 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2298 /* Service the TX rings first. They start
2299 * right after the RSS rings. */
2300 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2301 trx_ring = &qdev->rx_ring[i];
2302 /* If this TX completion ring belongs to this vector and
2303 * it's not empty then service it.
2305 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2306 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2307 trx_ring->cnsmr_idx)) {
2308 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2309 "%s: Servicing TX completion ring %d.\n",
2310 __func__, trx_ring->cq_id);
2311 ql_clean_outbound_rx_ring(trx_ring);
2316 * Now service the RSS ring if it's active.
2318 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2319 rx_ring->cnsmr_idx) {
2320 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2321 "%s: Servicing RX completion ring %d.\n",
2322 __func__, rx_ring->cq_id);
2323 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2326 if (work_done < budget) {
2327 napi_complete(napi);
2328 ql_enable_completion_interrupt(qdev, rx_ring->irq);
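/* Set the chip's VLAN receive configuration to match the
 * NETIF_F_HW_VLAN_CTAG_RX feature flag.
 */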
2333 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2335 struct ql_adapter *qdev = netdev_priv(ndev);
2337 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2338 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2339 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2341 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2346 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
2347 * based on the features to enable/disable hardware vlan accel
2349 static int qlge_update_hw_vlan_features(struct net_device *ndev,
2350 netdev_features_t features)
2352 struct ql_adapter *qdev = netdev_priv(ndev);
2354 bool need_restart = netif_running(ndev);
2357 status = ql_adapter_down(qdev);
2359 netif_err(qdev, link, qdev->ndev,
2360 "Failed to bring down the adapter\n");
2365 /* update the features with the recent change */
2366 ndev->features = features;
2369 status = ql_adapter_up(qdev);
2371 netif_err(qdev, link, qdev->ndev,
2372 "Failed to bring up the adapter\n");
2380 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2381 netdev_features_t features)
2385 /* Update the behavior of vlan accel in the adapter */
2386 err = qlge_update_hw_vlan_features(ndev, features);
2393 static int qlge_set_features(struct net_device *ndev,
2394 netdev_features_t features)
2396 netdev_features_t changed = ndev->features ^ features;
2398 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2399 qlge_vlan_mode(ndev, features);
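/* Program one VLAN id into the MAC address registers. The caller must
 * hold the MAC_ADDR hardware semaphore.
 */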
2404 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2406 u32 enable_bit = MAC_ADDR_E;
2409 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2410 MAC_ADDR_TYPE_VLAN, vid);
2412 netif_err(qdev, ifup, qdev->ndev,
2413 "Failed to init vlan address.\n");
2417 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2419 struct ql_adapter *qdev = netdev_priv(ndev);
2423 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2427 err = __qlge_vlan_rx_add_vid(qdev, vid);
2428 set_bit(vid, qdev->active_vlans);
2430 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2435 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2440 err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2441 MAC_ADDR_TYPE_VLAN, vid);
2443 netif_err(qdev, ifup, qdev->ndev,
2444 "Failed to clear vlan address.\n");
2448 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2450 struct ql_adapter *qdev = netdev_priv(ndev);
2454 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2458 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2459 clear_bit(vid, qdev->active_vlans);
2461 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
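/* Re-program all active VLAN ids, e.g. after an adapter reset. */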
2466 static void qlge_restore_vlan(struct ql_adapter *qdev)
2471 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2475 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2476 __qlge_vlan_rx_add_vid(qdev, vid);
2478 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2481 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2482 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2484 struct rx_ring *rx_ring = dev_id;
2485 napi_schedule(&rx_ring->napi);
2489 /* This handles a fatal error, MPI activity, and the default
2490 * rx_ring in an MSI-X multiple vector environment.
2491 * In an MSI/Legacy environment it also processes the rest of
2494 static irqreturn_t qlge_isr(int irq, void *dev_id)
2496 struct rx_ring *rx_ring = dev_id;
2497 struct ql_adapter *qdev = rx_ring->qdev;
2498 struct intr_context *intr_context = &qdev->intr_context[0];
2502 spin_lock(&qdev->hw_lock);
2503 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2504 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2505 "Shared Interrupt, Not ours!\n");
2506 spin_unlock(&qdev->hw_lock);
2509 spin_unlock(&qdev->hw_lock);
2511 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2514 * Check for fatal error.
2517 ql_queue_asic_error(qdev);
2518 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2519 var = ql_read32(qdev, ERR_STS);
2520 netdev_err(qdev->ndev, "Resetting chip. "
2521 "Error Status Register = 0x%x\n", var);
2526 * Check MPI processor activity.
2528 if ((var & STS_PI) &&
2529 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2531 * We've got an async event or mailbox completion.
2532 * Handle it and clear the source of the interrupt.
2534 netif_err(qdev, intr, qdev->ndev,
2535 "Got MPI processor interrupt.\n");
2536 ql_disable_completion_interrupt(qdev, intr_context->intr);
2537 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2538 queue_delayed_work_on(smp_processor_id(),
2539 qdev->workqueue, &qdev->mpi_work, 0);
2544 * Get the bit-mask that shows the active queues for this
2545 * pass. Compare it to the queues that this irq services
2546 * and call napi if there's a match.
2548 var = ql_read32(qdev, ISR1);
2549 if (var & intr_context->irq_mask) {
2550 netif_info(qdev, intr, qdev->ndev,
2551 "Waking handler for rx_ring[0].\n");
2552 ql_disable_completion_interrupt(qdev, intr_context->intr);
2553 napi_schedule(&rx_ring->napi);
2556 ql_enable_completion_interrupt(qdev, intr_context->intr);
2557 return work_done ? IRQ_HANDLED : IRQ_NONE;
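/* Fill in the TSO fields of the outbound IOCB for a GSO frame and seed
 * the TCP checksum with the pseudo-header checksum the hardware needs.
 */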
2560 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2563 if (skb_is_gso(skb)) {
2565 __be16 l3_proto = vlan_get_protocol(skb);
2567 err = skb_cow_head(skb, 0);
2571 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2572 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2573 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2574 mac_iocb_ptr->total_hdrs_len =
2575 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2576 mac_iocb_ptr->net_trans_offset =
2577 cpu_to_le16(skb_network_offset(skb) |
2578 skb_transport_offset(skb)
2579 << OB_MAC_TRANSPORT_HDR_SHIFT);
2580 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2581 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2582 if (likely(l3_proto == htons(ETH_P_IP))) {
2583 struct iphdr *iph = ip_hdr(skb);
2585 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2586 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2590 } else if (l3_proto == htons(ETH_P_IPV6)) {
2591 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2592 tcp_hdr(skb)->check =
2593 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2594 &ipv6_hdr(skb)->daddr,
2602 static void ql_hw_csum_setup(struct sk_buff *skb,
2603 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2606 struct iphdr *iph = ip_hdr(skb);
2608 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2609 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2610 mac_iocb_ptr->net_trans_offset =
2611 cpu_to_le16(skb_network_offset(skb) |
2612 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2614 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2615 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2616 if (likely(iph->protocol == IPPROTO_TCP)) {
2617 check = &(tcp_hdr(skb)->check);
2618 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2619 mac_iocb_ptr->total_hdrs_len =
2620 cpu_to_le16(skb_transport_offset(skb) +
2621 (tcp_hdr(skb)->doff << 2));
2623 check = &(udp_hdr(skb)->check);
2624 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2625 mac_iocb_ptr->total_hdrs_len =
2626 cpu_to_le16(skb_transport_offset(skb) +
2627 sizeof(struct udphdr));
2629 *check = ~csum_tcpudp_magic(iph->saddr,
2630 iph->daddr, len, iph->protocol, 0);
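/* Main transmit entry point: build an outbound MAC IOCB for the skb,
 * map it for DMA and ring the TX doorbell.
 */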
2633 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2635 struct tx_ring_desc *tx_ring_desc;
2636 struct ob_mac_iocb_req *mac_iocb_ptr;
2637 struct ql_adapter *qdev = netdev_priv(ndev);
2639 struct tx_ring *tx_ring;
2640 u32 tx_ring_idx = (u32) skb->queue_mapping;
2642 tx_ring = &qdev->tx_ring[tx_ring_idx];
2644 if (skb_padto(skb, ETH_ZLEN))
2645 return NETDEV_TX_OK;
2647 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2648 netif_info(qdev, tx_queued, qdev->ndev,
2649 "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
2650 __func__, tx_ring_idx);
2651 netif_stop_subqueue(ndev, tx_ring->wq_id);
2652 tx_ring->tx_errors++;
2653 return NETDEV_TX_BUSY;
2655 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2656 mac_iocb_ptr = tx_ring_desc->queue_entry;
2657 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2659 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2660 mac_iocb_ptr->tid = tx_ring_desc->index;
2661 /* We use the upper 32-bits to store the tx queue for this IO.
2662 * When we get the completion we can use it to establish the context.
2664 mac_iocb_ptr->txq_idx = tx_ring_idx;
2665 tx_ring_desc->skb = skb;
2667 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2669 if (skb_vlan_tag_present(skb)) {
2670 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2671 "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
2672 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2673 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
2675 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2677 dev_kfree_skb_any(skb);
2678 return NETDEV_TX_OK;
2679 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2680 ql_hw_csum_setup(skb,
2681 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2683 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2685 netif_err(qdev, tx_queued, qdev->ndev,
2686 "Could not map the segments.\n");
2687 tx_ring->tx_errors++;
2688 return NETDEV_TX_BUSY;
2690 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2691 tx_ring->prod_idx++;
2692 if (tx_ring->prod_idx == tx_ring->wq_len)
2693 tx_ring->prod_idx = 0;
2696 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2697 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2698 "tx queued, slot %d, len %d\n",
2699 tx_ring->prod_idx, skb->len);
2701 atomic_dec(&tx_ring->tx_count);
2703 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2704 netif_stop_subqueue(ndev, tx_ring->wq_id);
2705 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2707 * The queue got stopped because the tx_ring was full.
2708 * Wake it up, because it's now at least 25% empty.
2710 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2712 return NETDEV_TX_OK;
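/* Free the DMA-coherent shadow register areas shared with the chip. */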
2716 static void ql_free_shadow_space(struct ql_adapter *qdev)
2718 if (qdev->rx_ring_shadow_reg_area) {
2719 pci_free_consistent(qdev->pdev,
2721 qdev->rx_ring_shadow_reg_area,
2722 qdev->rx_ring_shadow_reg_dma);
2723 qdev->rx_ring_shadow_reg_area = NULL;
2725 if (qdev->tx_ring_shadow_reg_area) {
2726 pci_free_consistent(qdev->pdev,
2728 qdev->tx_ring_shadow_reg_area,
2729 qdev->tx_ring_shadow_reg_dma);
2730 qdev->tx_ring_shadow_reg_area = NULL;
2734 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2736 qdev->rx_ring_shadow_reg_area =
2737 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2738 &qdev->rx_ring_shadow_reg_dma);
2739 if (qdev->rx_ring_shadow_reg_area == NULL) {
2740 netif_err(qdev, ifup, qdev->ndev,
2741 "Allocation of RX shadow space failed.\n");
2745 qdev->tx_ring_shadow_reg_area =
2746 pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
2747 &qdev->tx_ring_shadow_reg_dma);
2748 if (qdev->tx_ring_shadow_reg_area == NULL) {
2749 netif_err(qdev, ifup, qdev->ndev,
2750 "Allocation of TX shadow space failed.\n");
2751 goto err_wqp_sh_area;
2756 pci_free_consistent(qdev->pdev,
2758 qdev->rx_ring_shadow_reg_area,
2759 qdev->rx_ring_shadow_reg_dma);
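/* Initialize the software descriptors for a TX ring and reset its
 * free-entry count.
 */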
2763 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2765 struct tx_ring_desc *tx_ring_desc;
2767 struct ob_mac_iocb_req *mac_iocb_ptr;
2769 mac_iocb_ptr = tx_ring->wq_base;
2770 tx_ring_desc = tx_ring->q;
2771 for (i = 0; i < tx_ring->wq_len; i++) {
2772 tx_ring_desc->index = i;
2773 tx_ring_desc->skb = NULL;
2774 tx_ring_desc->queue_entry = mac_iocb_ptr;
2778 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2781 static void ql_free_tx_resources(struct ql_adapter *qdev,
2782 struct tx_ring *tx_ring)
2784 if (tx_ring->wq_base) {
2785 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2786 tx_ring->wq_base, tx_ring->wq_base_dma);
2787 tx_ring->wq_base = NULL;
2793 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2794 struct tx_ring *tx_ring)
2797 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2798 &tx_ring->wq_base_dma);
2800 if ((tx_ring->wq_base == NULL) ||
2801 tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
2805 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2806 if (tx_ring->q == NULL)
2811 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2812 tx_ring->wq_base, tx_ring->wq_base_dma);
2813 tx_ring->wq_base = NULL;
2815 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
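/* Unmap and release any pages still held by the large buffer queue. */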
2819 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2821 struct bq_desc *lbq_desc;
2823 uint32_t curr_idx, clean_idx;
2825 curr_idx = rx_ring->lbq_curr_idx;
2826 clean_idx = rx_ring->lbq_clean_idx;
2827 while (curr_idx != clean_idx) {
2828 lbq_desc = &rx_ring->lbq[curr_idx];
2830 if (lbq_desc->p.pg_chunk.last_flag) {
2831 pci_unmap_page(qdev->pdev,
2832 lbq_desc->p.pg_chunk.map,
2833 ql_lbq_block_size(qdev),
2834 PCI_DMA_FROMDEVICE);
2835 lbq_desc->p.pg_chunk.last_flag = 0;
2838 put_page(lbq_desc->p.pg_chunk.page);
2839 lbq_desc->p.pg_chunk.page = NULL;
2841 if (++curr_idx == rx_ring->lbq_len)
2845 if (rx_ring->pg_chunk.page) {
2846 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2847 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2848 put_page(rx_ring->pg_chunk.page);
2849 rx_ring->pg_chunk.page = NULL;
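/* Unmap and free any skbs still held by the small buffer queue. */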
2853 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2856 struct bq_desc *sbq_desc;
2858 for (i = 0; i < rx_ring->sbq_len; i++) {
2859 sbq_desc = &rx_ring->sbq[i];
2860 if (sbq_desc == NULL) {
2861 netif_err(qdev, ifup, qdev->ndev,
2862 "sbq_desc %d is NULL.\n", i);
2865 if (sbq_desc->p.skb) {
2866 pci_unmap_single(qdev->pdev,
2867 dma_unmap_addr(sbq_desc, mapaddr),
2868 dma_unmap_len(sbq_desc, maplen),
2869 PCI_DMA_FROMDEVICE);
2870 dev_kfree_skb(sbq_desc->p.skb);
2871 sbq_desc->p.skb = NULL;
2876 /* Free all large and small rx buffers associated
2877 * with the completion queues for this device.
2879 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2882 struct rx_ring *rx_ring;
2884 for (i = 0; i < qdev->rx_ring_count; i++) {
2885 rx_ring = &qdev->rx_ring[i];
2887 ql_free_lbq_buffers(qdev, rx_ring);
2889 ql_free_sbq_buffers(qdev, rx_ring);
2893 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2895 struct rx_ring *rx_ring;
2898 for (i = 0; i < qdev->rx_ring_count; i++) {
2899 rx_ring = &qdev->rx_ring[i];
2900 if (rx_ring->type != TX_Q)
2901 ql_update_buffer_queues(qdev, rx_ring);
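/* Initialize the large buffer queue descriptors and point each one at
 * its slot in the DMA-visible address array.
 */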
2905 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2906 struct rx_ring *rx_ring)
2909 struct bq_desc *lbq_desc;
2910 __le64 *bq = rx_ring->lbq_base;
2912 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2913 for (i = 0; i < rx_ring->lbq_len; i++) {
2914 lbq_desc = &rx_ring->lbq[i];
2915 memset(lbq_desc, 0, sizeof(*lbq_desc));
2916 lbq_desc->index = i;
2917 lbq_desc->addr = bq;
2922 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2923 struct rx_ring *rx_ring)
2926 struct bq_desc *sbq_desc;
2927 __le64 *bq = rx_ring->sbq_base;
2929 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2930 for (i = 0; i < rx_ring->sbq_len; i++) {
2931 sbq_desc = &rx_ring->sbq[i];
2932 memset(sbq_desc, 0, sizeof(*sbq_desc));
2933 sbq_desc->index = i;
2934 sbq_desc->addr = bq;
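/* Free the small and large buffer queues, their control blocks and the
 * completion queue memory for this rx_ring.
 */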
2939 static void ql_free_rx_resources(struct ql_adapter *qdev,
2940 struct rx_ring *rx_ring)
2942 /* Free the small buffer queue. */
2943 if (rx_ring->sbq_base) {
2944 pci_free_consistent(qdev->pdev,
2946 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2947 rx_ring->sbq_base = NULL;
2950 /* Free the small buffer queue control blocks. */
2951 kfree(rx_ring->sbq);
2952 rx_ring->sbq = NULL;
2954 /* Free the large buffer queue. */
2955 if (rx_ring->lbq_base) {
2956 pci_free_consistent(qdev->pdev,
2958 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2959 rx_ring->lbq_base = NULL;
2962 /* Free the large buffer queue control blocks. */
2963 kfree(rx_ring->lbq);
2964 rx_ring->lbq = NULL;
2966 /* Free the rx queue. */
2967 if (rx_ring->cq_base) {
2968 pci_free_consistent(qdev->pdev,
2970 rx_ring->cq_base, rx_ring->cq_base_dma);
2971 rx_ring->cq_base = NULL;
2975 /* Allocate queues and buffers for this completions queue based
2976 * on the values in the parameter structure. */
2977 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2978 struct rx_ring *rx_ring)
2982 * Allocate the completion queue for this rx_ring.
2985 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2986 &rx_ring->cq_base_dma);
2988 if (rx_ring->cq_base == NULL) {
2989 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2993 if (rx_ring->sbq_len) {
2995 * Allocate small buffer queue.
2998 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2999 &rx_ring->sbq_base_dma);
3001 if (rx_ring->sbq_base == NULL) {
3002 netif_err(qdev, ifup, qdev->ndev,
3003 "Small buffer queue allocation failed.\n");
3008 * Allocate small buffer queue control blocks.
3010 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
3011 sizeof(struct bq_desc),
3013 if (rx_ring->sbq == NULL)
3016 ql_init_sbq_ring(qdev, rx_ring);
3019 if (rx_ring->lbq_len) {
3021 * Allocate large buffer queue.
3024 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
3025 &rx_ring->lbq_base_dma);
3027 if (rx_ring->lbq_base == NULL) {
3028 netif_err(qdev, ifup, qdev->ndev,
3029 "Large buffer queue allocation failed.\n");
3033 * Allocate large buffer queue control blocks.
3035 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
3036 sizeof(struct bq_desc),
3038 if (rx_ring->lbq == NULL)
3041 ql_init_lbq_ring(qdev, rx_ring);
3047 ql_free_rx_resources(qdev, rx_ring);
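/* Free any skbs that were still in flight on the TX rings when the
 * interface went down.
 */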
3051 static void ql_tx_ring_clean(struct ql_adapter *qdev)
3053 struct tx_ring *tx_ring;
3054 struct tx_ring_desc *tx_ring_desc;
3058 * Loop through all queues and free
3061 for (j = 0; j < qdev->tx_ring_count; j++) {
3062 tx_ring = &qdev->tx_ring[j];
3063 for (i = 0; i < tx_ring->wq_len; i++) {
3064 tx_ring_desc = &tx_ring->q[i];
3065 if (tx_ring_desc && tx_ring_desc->skb) {
3066 netif_err(qdev, ifdown, qdev->ndev,
3067 "Freeing lost SKB %p, from queue %d, index %d.\n",
3068 tx_ring_desc->skb, j,
3069 tx_ring_desc->index);
3070 ql_unmap_send(qdev, tx_ring_desc,
3071 tx_ring_desc->map_cnt);
3072 dev_kfree_skb(tx_ring_desc->skb);
3073 tx_ring_desc->skb = NULL;
3079 static void ql_free_mem_resources(struct ql_adapter *qdev)
3083 for (i = 0; i < qdev->tx_ring_count; i++)
3084 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3085 for (i = 0; i < qdev->rx_ring_count; i++)
3086 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3087 ql_free_shadow_space(qdev);
3090 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3094 /* Allocate space for our shadow registers and such. */
3095 if (ql_alloc_shadow_space(qdev))
3098 for (i = 0; i < qdev->rx_ring_count; i++) {
3099 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3100 netif_err(qdev, ifup, qdev->ndev,
3101 "RX resource allocation failed.\n");
3105 /* Allocate tx queue resources */
3106 for (i = 0; i < qdev->tx_ring_count; i++) {
3107 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3108 netif_err(qdev, ifup, qdev->ndev,
3109 "TX resource allocation failed.\n");
3116 ql_free_mem_resources(qdev);
3120 /* Set up the rx ring control block and pass it to the chip.
3121 * The control block is defined as
3122 * "Completion Queue Initialization Control Block", or cqicb.
3124 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3126 struct cqicb *cqicb = &rx_ring->cqicb;
3127 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3128 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3129 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3130 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3131 void __iomem *doorbell_area =
3132 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3136 __le64 *base_indirect_ptr;
3139 /* Set up the shadow registers for this ring. */
3140 rx_ring->prod_idx_sh_reg = shadow_reg;
3141 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3142 *rx_ring->prod_idx_sh_reg = 0;
3143 shadow_reg += sizeof(u64);
3144 shadow_reg_dma += sizeof(u64);
3145 rx_ring->lbq_base_indirect = shadow_reg;
3146 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3147 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3148 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3149 rx_ring->sbq_base_indirect = shadow_reg;
3150 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3152 /* PCI doorbell mem area + 0x00 for consumer index register */
3153 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3154 rx_ring->cnsmr_idx = 0;
3155 rx_ring->curr_entry = rx_ring->cq_base;
3157 /* PCI doorbell mem area + 0x04 for valid register */
3158 rx_ring->valid_db_reg = doorbell_area + 0x04;
3160 /* PCI doorbell mem area + 0x18 for large buffer consumer */
3161 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3163 /* PCI doorbell mem area + 0x1c */
3164 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3166 memset((void *)cqicb, 0, sizeof(struct cqicb));
3167 cqicb->msix_vect = rx_ring->irq;
3169 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3170 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3172 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3174 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3177 * Set up the control block load flags.
3179 cqicb->flags = FLAGS_LC | /* Load queue base address */
3180 FLAGS_LV | /* Load MSI-X vector */
3181 FLAGS_LI; /* Load irq delay values */
3182 if (rx_ring->lbq_len) {
3183 cqicb->flags |= FLAGS_LL; /* Load lbq values */
3184 tmp = (u64)rx_ring->lbq_base_dma;
3185 base_indirect_ptr = rx_ring->lbq_base_indirect;
3188 *base_indirect_ptr = cpu_to_le64(tmp);
3189 tmp += DB_PAGE_SIZE;
3190 base_indirect_ptr++;
3192 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3194 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3195 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3196 (u16) rx_ring->lbq_buf_size;
3197 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3198 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3199 (u16) rx_ring->lbq_len;
3200 cqicb->lbq_len = cpu_to_le16(bq_len);
3201 rx_ring->lbq_prod_idx = 0;
3202 rx_ring->lbq_curr_idx = 0;
3203 rx_ring->lbq_clean_idx = 0;
3204 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3206 if (rx_ring->sbq_len) {
3207 cqicb->flags |= FLAGS_LS; /* Load sbq values */
3208 tmp = (u64)rx_ring->sbq_base_dma;
3209 base_indirect_ptr = rx_ring->sbq_base_indirect;
3212 *base_indirect_ptr = cpu_to_le64(tmp);
3213 tmp += DB_PAGE_SIZE;
3214 base_indirect_ptr++;
3216 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3218 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3219 cqicb->sbq_buf_size =
3220 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3221 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3222 (u16) rx_ring->sbq_len;
3223 cqicb->sbq_len = cpu_to_le16(bq_len);
3224 rx_ring->sbq_prod_idx = 0;
3225 rx_ring->sbq_curr_idx = 0;
3226 rx_ring->sbq_clean_idx = 0;
3227 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3229 switch (rx_ring->type) {
3231 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3232 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3235 /* Inbound completion handling rx_rings run in
3236 * separate NAPI contexts.
3238 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3240 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3241 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3244 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3245 "Invalid rx_ring->type = %d.\n", rx_ring->type);
3247 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3248 CFG_LCQ, rx_ring->cq_id);
3250 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
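/* Set up the work queue initialization control block (wqicb) for a TX
 * ring and download it to the chip.
 */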
3256 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3258 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3259 void __iomem *doorbell_area =
3260 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3261 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3262 (tx_ring->wq_id * sizeof(u64));
3263 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3264 (tx_ring->wq_id * sizeof(u64));
3268 * Assign doorbell registers for this tx_ring.
3270 /* TX PCI doorbell mem area for tx producer index */
3271 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3272 tx_ring->prod_idx = 0;
3273 /* TX PCI doorbell mem area + 0x04 */
3274 tx_ring->valid_db_reg = doorbell_area + 0x04;
3277 * Assign shadow registers for this tx_ring.
3279 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3280 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3282 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3283 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3284 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3285 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3287 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3289 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3291 ql_init_tx_ring(qdev, tx_ring);
3293 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3294 (u16) tx_ring->wq_id);
3296 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
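/* Tear down MSI-X or MSI state, whichever is currently enabled. */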
3302 static void ql_disable_msix(struct ql_adapter *qdev)
3304 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3305 pci_disable_msix(qdev->pdev);
3306 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3307 kfree(qdev->msi_x_entry);
3308 qdev->msi_x_entry = NULL;
3309 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3310 pci_disable_msi(qdev->pdev);
3311 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3315 /* We start by trying to get the number of vectors
3316 * stored in qdev->intr_count. If we don't get that
3317 * many then we reduce the count and try again.
3319 static void ql_enable_msix(struct ql_adapter *qdev)
3323 /* Get the MSIX vectors. */
3324 if (qlge_irq_type == MSIX_IRQ) {
3325 /* Try to alloc space for the msix struct,
3326 * if it fails then go to MSI/legacy.
3328 qdev->msi_x_entry = kcalloc(qdev->intr_count,
3329 sizeof(struct msix_entry),
3331 if (!qdev->msi_x_entry) {
3332 qlge_irq_type = MSI_IRQ;
3336 for (i = 0; i < qdev->intr_count; i++)
3337 qdev->msi_x_entry[i].entry = i;
3339 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3340 1, qdev->intr_count);
3342 kfree(qdev->msi_x_entry);
3343 qdev->msi_x_entry = NULL;
3344 netif_warn(qdev, ifup, qdev->ndev,
3345 "MSI-X Enable failed, trying MSI.\n");
3346 qlge_irq_type = MSI_IRQ;
3348 qdev->intr_count = err;
3349 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3350 netif_info(qdev, ifup, qdev->ndev,
3351 "MSI-X Enabled, got %d vectors.\n",
3357 qdev->intr_count = 1;
3358 if (qlge_irq_type == MSI_IRQ) {
3359 if (!pci_enable_msi(qdev->pdev)) {
3360 set_bit(QL_MSI_ENABLED, &qdev->flags);
3361 netif_info(qdev, ifup, qdev->ndev,
3362 "Running with MSI interrupts.\n");
3366 qlge_irq_type = LEG_IRQ;
3367 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3368 "Running with legacy interrupts.\n");
3371 /* Each vector services 1 RSS ring and 1 or more
3372 * TX completion rings. This function loops through
3373 * the TX completion rings and assigns the vector that
3374 * will service it. An example would be if there are
3375 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3376 * This would mean that vector 0 would service RSS ring 0
3377 * and TX completion rings 0,1,2 and 3. Vector 1 would
3378 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3380 static void ql_set_tx_vect(struct ql_adapter *qdev)
3383 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3385 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3386 /* Assign irq vectors to TX rx_rings.*/
3387 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3388 i < qdev->rx_ring_count; i++) {
3389 if (j == tx_rings_per_vector) {
3393 qdev->rx_ring[i].irq = vect;
3397 /* For single vector all rings have an irq
3400 for (i = 0; i < qdev->rx_ring_count; i++)
3401 qdev->rx_ring[i].irq = 0;
3405 /* Set the interrupt mask for this vector. Each vector
3406 * will service 1 RSS ring and 1 or more TX completion
3407 * rings. This function sets up a bit mask per vector
3408 * that indicates which rings it services.
3410 static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3412 int j, vect = ctx->intr;
3413 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3415 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3416 /* Add the RSS ring serviced by this vector
3419 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3420 /* Add the TX ring(s) serviced by this vector
3422 for (j = 0; j < tx_rings_per_vector; j++) {
3424 (1 << qdev->rx_ring[qdev->rss_ring_count +
3425 (vect * tx_rings_per_vector) + j].cq_id);
3428 /* For single vector we just shift each queue's
3431 for (j = 0; j < qdev->rx_ring_count; j++)
3432 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3437 * Here we build the intr_context structures based on
3438 * our rx_ring count and intr vector count.
3439 * The intr_context structure is used to hook each vector
3440 * to possibly different handlers.
3442 static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3445 struct intr_context *intr_context = &qdev->intr_context[0];
3447 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3448 /* Each rx_ring has its
3449 * own intr_context since we have separate
3450 * vectors for each queue.
3452 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3453 qdev->rx_ring[i].irq = i;
3454 intr_context->intr = i;
3455 intr_context->qdev = qdev;
3456 /* Set up this vector's bit-mask that indicates
3457 * which queues it services.
3459 ql_set_irq_mask(qdev, intr_context);
3461 * We set up each vector's enable/disable/read bits so
3462 * there's no bit/mask calculations in the critical path.
3464 intr_context->intr_en_mask =
3465 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3466 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3468 intr_context->intr_dis_mask =
3469 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3470 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3472 intr_context->intr_read_mask =
3473 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3474 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3477 /* The first vector/queue handles
3478 * broadcast/multicast, fatal errors,
3479 * and firmware events. This in addition
3480 * to normal inbound NAPI processing.
3482 intr_context->handler = qlge_isr;
3483 sprintf(intr_context->name, "%s-rx-%d",
3484 qdev->ndev->name, i);
3487 * Inbound queues handle unicast frames only.
3489 intr_context->handler = qlge_msix_rx_isr;
3490 sprintf(intr_context->name, "%s-rx-%d",
3491 qdev->ndev->name, i);
3496 * All rx_rings use the same intr_context since
3497 * there is only one vector.
3499 intr_context->intr = 0;
3500 intr_context->qdev = qdev;
3502 * We set up each vector's enable/disable/read bits so
3503 * there's no bit/mask calculations in the critical path.
3505 intr_context->intr_en_mask =
3506 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3507 intr_context->intr_dis_mask =
3508 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3509 INTR_EN_TYPE_DISABLE;
3510 intr_context->intr_read_mask =
3511 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3513 * Single interrupt means one handler for all rings.
3515 intr_context->handler = qlge_isr;
3516 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3517 /* Set up this vector's bit-mask that indicates
3518 * which queues it services. In this case there is
3519 * a single vector so it will service all RSS and
3520 * TX completion rings.
3522 ql_set_irq_mask(qdev, intr_context);
3524 /* Tell the TX completion rings which MSIx vector
3525 * they will be using.
3527 ql_set_tx_vect(qdev);
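/* Free every hooked interrupt vector and disable MSI-X/MSI. */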
3530 static void ql_free_irq(struct ql_adapter *qdev)
3533 struct intr_context *intr_context = &qdev->intr_context[0];
3535 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3536 if (intr_context->hooked) {
3537 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3538 free_irq(qdev->msi_x_entry[i].vector,
3541 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3545 ql_disable_msix(qdev);
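/* Request an irq for each vector, using the per-vector MSI-X handlers
 * when available and a single (possibly shared) handler otherwise.
 */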
3548 static int ql_request_irq(struct ql_adapter *qdev)
3552 struct pci_dev *pdev = qdev->pdev;
3553 struct intr_context *intr_context = &qdev->intr_context[0];
3555 ql_resolve_queues_to_irqs(qdev);
3557 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3558 atomic_set(&intr_context->irq_cnt, 0);
3559 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3560 status = request_irq(qdev->msi_x_entry[i].vector,
3561 intr_context->handler,
3566 netif_err(qdev, ifup, qdev->ndev,
3567 "Failed request for MSIX interrupt %d.\n",
3572 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3573 "trying msi or legacy interrupts.\n");
3574 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3575 "%s: irq = %d.\n", __func__, pdev->irq);
3576 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3577 "%s: context->name = %s.\n", __func__,
3578 intr_context->name);
3579 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3580 "%s: dev_id = 0x%p.\n", __func__,
3583 request_irq(pdev->irq, qlge_isr,
3584 test_bit(QL_MSI_ENABLED,
3586 flags) ? 0 : IRQF_SHARED,
3587 intr_context->name, &qdev->rx_ring[0]);
3591 netif_err(qdev, ifup, qdev->ndev,
3592 "Hooked intr %d, queue type %s, with name %s.\n",
3594 qdev->rx_ring[0].type == DEFAULT_Q ?
3596 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3597 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3598 intr_context->name);
3600 intr_context->hooked = 1;
3604 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
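/* Load the RSS initialization control block (ricb): hash keys, mask
 * and a 1024-entry indirection table spread across the RSS rings.
 */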
3609 static int ql_start_rss(struct ql_adapter *qdev)
3611 static const u8 init_hash_seed[] = {
3612 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3613 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3614 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3615 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3616 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3618 struct ricb *ricb = &qdev->ricb;
3621 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3623 memset((void *)ricb, 0, sizeof(*ricb));
3625 ricb->base_cq = RSS_L4K;
3627 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3628 ricb->mask = cpu_to_le16((u16)(0x3ff));
3631 * Fill out the Indirection Table.
3633 for (i = 0; i < 1024; i++)
3634 hash_id[i] = (i & (qdev->rss_ring_count - 1));
3636 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3637 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3639 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3641 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
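/* Clear all 16 entries in the frame-to-queue routing table. */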
3647 static int ql_clear_routing_entries(struct ql_adapter *qdev)
3651 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3654 /* Clear all the entries in the routing table. */
3655 for (i = 0; i < 16; i++) {
3656 status = ql_set_routing_reg(qdev, i, 0, 0);
3658 netif_err(qdev, ifup, qdev->ndev,
3659 "Failed to init routing register for CAM packets.\n");
3663 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3667 /* Initialize the frame-to-queue routing. */
3668 static int ql_route_initialize(struct ql_adapter *qdev)
3672 /* Clear all the entries in the routing table. */
3673 status = ql_clear_routing_entries(qdev);
3677 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3681 status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3682 RT_IDX_IP_CSUM_ERR, 1);
3684 netif_err(qdev, ifup, qdev->ndev,
3685 "Failed to init routing register "
3686 "for IP CSUM error packets.\n");
3689 status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3690 RT_IDX_TU_CSUM_ERR, 1);
3692 netif_err(qdev, ifup, qdev->ndev,
3693 "Failed to init routing register "
3694 "for TCP/UDP CSUM error packets.\n");
3697 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3699 netif_err(qdev, ifup, qdev->ndev,
3700 "Failed to init routing register for broadcast packets.\n");
3703 /* If we have more than one inbound queue, then turn on RSS in the
3706 if (qdev->rss_ring_count > 1) {
3707 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3708 RT_IDX_RSS_MATCH, 1);
3710 netif_err(qdev, ifup, qdev->ndev,
3711 "Failed to init routing register for MATCH RSS packets.\n");
3716 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3719 netif_err(qdev, ifup, qdev->ndev,
3720 "Failed to init routing register for CAM packets.\n");
3722 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3726 int ql_cam_route_initialize(struct ql_adapter *qdev)
3730 /* Check if the link is up and use that to
3731 * determine whether we are setting or clearing
3732 * the MAC address in the CAM.
3734 set = ql_read32(qdev, STS);
3735 set &= qdev->port_link_up;
3736 status = ql_set_mac_addr(qdev, set);
3738 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3742 status = ql_route_initialize(qdev);
3744 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
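/* Program the chip for operation: system and function control
 * registers, rx/tx rings, RSS, port and CAM/routing setup, then enable
 * NAPI on the RSS rings.
 */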
3749 static int ql_adapter_initialize(struct ql_adapter *qdev)
3756 * Set up the System register to halt on errors.
3758 value = SYS_EFE | SYS_FAE;
3760 ql_write32(qdev, SYS, mask | value);
3762 /* Set the default queue, and VLAN behavior. */
3763 value = NIC_RCV_CFG_DFQ;
3764 mask = NIC_RCV_CFG_DFQ_MASK;
3765 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
3766 value |= NIC_RCV_CFG_RV;
3767 mask |= (NIC_RCV_CFG_RV << 16);
3769 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3771 /* Set the MPI interrupt to enabled. */
3772 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3774 /* Enable the function, set pagesize, enable error checking. */
3775 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3776 FSC_EC | FSC_VM_PAGE_4K;
3777 value |= SPLT_SETTING;
3779 /* Set/clear header splitting. */
3780 mask = FSC_VM_PAGESIZE_MASK |
3781 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3782 ql_write32(qdev, FSC, mask | value);
3784 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3786 /* Set RX packet routing to use the port/pci function on which the
3787 * packet arrived, in addition to the usual frame routing.
3788 * This is helpful on bonding where both interfaces can have
3789 * the same MAC address.
3791 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3792 /* Reroute all packets to our Interface.
3793 * They may have been routed to MPI firmware
3796 value = ql_read32(qdev, MGMT_RCV_CFG);
3797 value &= ~MGMT_RCV_CFG_RM;
3800 /* Sticky reg needs clearing due to WOL. */
3801 ql_write32(qdev, MGMT_RCV_CFG, mask);
3802 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3804 /* Default WOL is enabled on Mezz cards */
3805 if (qdev->pdev->subsystem_device == 0x0068 ||
3806 qdev->pdev->subsystem_device == 0x0180)
3807 qdev->wol = WAKE_MAGIC;
3809 /* Start up the rx queues. */
3810 for (i = 0; i < qdev->rx_ring_count; i++) {
3811 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3813 netif_err(qdev, ifup, qdev->ndev,
3814 "Failed to start rx ring[%d].\n", i);
3819 /* If there is more than one inbound completion queue
3820 * then download a RICB to configure RSS.
3822 if (qdev->rss_ring_count > 1) {
3823 status = ql_start_rss(qdev);
3825 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3830 /* Start up the tx queues. */
3831 for (i = 0; i < qdev->tx_ring_count; i++) {
3832 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3834 netif_err(qdev, ifup, qdev->ndev,
3835 "Failed to start tx ring[%d].\n", i);
3840 /* Initialize the port and set the max framesize. */
3841 status = qdev->nic_ops->port_initialize(qdev);
3843 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3845 /* Set up the MAC address and frame routing filter. */
3846 status = ql_cam_route_initialize(qdev);
3848 netif_err(qdev, ifup, qdev->ndev,
3849 "Failed to init CAM/Routing tables.\n");
3853 /* Start NAPI for the RSS queues. */
3854 for (i = 0; i < qdev->rss_ring_count; i++)
3855 napi_enable(&qdev->rx_ring[i].napi);
3860 /* Issue soft reset to chip. */
3861 static int ql_adapter_reset(struct ql_adapter *qdev)
3865 unsigned long end_jiffies;
3867 /* Clear all the entries in the routing table. */
3868 status = ql_clear_routing_entries(qdev);
3870 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3874 /* If the recovery bit is set, skip the mailbox command and
3875 * clear the bit; otherwise we are in the normal reset process.
3877 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3878 /* Stop management traffic. */
3879 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3881 /* Wait for the NIC and MGMNT FIFOs to empty. */
3882 ql_wait_fifo_empty(qdev);
3884 clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3886 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3888 end_jiffies = jiffies + usecs_to_jiffies(30);
3890 value = ql_read32(qdev, RST_FO);
3891 if ((value & RST_FO_FR) == 0)
3894 } while (time_before(jiffies, end_jiffies));
3896 if (value & RST_FO_FR) {
3897 netif_err(qdev, ifdown, qdev->ndev,
3898 "ETIMEDOUT!!! errored out of resetting the chip!\n");
3899 status = -ETIMEDOUT;
3902 /* Resume management traffic. */
3903 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3907 static void ql_display_dev_info(struct net_device *ndev)
3909 struct ql_adapter *qdev = netdev_priv(ndev);
3911 netif_info(qdev, probe, qdev->ndev,
3912 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3913 "XG Roll = %d, XG Rev = %d.\n",
3916 qdev->chip_rev_id & 0x0000000f,
3917 qdev->chip_rev_id >> 4 & 0x0000000f,
3918 qdev->chip_rev_id >> 8 & 0x0000000f,
3919 qdev->chip_rev_id >> 12 & 0x0000000f);
3920 netif_info(qdev, probe, qdev->ndev,
3921 "MAC address %pM\n", ndev->dev_addr);
3924 static int ql_wol(struct ql_adapter *qdev)
3927 u32 wol = MB_WOL_DISABLE;
3929 /* The CAM is still intact after a reset, but if we
3930 * are doing WOL, then we may need to program the
3931 * routing regs. We would also need to issue the mailbox
3932 * commands to instruct the MPI what to do per the ethtool
3936 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3937 WAKE_MCAST | WAKE_BCAST)) {
3938 netif_err(qdev, ifdown, qdev->ndev,
3939 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3944 if (qdev->wol & WAKE_MAGIC) {
3945 status = ql_mb_wol_set_magic(qdev, 1);
3947 netif_err(qdev, ifdown, qdev->ndev,
3948 "Failed to set magic packet on %s.\n",
3952 netif_info(qdev, drv, qdev->ndev,
3953 "Enabled magic packet successfully on %s.\n",
3956 wol |= MB_WOL_MAGIC_PKT;
3960 wol |= MB_WOL_MODE_ON;
3961 status = ql_mb_wol_mode(qdev, wol);
3962 netif_err(qdev, drv, qdev->ndev,
3963 "WOL %s (wol code 0x%x) on %s\n",
3964 (status == 0) ? "Successfully set" : "Failed",
3965 wol, qdev->ndev->name);
3971 static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3974 /* Don't kill the reset worker thread if we
3975 * are in the process of recovery.
3977 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3978 cancel_delayed_work_sync(&qdev->asic_reset_work);
3979 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3980 cancel_delayed_work_sync(&qdev->mpi_work);
3981 cancel_delayed_work_sync(&qdev->mpi_idc_work);
3982 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3983 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
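/* Bring the adapter down: stop NAPI, disable interrupts, clean the TX
 * rings, reset the chip and free the RX buffers.
 */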
3986 static int ql_adapter_down(struct ql_adapter *qdev)
3992 ql_cancel_all_work_sync(qdev);
3994 for (i = 0; i < qdev->rss_ring_count; i++)
3995 napi_disable(&qdev->rx_ring[i].napi);
3997 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3999 ql_disable_interrupts(qdev);
4001 ql_tx_ring_clean(qdev);
4003 /* Call netif_napi_del() from common point.
4005 for (i = 0; i < qdev->rss_ring_count; i++)
4006 netif_napi_del(&qdev->rx_ring[i].napi);
4008 status = ql_adapter_reset(qdev);
4010 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
4012 ql_free_rx_buffers(qdev);
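/* Bring the adapter up: initialize the chip, repopulate the RX
 * buffers, restore rx mode and VLANs, then enable interrupts.
 */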
4017 static int ql_adapter_up(struct ql_adapter *qdev)
4021 err = ql_adapter_initialize(qdev);
4023 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
4026 set_bit(QL_ADAPTER_UP, &qdev->flags);
4027 ql_alloc_rx_buffers(qdev);
4028 /* If the port is initialized and the
4029 * link is up then turn on the carrier.
4031 if ((ql_read32(qdev, STS) & qdev->port_init) &&
4032 (ql_read32(qdev, STS) & qdev->port_link_up))
4034 /* Restore rx mode. */
4035 clear_bit(QL_ALLMULTI, &qdev->flags);
4036 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4037 qlge_set_multicast_list(qdev->ndev);
4039 /* Restore vlan setting. */
4040 qlge_restore_vlan(qdev);
4042 ql_enable_interrupts(qdev);
4043 ql_enable_all_completion_interrupts(qdev);
4044 netif_tx_start_all_queues(qdev->ndev);
4048 ql_adapter_reset(qdev);
4052 static void ql_release_adapter_resources(struct ql_adapter *qdev)
4054 ql_free_mem_resources(qdev);
4058 static int ql_get_adapter_resources(struct ql_adapter *qdev)
4062 if (ql_alloc_mem_resources(qdev)) {
4063 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4066 status = ql_request_irq(qdev);
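/* ndo_stop: wait out any in-progress reset, then bring the adapter
 * down and release its resources.
 */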
4070 static int qlge_close(struct net_device *ndev)
4072 struct ql_adapter *qdev = netdev_priv(ndev);
4074 /* If we hit pci_channel_io_perm_failure
4075 * failure condition, then we already
4076 * brought the adapter down.
4078 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4079 netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4080 clear_bit(QL_EEH_FATAL, &qdev->flags);
4085 * Wait for device to recover from a reset.
4086 * (Rarely happens, but possible.)
4088 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4090 ql_adapter_down(qdev);
4091 ql_release_adapter_resources(qdev);
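/* Size and type the TX/RX rings based on the online CPU count and the
 * MSI-X vectors actually granted.
 */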
4095 static int ql_configure_rings(struct ql_adapter *qdev)
4098 struct rx_ring *rx_ring;
4099 struct tx_ring *tx_ring;
4100 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4101 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4102 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4104 qdev->lbq_buf_order = get_order(lbq_buf_len);
4106 /* In a perfect world we have one RSS ring for each CPU
4107 * and each has its own vector. To do that we ask for
4108 * cpu_cnt vectors. ql_enable_msix() will adjust the
4109 * vector count to what we actually get. We then
4110 * allocate an RSS ring for each.
4111 * Essentially, we are doing min(cpu_count, msix_vector_count).
4113 qdev->intr_count = cpu_cnt;
4114 ql_enable_msix(qdev);
4115 /* Adjust the RSS ring count to the actual vector count. */
4116 qdev->rss_ring_count = qdev->intr_count;
4117 qdev->tx_ring_count = cpu_cnt;
4118 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4120 for (i = 0; i < qdev->tx_ring_count; i++) {
4121 tx_ring = &qdev->tx_ring[i];
4122 memset((void *)tx_ring, 0, sizeof(*tx_ring));
4123 tx_ring->qdev = qdev;
4125 tx_ring->wq_len = qdev->tx_ring_size;
4127 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4130 * The completion queue IDs for the tx rings start
4131 * immediately after the rss rings.
4133 tx_ring->cq_id = qdev->rss_ring_count + i;
4136 for (i = 0; i < qdev->rx_ring_count; i++) {
4137 rx_ring = &qdev->rx_ring[i];
4138 memset((void *)rx_ring, 0, sizeof(*rx_ring));
4139 rx_ring->qdev = qdev;
4141 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
4142 if (i < qdev->rss_ring_count) {
4144 * Inbound (RSS) queues.
4146 rx_ring->cq_len = qdev->rx_ring_size;
4148 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4149 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4151 rx_ring->lbq_len * sizeof(__le64);
4152 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4153 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4155 rx_ring->sbq_len * sizeof(__le64);
4156 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4157 rx_ring->type = RX_Q;
4160 * Outbound queue handles outbound completions only.
4162 /* outbound cq is same size as tx_ring it services. */
4163 rx_ring->cq_len = qdev->tx_ring_size;
4165 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4166 rx_ring->lbq_len = 0;
4167 rx_ring->lbq_size = 0;
4168 rx_ring->lbq_buf_size = 0;
4169 rx_ring->sbq_len = 0;
4170 rx_ring->sbq_size = 0;
4171 rx_ring->sbq_buf_size = 0;
4172 rx_ring->type = TX_Q;
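/* ndo_open: reset the chip, configure the rings, allocate resources
 * and bring the adapter up.
 */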
4178 static int qlge_open(struct net_device *ndev)
4181 struct ql_adapter *qdev = netdev_priv(ndev);
4183 err = ql_adapter_reset(qdev);
4187 err = ql_configure_rings(qdev);
4191 err = ql_get_adapter_resources(qdev);
4195 err = ql_adapter_up(qdev);
4202 ql_release_adapter_resources(qdev);
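/* Resize the large RX buffers after an MTU change by cycling the
 * adapter down and back up.
 */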
4206 static int ql_change_rx_buffers(struct ql_adapter *qdev)
4208 struct rx_ring *rx_ring;
4212 /* Wait for an outstanding reset to complete. */
4213 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4215 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4216 netif_err(qdev, ifup, qdev->ndev,
4217 "Waiting for adapter UP...\n");
4222 netif_err(qdev, ifup, qdev->ndev,
4223 "Timed out waiting for adapter UP\n");
4228 status = ql_adapter_down(qdev);
4232 /* Get the new rx buffer size. */
4233 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4234 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4235 qdev->lbq_buf_order = get_order(lbq_buf_len);
4237 for (i = 0; i < qdev->rss_ring_count; i++) {
4238 rx_ring = &qdev->rx_ring[i];
4239 /* Set the new size. */
4240 rx_ring->lbq_buf_size = lbq_buf_len;
4243 status = ql_adapter_up(qdev);
4249 netif_alert(qdev, ifup, qdev->ndev,
4250 "Driver up/down cycle failed, closing device.\n");
4251 set_bit(QL_ADAPTER_UP, &qdev->flags);
4252 dev_close(qdev->ndev);
4256 static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4258 struct ql_adapter *qdev = netdev_priv(ndev);
4261 if (ndev->mtu == 1500 && new_mtu == 9000) {
4262 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4263 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4264 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4268 queue_delayed_work(qdev->workqueue,
4269 &qdev->mpi_port_cfg_work, 3*HZ);
4271 ndev->mtu = new_mtu;
4273 if (!netif_running(qdev->ndev)) {
4277 status = ql_change_rx_buffers(qdev);
4279 netif_err(qdev, ifup, qdev->ndev,
4280 "Changing MTU failed.\n");
4286 static struct net_device_stats *qlge_get_stats(struct net_device
4289 struct ql_adapter *qdev = netdev_priv(ndev);
4290 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4291 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4292 unsigned long pkts, mcast, dropped, errors, bytes;
4296 pkts = mcast = dropped = errors = bytes = 0;
4297 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4298 pkts += rx_ring->rx_packets;
4299 bytes += rx_ring->rx_bytes;
4300 dropped += rx_ring->rx_dropped;
4301 errors += rx_ring->rx_errors;
4302 mcast += rx_ring->rx_multicast;
4304 ndev->stats.rx_packets = pkts;
4305 ndev->stats.rx_bytes = bytes;
4306 ndev->stats.rx_dropped = dropped;
4307 ndev->stats.rx_errors = errors;
4308 ndev->stats.multicast = mcast;
4311 pkts = errors = bytes = 0;
4312 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4313 pkts += tx_ring->tx_packets;
4314 bytes += tx_ring->tx_bytes;
4315 errors += tx_ring->tx_errors;
4317 ndev->stats.tx_packets = pkts;
4318 ndev->stats.tx_bytes = bytes;
4319 ndev->stats.tx_errors = errors;
4320 return &ndev->stats;
4323 static void qlge_set_multicast_list(struct net_device *ndev)
4325 struct ql_adapter *qdev = netdev_priv(ndev);
4326 struct netdev_hw_addr *ha;
4329 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4333 * Set or clear promiscuous mode if a
4334 * transition is taking place.
4336 if (ndev->flags & IFF_PROMISC) {
4337 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4338 if (ql_set_routing_reg
4339 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4340 netif_err(qdev, hw, qdev->ndev,
4341 "Failed to set promiscuous mode.\n");
4343 set_bit(QL_PROMISCUOUS, &qdev->flags);
4347 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4348 if (ql_set_routing_reg
4349 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4350 netif_err(qdev, hw, qdev->ndev,
4351 "Failed to clear promiscuous mode.\n");
4353 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4359 * Set or clear all multicast mode if a
4360 * transition is taking place.
4362 if ((ndev->flags & IFF_ALLMULTI) ||
4363 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4364 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4365 if (ql_set_routing_reg
4366 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4367 netif_err(qdev, hw, qdev->ndev,
4368 "Failed to set all-multi mode.\n");
4370 set_bit(QL_ALLMULTI, &qdev->flags);
4374 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4375 if (ql_set_routing_reg
4376 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4377 netif_err(qdev, hw, qdev->ndev,
4378 "Failed to clear all-multi mode.\n");
4380 clear_bit(QL_ALLMULTI, &qdev->flags);
4385 if (!netdev_mc_empty(ndev)) {
4386 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4390 netdev_for_each_mc_addr(ha, ndev) {
4391 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4392 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4393 netif_err(qdev, hw, qdev->ndev,
4394 "Failed to loadmulticast address.\n");
4395 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4400 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4401 if (ql_set_routing_reg
4402 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4403 netif_err(qdev, hw, qdev->ndev,
4404 "Failed to set multicast match mode.\n");
4406 set_bit(QL_ALLMULTI, &qdev->flags);
4410 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4413 static int qlge_set_mac_address(struct net_device *ndev, void *p)
4415 struct ql_adapter *qdev = netdev_priv(ndev);
4416 struct sockaddr *addr = p;
4419 if (!is_valid_ether_addr(addr->sa_data))
4420 return -EADDRNOTAVAIL;
4421 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4422 /* Update local copy of current mac address. */
4423 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4425 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4428 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4429 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4431 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4432 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
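/* TX watchdog timeout: treat it as a fatal error and schedule ASIC
 * recovery.
 */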
4436 static void qlge_tx_timeout(struct net_device *ndev)
4438 struct ql_adapter *qdev = netdev_priv(ndev);
4439 ql_queue_asic_error(qdev);
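/* Worker that recovers from an ASIC error by cycling the adapter down
 * and back up.
 */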
4442 static void ql_asic_reset_work(struct work_struct *work)
4444 struct ql_adapter *qdev =
4445 container_of(work, struct ql_adapter, asic_reset_work.work);
4448 status = ql_adapter_down(qdev);
4452 status = ql_adapter_up(qdev);
4456 /* Restore rx mode. */
4457 clear_bit(QL_ALLMULTI, &qdev->flags);
4458 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4459 qlge_set_multicast_list(qdev->ndev);
4464 netif_alert(qdev, ifup, qdev->ndev,
4465 "Driver up/down cycle failed, closing device\n");
4467 set_bit(QL_ADAPTER_UP, &qdev->flags);
4468 dev_close(qdev->ndev);
static const struct nic_operations qla8012_nic_ops = {
	.get_flash = ql_get_8012_flash_params,
	.port_initialize = ql_8012_port_initialize,
};

static const struct nic_operations qla8000_nic_ops = {
	.get_flash = ql_get_8000_flash_params,
	.port_initialize = ql_8000_port_initialize,
};
/* Find the pcie function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, &temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
		     MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}
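/* Work out which PCI function and XGMAC port this instance owns and set
 * up the matching semaphore mask, status bits, mailbox addresses and
 * chip-specific nic_ops.
 */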
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
}
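/* One-time setup done at probe: enable and map the PCI device, choose a
 * DMA mask, validate the flash, and initialize ring sizes, locks and
 * deferred work items.
 */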
static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
			  int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}
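/* Network stack entry points for this driver. */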
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open = qlge_open,
	.ndo_stop = qlge_close,
	.ndo_start_xmit = qlge_send,
	.ndo_change_mtu = qlge_change_mtu,
	.ndo_get_stats = qlge_get_stats,
	.ndo_set_rx_mode = qlge_set_multicast_list,
	.ndo_set_mac_address = qlge_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = qlge_tx_timeout,
	.ndo_fix_features = qlge_fix_features,
	.ndo_set_features = qlge_set_features,
	.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
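/* Periodic watchdog: reading STS lets EEH notice a dead PCI channel; the
 * timer re-arms itself every 5 seconds while the channel is healthy.
 */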
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
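/* PCI probe entry point: allocate a multiqueue net_device, run the
 * one-time adapter setup and register the interface.
 */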
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found = 0;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS, netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
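/* Loopback helpers exported for the ethtool self-test path. */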
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
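/* Undo qlge_probe(): stop the timer and work items, unregister the
 * net_device and release all PCI resources.
 */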
static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}
/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the device probe routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
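/* Final stage of EEH recovery: re-open the interface if it was running
 * and re-arm the watchdog timer.
 */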
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
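/* Legacy PCI power-management entry points, built only when CONFIG_PM
 * is set.
 */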
#ifdef CONFIG_PM
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);
	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}
	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
	return 0;
}
#endif /* CONFIG_PM */
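/* Quiesce the device on system shutdown via the suspend path. */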
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver qlge_driver = {
	.name = qlge_driver_name,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
module_pci_driver(qlge_driver);