/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/netdevice.h>

#include "bnx2x.h"

extern int num_queues;
/************************ Macros ********************************/

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset((void *)x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
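/*
 * Illustrative sketch (not part of the original header): the allocation
 * macros above expect a local 'bp' in scope and an 'alloc_mem_err' label
 * to jump to on failure; teardown uses the matching free macros, which
 * tolerate NULL. The 'demo' buffer below is hypothetical.
 */
static inline int bnx2x_demo_alloc(struct bnx2x *bp)
{
	void *demo_virt = NULL;
	dma_addr_t demo_phys = 0;

	BNX2X_PCI_ALLOC(demo_virt, &demo_phys, 4096);
	/* ... use the zeroed, DMA-visible buffer ... */
	BNX2X_PCI_FREE(demo_virt, demo_phys, 4096);
	return 0;

alloc_mem_err:
	return -ENOMEM;
}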
/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */

/**
 * bnx2x_initial_phy_init - initialize link parameters structure variables.
 *
 * @bp:		driver handle
 * @load_mode:	current mode
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * bnx2x_link_set - configure hw according to link parameters structure.
 *
 * @bp:		driver handle
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * bnx2x_link_test - query link status.
 *
 * @bp:		driver handle
 * @is_serdes:	bool
 *
 * Returns 0 if link is UP.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * bnx2x__link_status_update - handles link status change.
 *
 * @bp:		driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * bnx2x_link_report - report link status to upper layer.
 *
 * @bp:		driver handle
 */
void bnx2x_link_report(struct bnx2x *bp);

/* Non-atomic version of bnx2x_link_report() */
void __bnx2x_link_report(struct bnx2x *bp);
/**
 * bnx2x_get_mf_speed - calculate MF speed.
 *
 * @bp:		driver handle
 *
 * Takes into account the current line speed and MF configuration.
 */
u16 bnx2x_get_mf_speed(struct bnx2x *bp);

/**
 * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * bnx2x_interrupt - non MSI-X interrupt handler
 *
 * @irq:		irq number
 * @dev_instance:	private instance
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
/**
 * bnx2x_cnic_notify - send command to cnic driver
 *
 * @bp:		driver handle
 * @cmd:	command
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
 *
 * @bp:		driver handle
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * bnx2x_int_enable - enable HW interrupts.
 *
 * @bp:		driver handle
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * bnx2x_int_disable_sync - disable interrupts.
 *
 * @bp:		driver handle
 * @disable_hw:	true - disable HW interrupts.
 *
 * This function ensures that no ISRs or SP DPCs (sp_task) are running
 * after it returns.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
 * bnx2x_init_firmware - loads device firmware
 *
 * @bp:		driver handle
 */
int bnx2x_init_firmware(struct bnx2x *bp);

/**
 * bnx2x_init_hw - init HW blocks according to current initialization stage.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_nic_init - init driver internals.
 *
 * @bp:		driver handle
 * @load_code:	COMMON, PORT or FUNCTION
 *
 * Initializes:
 *  - rings
 *  - status blocks
 *  - etc.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * bnx2x_alloc_mem - allocate driver's memory.
 *
 * @bp:		driver handle
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * bnx2x_free_mem - release driver's memory.
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem(struct bnx2x *bp);
/**
 * bnx2x_setup_client - setup eth client.
 *
 * @bp:		driver handle
 * @fp:		pointer to fastpath structure
 * @is_leading:	boolean
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading);

/**
 * bnx2x_set_num_queues - set number of queues according to mode.
 *
 * @bp:		driver handle
 */
void bnx2x_set_num_queues(struct bnx2x *bp);

/**
 * bnx2x_chip_cleanup - cleanup chip internals.
 *
 * @bp:			driver handle
 * @unload_mode:	COMMON, PORT, FUNCTION
 *
 * - Cleanup MAC configuration.
 * - Closes clients.
 * - etc.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
/**
 * bnx2x_acquire_hw_lock - acquire HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * bnx2x_release_hw_lock - release HW lock.
 *
 * @bp:		driver handle
 * @resource:	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
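/*
 * Illustrative sketch (not part of the original header): the two helpers
 * above are meant to bracket accesses to HW resources shared between
 * functions/ports, acquire/release style. The wrapper name is hypothetical.
 */
static inline int bnx2x_demo_locked_access(struct bnx2x *bp, u32 resource)
{
	int rc = bnx2x_acquire_hw_lock(bp, resource);

	if (rc)
		return rc;

	/* ... access the shared HW resource here ... */

	return bnx2x_release_hw_lock(bp, resource);
}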
/**
 * bnx2x_set_eth_mac - configure eth MAC address in the HW
 *
 * @bp:		driver handle
 * @set:	set or clear
 *
 * Configures according to the value in netdev->dev_addr.
 */
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);

/**
 * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
 *
 * @bp:		driver handle
 * @set:	set or clear the CAM entry
 *
 * Uses the next entries in the CAM after the ETH MAC(s).
 * This function waits until the ramrod completion returns.
 * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);

/**
 * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
 *
 * @bp:		driver handle
 * @set:	set or clear
 */
int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
/**
 * bnx2x_set_rx_mode - set MAC filtering configurations.
 *
 * @dev:	netdevice
 *
 * called with netif_tx_lock from dev_mcast.c
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
 *
 * @bp:		driver handle
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
/**
 * bnx2x_stats_handle - perform statistics handling according to event.
 *
 * @bp:		driver handle
 * @event:	bnx2x_stats_event
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/**
 * bnx2x_sp_event - handle ramrods completion.
 *
 * @fp:		fastpath handle for the event
 * @rr_cqe:	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

/**
 * bnx2x_func_start - init function
 *
 * @bp:		driver handle
 *
 * Must be called before sending CLIENT_SETUP for the first client.
 */
int bnx2x_func_start(struct bnx2x *bp);
/**
 * bnx2x_ilt_set_info - prepare ILT configurations.
 *
 * @bp:		driver handle
 */
void bnx2x_ilt_set_info(struct bnx2x *bp);

/**
 * bnx2x_dcbx_init - initialize dcbx protocol.
 *
 * @bp:		driver handle
 */
void bnx2x_dcbx_init(struct bnx2x *bp);

/**
 * bnx2x_set_power_state - set power state to the requested value.
 *
 * @bp:		driver handle
 * @state:	required state D0 or D3hot
 *
 * Currently only D0 and D3hot are supported.
 */
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
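/*
 * Illustrative sketch (not part of the original header): a suspend-style
 * path would typically map the PM message to a PCI state and enter it via
 * the helper above. The pci_choose_state() translation is an assumption
 * here, and the wrapper name is hypothetical.
 */
static inline int bnx2x_demo_enter_low_power(struct bnx2x *bp,
					     struct pci_dev *pdev,
					     pm_message_t state)
{
	return bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
}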
/**
 * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
 *
 * @bp:		driver handle
 * @value:	new value
 */
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

/* dev_close main block */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);

/* dev_open main block */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

/* hard_xmit callback */
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);

int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

/* NAPI poll Rx part */
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);

/* NAPI poll Tx part */
int bnx2x_tx_int(struct bnx2x_fastpath *fp);

/* suspend/resume callbacks */
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);

/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
int bnx2x_alloc_fp_mem(struct bnx2x *bp);

void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
/**
 * bnx2x_enable_msix - set msix configuration.
 *
 * @bp:		driver handle
 *
 * fills msix_table, requests vectors, updates num_queues
 * according to number of available vectors.
 */
int bnx2x_enable_msix(struct bnx2x *bp);

/**
 * bnx2x_enable_msi - request msi mode from OS, update driver internals accordingly
 *
 * @bp:		driver handle
 */
int bnx2x_enable_msi(struct bnx2x *bp);
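/*
 * Illustrative sketch (not part of the original header), assuming the
 * usual fallback order when choosing an interrupt mode: prefer MSI-X,
 * fall back to MSI, and stay with legacy INTx if neither is granted.
 * The wrapper name is hypothetical.
 */
static inline void bnx2x_demo_pick_int_mode(struct bnx2x *bp)
{
	if (bnx2x_enable_msix(bp) == 0)
		return;		/* MSI-X granted, msix_table filled */

	if (bnx2x_enable_msi(bp) == 0)
		return;		/* single-vector MSI */

	/* neither granted: keep legacy INTx */
}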
/**
 * bnx2x_poll - NAPI callback
 *
 * @napi:	napi structure
 * @budget:	NAPI budget
 */
int bnx2x_poll(struct napi_struct *napi, int budget);

/**
 * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
 *
 * @bp:		driver handle
 */
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_free_mem_bp - release memory outside the main driver structure
 *
 * @bp:		driver handle
 */
void bnx2x_free_mem_bp(struct bnx2x *bp);

/**
 * bnx2x_change_mtu - change mtu netdev callback
 *
 * @dev:	net device
 * @new_mtu:	requested mtu
 */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

u32 bnx2x_fix_features(struct net_device *dev, u32 features);
int bnx2x_set_features(struct net_device *dev, u32 features);

/**
 * bnx2x_tx_timeout - tx timeout netdev callback
 *
 * @dev:	net device
 */
void bnx2x_tx_timeout(struct net_device *dev);
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier(); /* status block is written to by the chip */
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp,
		       BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp,
					  u8 idu_sb_id, bool is_Pf)
{
	u32 data, ctl, cnt = 100;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
	u32 sb_bit = 1 << (idu_sb_id%32);
	u32 func_encode = BP_FUNC(bp) |
			((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;

	/* Not supported in BC mode */
	if (CHIP_INT_MODE_IS_BC(bp))
		return;

	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
		IGU_REGULAR_CLEANUP_SET |
		IGU_REGULAR_BCLEANUP;

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 data, igu_addr_data);
	REG_WR(bp, igu_addr_data, data);
	mmiowb();
	barrier();
	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
			 ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();

	/* wait for clean up to finish */
	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
		msleep(20);

	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
		DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
			  "idu_sb_id %d offset %d bit %d (cnt %d)\n",
			  idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
	}
}
static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
				    u16 index, u8 op, u8 update)
{
	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;

	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
			     igu_addr);
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}
static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
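/*
 * Illustrative sketch (not part of the original header): a Tx completion
 * path might use bnx2x_tx_avail() to re-wake a queue it stopped earlier.
 * The MAX_SKB_FRAGS + 3 headroom (one BD per fragment plus parsing and
 * next-page overhead) and the wrapper name are assumptions, not values
 * taken from this file.
 */
static inline void bnx2x_demo_maybe_wake_queue(struct bnx2x *bp,
					       struct bnx2x_fastpath *fp)
{
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, fp->index);

	if (netif_tx_queue_stopped(txq) &&
	    bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
		netif_tx_wake_queue(txq);
}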
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/**
 * bnx2x_tx_disable - disables tx from stack point of view
 *
 * @bp:		driver handle
 */
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
	int i;

	/* Add NAPI objects */
	for_each_napi_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
	return  num_queues ?
		 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
		 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
}
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->disable_tpa)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 fp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_TX_RINGS; i++) {
		struct eth_tx_next_bd *tx_next_bd =
			&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

		tx_next_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		tx_next_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->tx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
	}

	SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
	fp->tx_db.data.zero_fill1 = 0;
	fp->tx_db.data.prod = 0;

	fp->tx_pkt_prod = 0;
	fp->tx_pkt_cons = 0;
	fp->tx_bd_prod = 0;
	fp->tx_bd_cons = 0;
	fp->tx_pkt = 0;
}

static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i)
		bnx2x_init_tx_ring_one(&bp->fp[i]);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		struct eth_rx_sge *sge;

		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
		sge->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		sge->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
	}
}

static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
		struct eth_rx_cqe_next_page *nextpg;

		nextpg = (struct eth_rx_cqe_next_page *)
			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
		nextpg->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		nextpg->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
	}
}
/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;

	/* This routine is called only during fp init so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			continue;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
	}

	if (fp->eth_q_stats.rx_skb_alloc_failed)
		BNX2X_ERR("was only able to allocate "
			  "%d rx skbs on queue[%d]\n",
			  (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
			       cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i - fp->eth_q_stats.rx_skb_alloc_failed;
}
static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
	bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
	bnx2x_fcoe(bp, bp) = bp;
	bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
	bnx2x_fcoe(bp, index) = FCOE_IDX;
	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
	bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
	/* qZone id equals to FW (per path) client id */
	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
		BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);

	bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
	    USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
	    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;

	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}

static inline void storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

static inline void storm_memset_cmng(struct bnx2x *bp,
				     struct cmng_struct_per_port *cmng,
				     u8 port)
{
	size_t size =
		sizeof(struct rate_shaping_vars_per_port) +
		sizeof(struct fairness_vars_per_port) +
		sizeof(struct safc_struct_per_port) +
		sizeof(struct pfc_struct_per_port);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);

	__storm_memset_struct(bp, addr, size, (u32 *)cmng);

	addr += size + 4 /* SKIP DCB+LLFC */;
	size = sizeof(struct cmng_struct_per_port) -
		size /* written */ - 4 /*skipped*/;

	__storm_memset_struct(bp, addr, size,
			      (u32 *)(cmng->traffic_type_to_priority_cos));
}
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

/**
 * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
 *
 * @bp:		driver handle
 * @mf_cfg:	MF configuration
 */
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
			      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		BNX2X_ERR("Illegal configuration detected for Max BW - "
			  "using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}

#endif /* BNX2X_CMN_H */