1 /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
6 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
7 * Copyright (C) 2006 Broadcom Corporation.
8 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
10 * Distribute under GPL.
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/moduleparam.h>
18 #include <linux/types.h>
19 #include <linux/netdevice.h>
20 #include <linux/ethtool.h>
21 #include <linux/mii.h>
22 #include <linux/if_ether.h>
23 #include <linux/if_vlan.h>
24 #include <linux/etherdevice.h>
25 #include <linux/pci.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/ssb/ssb.h>
31 #include <linux/slab.h>
33 #include <asm/uaccess.h>
40 #define DRV_MODULE_NAME "b44"
41 #define DRV_MODULE_VERSION "2.0"
42 #define DRV_DESCRIPTION "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
44 #define B44_DEF_MSG_ENABLE \
54 /* length of time before we decide the hardware is borked,
55 * and dev->tx_timeout() should be called to fix the problem
57 #define B44_TX_TIMEOUT (5 * HZ)
59 /* hardware minimum and maximum for a single frame's data payload */
60 #define B44_MIN_MTU 60
61 #define B44_MAX_MTU 1500
63 #define B44_RX_RING_SIZE 512
64 #define B44_DEF_RX_RING_PENDING 200
65 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
67 #define B44_TX_RING_SIZE 512
68 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
69 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
72 #define TX_RING_GAP(BP) \
73 (B44_TX_RING_SIZE - (BP)->tx_pending)
74 #define TX_BUFFS_AVAIL(BP) \
75 (((BP)->tx_cons <= (BP)->tx_prod) ? \
76 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
77 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
78 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
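/* TX ring bookkeeping: tx_prod is where the driver queues new
 * descriptors, tx_cons is where the chip has finished.  TX_RING_GAP
 * keeps some slots unused so that prod == cons unambiguously means
 * "ring empty", and NEXT_TX relies on the ring size being a power
 * of two.
 */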
80 #define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
81 #define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
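/* Each RX buffer reserves RX_PKT_OFFSET bytes of headroom: the chip
 * deposits a struct rx_header there, and the extra 2 bytes keep the
 * IP header 4-byte aligned once the Ethernet header is pulled.
 */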
83 /* minimum number of free TX descriptors required to wake up TX process */
84 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
86 /* b44 internal pattern match filter info */
87 #define B44_PATTERN_BASE 0x400
88 #define B44_PATTERN_SIZE 0x80
89 #define B44_PMASK_BASE 0x600
90 #define B44_PMASK_SIZE 0x10
91 #define B44_MAX_PATTERNS 16
92 #define B44_ETHIPV6UDP_HLEN 62
93 #define B44_ETHIPV4UDP_HLEN 42
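/* Wakeup filter geometry: up to 16 patterns of 0x80 (128) bytes each
 * starting at 0x400, with a 0x10 (16) byte match mask per pattern at
 * 0x600.  The HLEN values are the offsets of the UDP payload in
 * Ethernet + IPv4/IPv6 + UDP frames, i.e. where a magic packet's
 * sync stream would begin.
 */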
95 MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
96 MODULE_DESCRIPTION(DRV_DESCRIPTION);
97 MODULE_LICENSE("GPL");
98 MODULE_VERSION(DRV_MODULE_VERSION);
100 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
101 module_param(b44_debug, int, 0);
102 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
105 #ifdef CONFIG_B44_PCI
106 static const struct pci_device_id b44_pci_tbl[] = {
107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
109 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
110 { 0 } /* terminate list with empty entry */
112 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
114 static struct pci_driver b44_pci_driver = {
115 .name = DRV_MODULE_NAME,
116 .id_table = b44_pci_tbl,
118 #endif /* CONFIG_B44_PCI */
120 static const struct ssb_device_id b44_ssb_tbl[] = {
121 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
124 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
126 static void b44_halt(struct b44 *);
127 static void b44_init_rings(struct b44 *);
129 #define B44_FULL_RESET 1
130 #define B44_FULL_RESET_SKIP_PHY 2
131 #define B44_PARTIAL_RESET 3
132 #define B44_CHIP_RESET_FULL 4
133 #define B44_CHIP_RESET_PARTIAL 5
135 static void b44_init_hw(struct b44 *, int);
137 static int dma_desc_sync_size;
140 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
141 #define _B44(x...) # x,
146 static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
148 unsigned long offset,
149 enum dma_data_direction dir)
151 dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
152 dma_desc_sync_size, dir);
155 static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
157 unsigned long offset,
158 enum dma_data_direction dir)
160 dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
161 dma_desc_sync_size, dir);
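/* The two sync helpers above exist for the *_RING_HACK case, where a
 * descriptor ring lives in streaming-mapped kernel memory instead of
 * a coherent allocation, so each descriptor update must be synced to
 * or from the device by hand.
 */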
164 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
166 return ssb_read32(bp->sdev, reg);
169 static inline void bw32(const struct b44 *bp,
170 unsigned long reg, unsigned long val)
172 ssb_write32(bp->sdev, reg, val);
175 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
176 u32 bit, unsigned long timeout, const int clear)
180 for (i = 0; i < timeout; i++) {
181 u32 val = br32(bp, reg);
183 if (clear && !(val & bit))
185 if (!clear && (val & bit))
191 netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
192 bit, reg, clear ? "clear" : "set");
199 static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
203 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
204 (index << CAM_CTRL_INDEX_SHIFT)));
206 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
208 val = br32(bp, B44_CAM_DATA_LO);
210 data[2] = (val >> 24) & 0xFF;
211 data[3] = (val >> 16) & 0xFF;
212 data[4] = (val >> 8) & 0xFF;
213 data[5] = (val >> 0) & 0xFF;
215 val = br32(bp, B44_CAM_DATA_HI);
217 data[0] = (val >> 8) & 0xFF;
218 data[1] = (val >> 0) & 0xFF;
221 static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
225 val = ((u32) data[2]) << 24;
226 val |= ((u32) data[3]) << 16;
227 val |= ((u32) data[4]) << 8;
228 val |= ((u32) data[5]) << 0;
229 bw32(bp, B44_CAM_DATA_LO, val);
230 val = (CAM_DATA_HI_VALID |
231 (((u32) data[0]) << 8) |
232 (((u32) data[1]) << 0));
233 bw32(bp, B44_CAM_DATA_HI, val);
234 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
235 (index << CAM_CTRL_INDEX_SHIFT)));
236 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
239 static inline void __b44_disable_ints(struct b44 *bp)
241 bw32(bp, B44_IMASK, 0);
244 static void b44_disable_ints(struct b44 *bp)
246 __b44_disable_ints(bp);
248 /* Flush posted writes. */
252 static void b44_enable_ints(struct b44 *bp)
254 bw32(bp, B44_IMASK, bp->imask);
257 static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
261 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
262 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
263 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
264 (phy_addr << MDIO_DATA_PMD_SHIFT) |
265 (reg << MDIO_DATA_RA_SHIFT) |
266 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
267 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
268 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
273 static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
275 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
276 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
277 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
278 (phy_addr << MDIO_DATA_PMD_SHIFT) |
279 (reg << MDIO_DATA_RA_SHIFT) |
280 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
281 (val & MDIO_DATA_DATA)));
282 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
285 static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
287 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
290 return __b44_readphy(bp, bp->phy_addr, reg, val);
293 static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
295 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
298 return __b44_writephy(bp, bp->phy_addr, reg, val);
301 /* miilib interface */
302 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
305 struct b44 *bp = netdev_priv(dev);
306 int rc = __b44_readphy(bp, phy_id, location, &val);
312 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
315 struct b44 *bp = netdev_priv(dev);
316 __b44_writephy(bp, phy_id, location, val);
319 static int b44_phy_reset(struct b44 *bp)
324 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
326 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
330 err = b44_readphy(bp, MII_BMCR, &val);
332 if (val & BMCR_RESET) {
333 netdev_err(bp->dev, "PHY Reset would not complete\n");
341 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
345 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
346 bp->flags |= pause_flags;
348 val = br32(bp, B44_RXCONFIG);
349 if (pause_flags & B44_FLAG_RX_PAUSE)
350 val |= RXCONFIG_FLOW;
352 val &= ~RXCONFIG_FLOW;
353 bw32(bp, B44_RXCONFIG, val);
355 val = br32(bp, B44_MAC_FLOW);
356 if (pause_flags & B44_FLAG_TX_PAUSE)
357 val |= (MAC_FLOW_PAUSE_ENAB |
358 (0xc0 & MAC_FLOW_RX_HI_WATER));
360 val &= ~MAC_FLOW_PAUSE_ENAB;
361 bw32(bp, B44_MAC_FLOW, val);
364 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
368 /* The driver supports only rx pause by default because
369 the b44 mac tx pause mechanism generates excessive
370 pause frames.
371 Use ethtool to turn on b44 tx pause if necessary.
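/* This is the IEEE 802.3 pause resolution, restricted to the one
 * combination we accept: we advertise symmetric + asymmetric pause,
 * the partner advertises asymmetric only, i.e. it can send PAUSE
 * frames but will not honour them, so enable RX pause locally and
 * leave TX pause off.
 */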
373 if ((local & ADVERTISE_PAUSE_CAP) &&
374 (local & ADVERTISE_PAUSE_ASYM)){
375 if ((remote & LPA_PAUSE_ASYM) &&
376 !(remote & LPA_PAUSE_CAP))
377 pause_enab |= B44_FLAG_RX_PAUSE;
380 __b44_set_flow_ctrl(bp, pause_enab);
383 #ifdef CONFIG_BCM47XX
384 #include <bcm47xx_nvram.h>
385 static void b44_wap54g10_workaround(struct b44 *bp)
392 * workaround for bad hardware design in Linksys WAP54G v1.0
393 * see https://dev.openwrt.org/ticket/146
394 * check and reset bit "isolate"
396 if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
398 if (simple_strtoul(buf, NULL, 0) == 2) {
399 err = __b44_readphy(bp, 0, MII_BMCR, &val);
402 if (!(val & BMCR_ISOLATE))
404 val &= ~BMCR_ISOLATE;
405 err = __b44_writephy(bp, 0, MII_BMCR, val);
411 pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
414 static inline void b44_wap54g10_workaround(struct b44 *bp)
419 static int b44_setup_phy(struct b44 *bp)
424 b44_wap54g10_workaround(bp);
426 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
428 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
430 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
431 val & MII_ALEDCTRL_ALLMSK)) != 0)
433 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
435 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
436 val | MII_TLEDCTRL_ENABLE)) != 0)
439 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
440 u32 adv = ADVERTISE_CSMA;
442 if (bp->flags & B44_FLAG_ADV_10HALF)
443 adv |= ADVERTISE_10HALF;
444 if (bp->flags & B44_FLAG_ADV_10FULL)
445 adv |= ADVERTISE_10FULL;
446 if (bp->flags & B44_FLAG_ADV_100HALF)
447 adv |= ADVERTISE_100HALF;
448 if (bp->flags & B44_FLAG_ADV_100FULL)
449 adv |= ADVERTISE_100FULL;
451 if (bp->flags & B44_FLAG_PAUSE_AUTO)
452 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
454 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
456 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
457 BMCR_ANRESTART))) != 0)
462 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
464 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
465 if (bp->flags & B44_FLAG_100_BASE_T)
466 bmcr |= BMCR_SPEED100;
467 if (bp->flags & B44_FLAG_FULL_DUPLEX)
468 bmcr |= BMCR_FULLDPLX;
469 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
472 /* Since we will not be negotiating there is no safe way
473 * to determine if the link partner supports flow control
474 * or not. So just disable it completely in this case.
476 b44_set_flow_ctrl(bp, 0, 0);
483 static void b44_stats_update(struct b44 *bp)
488 val = &bp->hw_stats.tx_good_octets;
489 u64_stats_update_begin(&bp->hw_stats.syncp);
491 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
492 *val++ += br32(bp, reg);
498 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
499 *val++ += br32(bp, reg);
502 u64_stats_update_end(&bp->hw_stats.syncp);
505 static void b44_link_report(struct b44 *bp)
507 if (!netif_carrier_ok(bp->dev)) {
508 netdev_info(bp->dev, "Link is down\n");
510 netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
511 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
512 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
514 netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
515 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
516 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
520 static void b44_check_phy(struct b44 *bp)
524 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
525 bp->flags |= B44_FLAG_100_BASE_T;
526 bp->flags |= B44_FLAG_FULL_DUPLEX;
527 if (!netif_carrier_ok(bp->dev)) {
528 u32 val = br32(bp, B44_TX_CTRL);
529 val |= TX_CTRL_DUPLEX;
530 bw32(bp, B44_TX_CTRL, val);
531 netif_carrier_on(bp->dev);
537 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
538 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
540 if (aux & MII_AUXCTRL_SPEED)
541 bp->flags |= B44_FLAG_100_BASE_T;
543 bp->flags &= ~B44_FLAG_100_BASE_T;
544 if (aux & MII_AUXCTRL_DUPLEX)
545 bp->flags |= B44_FLAG_FULL_DUPLEX;
547 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
549 if (!netif_carrier_ok(bp->dev) &&
550 (bmsr & BMSR_LSTATUS)) {
551 u32 val = br32(bp, B44_TX_CTRL);
552 u32 local_adv, remote_adv;
554 if (bp->flags & B44_FLAG_FULL_DUPLEX)
555 val |= TX_CTRL_DUPLEX;
557 val &= ~TX_CTRL_DUPLEX;
558 bw32(bp, B44_TX_CTRL, val);
560 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
561 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
562 !b44_readphy(bp, MII_LPA, &remote_adv))
563 b44_set_flow_ctrl(bp, local_adv, remote_adv);
566 netif_carrier_on(bp->dev);
568 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
570 netif_carrier_off(bp->dev);
574 if (bmsr & BMSR_RFAULT)
575 netdev_warn(bp->dev, "Remote fault detected in PHY\n");
577 netdev_warn(bp->dev, "Jabber detected in PHY\n");
581 static void b44_timer(unsigned long __opaque)
583 struct b44 *bp = (struct b44 *) __opaque;
585 spin_lock_irq(&bp->lock);
589 b44_stats_update(bp);
591 spin_unlock_irq(&bp->lock);
593 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
596 static void b44_tx(struct b44 *bp)
599 unsigned bytes_compl = 0, pkts_compl = 0;
601 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
602 cur /= sizeof(struct dma_desc);
604 /* XXX needs updating when NETIF_F_SG is supported */
605 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
606 struct ring_info *rp = &bp->tx_buffers[cons];
607 struct sk_buff *skb = rp->skb;
611 dma_unmap_single(bp->sdev->dma_dev,
617 bytes_compl += skb->len;
620 dev_kfree_skb_irq(skb);
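/* Byte Queue Limits: report completed work so the stack can bound
 * the bytes it queues (paired with netdev_sent_queue() in
 * b44_start_xmit() and netdev_reset_queue() in b44_init_hw()).
 */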
623 netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
625 if (netif_queue_stopped(bp->dev) &&
626 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
627 netif_wake_queue(bp->dev);
629 bw32(bp, B44_GPTIMER, 0);
632 /* Works like this. This chip writes a 'struct rx_header' 30 bytes
633 * before the DMA address you give it. So we allocate 30 more bytes
634 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
635 * point the chip at 30 bytes past where the rx_header will go.
637 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
640 struct ring_info *src_map, *map;
641 struct rx_header *rh;
649 src_map = &bp->rx_buffers[src_idx];
650 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
651 map = &bp->rx_buffers[dest_idx];
652 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
656 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
660 /* Hardware bug work-around: the chip is unable to do PCI DMA
661 to/from anything above 1GB :-( */
662 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
663 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
665 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
666 dma_unmap_single(bp->sdev->dma_dev, mapping,
667 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
668 dev_kfree_skb_any(skb);
669 skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
672 mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
675 if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
676 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
677 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
678 dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
679 dev_kfree_skb_any(skb);
682 bp->force_copybreak = 1;
685 rh = (struct rx_header *) skb->data;
691 map->mapping = mapping;
696 ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
697 if (dest_idx == (B44_RX_RING_SIZE - 1))
698 ctrl |= DESC_CTRL_EOT;
700 dp = &bp->rx_ring[dest_idx];
701 dp->ctrl = cpu_to_le32(ctrl);
702 dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
704 if (bp->flags & B44_FLAG_RX_RING_HACK)
705 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
706 dest_idx * sizeof(*dp),
709 return RX_PKT_BUF_SZ;
712 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
714 struct dma_desc *src_desc, *dest_desc;
715 struct ring_info *src_map, *dest_map;
716 struct rx_header *rh;
720 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
721 dest_desc = &bp->rx_ring[dest_idx];
722 dest_map = &bp->rx_buffers[dest_idx];
723 src_desc = &bp->rx_ring[src_idx];
724 src_map = &bp->rx_buffers[src_idx];
726 dest_map->skb = src_map->skb;
727 rh = (struct rx_header *) src_map->skb->data;
730 dest_map->mapping = src_map->mapping;
732 if (bp->flags & B44_FLAG_RX_RING_HACK)
733 b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
734 src_idx * sizeof(*src_desc),
737 ctrl = src_desc->ctrl;
738 if (dest_idx == (B44_RX_RING_SIZE - 1))
739 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
741 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
743 dest_desc->ctrl = ctrl;
744 dest_desc->addr = src_desc->addr;
748 if (bp->flags & B44_FLAG_RX_RING_HACK)
749 b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
750 dest_idx * sizeof(*dest_desc),
753 dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
758 static int b44_rx(struct b44 *bp, int budget)
764 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
765 prod /= sizeof(struct dma_desc);
768 while (cons != prod && budget > 0) {
769 struct ring_info *rp = &bp->rx_buffers[cons];
770 struct sk_buff *skb = rp->skb;
771 dma_addr_t map = rp->mapping;
772 struct rx_header *rh;
775 dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
778 rh = (struct rx_header *) skb->data;
779 len = le16_to_cpu(rh->len);
780 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
781 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
783 b44_recycle_rx(bp, cons, bp->rx_prod);
785 bp->dev->stats.rx_dropped++;
795 len = le16_to_cpu(rh->len);
796 } while (len == 0 && i++ < 5);
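/* Copybreak: large frames are handed up the stack and the ring slot
 * refilled with a fresh buffer; small frames (or every frame once
 * force_copybreak is set because DMA-safe memory ran short) are
 * copied into a new skb so the original buffer can be recycled in
 * place.
 */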
804 if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
806 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
809 dma_unmap_single(bp->sdev->dma_dev, map,
810 skb_size, DMA_FROM_DEVICE);
811 /* Leave out rx_header */
812 skb_put(skb, len + RX_PKT_OFFSET);
813 skb_pull(skb, RX_PKT_OFFSET);
815 struct sk_buff *copy_skb;
817 b44_recycle_rx(bp, cons, bp->rx_prod);
818 copy_skb = netdev_alloc_skb_ip_align(bp->dev, len);
819 if (copy_skb == NULL)
820 goto drop_it_no_recycle;
822 skb_put(copy_skb, len);
823 /* DMA sync done above, copy just the actual packet */
824 skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
825 copy_skb->data, len);
828 skb_checksum_none_assert(skb);
829 skb->protocol = eth_type_trans(skb, bp->dev);
830 netif_receive_skb(skb);
834 bp->rx_prod = (bp->rx_prod + 1) &
835 (B44_RX_RING_SIZE - 1);
836 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
840 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
845 static int b44_poll(struct napi_struct *napi, int budget)
847 struct b44 *bp = container_of(napi, struct b44, napi);
851 spin_lock_irqsave(&bp->lock, flags);
853 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
854 /* spin_lock(&bp->tx_lock); */
856 /* spin_unlock(&bp->tx_lock); */
858 if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
859 bp->istat &= ~ISTAT_RFO;
860 b44_disable_ints(bp);
861 ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
863 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
864 netif_wake_queue(bp->dev);
867 spin_unlock_irqrestore(&bp->lock, flags);
870 if (bp->istat & ISTAT_RX)
871 work_done += b44_rx(bp, budget);
873 if (bp->istat & ISTAT_ERRORS) {
874 spin_lock_irqsave(&bp->lock, flags);
877 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
878 netif_wake_queue(bp->dev);
879 spin_unlock_irqrestore(&bp->lock, flags);
883 if (work_done < budget) {
891 static irqreturn_t b44_interrupt(int irq, void *dev_id)
893 struct net_device *dev = dev_id;
894 struct b44 *bp = netdev_priv(dev);
898 spin_lock(&bp->lock);
900 istat = br32(bp, B44_ISTAT);
901 imask = br32(bp, B44_IMASK);
903 /* The interrupt mask register controls which interrupt bits
904 * will actually raise an interrupt to the CPU when set by hw/firmware,
905 * but doesn't mask off the bits.
911 if (unlikely(!netif_running(dev))) {
912 netdev_info(dev, "late interrupt\n");
916 if (napi_schedule_prep(&bp->napi)) {
917 /* NOTE: These writes are flushed by the readback of
918 * the ISTAT register below.
921 __b44_disable_ints(bp);
922 __napi_schedule(&bp->napi);
926 bw32(bp, B44_ISTAT, istat);
929 spin_unlock(&bp->lock);
930 return IRQ_RETVAL(handled);
933 static void b44_tx_timeout(struct net_device *dev)
935 struct b44 *bp = netdev_priv(dev);
937 netdev_err(dev, "transmit timed out, resetting\n");
939 spin_lock_irq(&bp->lock);
943 b44_init_hw(bp, B44_FULL_RESET);
945 spin_unlock_irq(&bp->lock);
949 netif_wake_queue(dev);
952 static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
954 struct b44 *bp = netdev_priv(dev);
955 int rc = NETDEV_TX_OK;
957 u32 len, entry, ctrl;
961 spin_lock_irqsave(&bp->lock, flags);
963 /* This is a hard error, log it. */
964 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
965 netif_stop_queue(dev);
966 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
970 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
971 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
972 struct sk_buff *bounce_skb;
974 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
975 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
976 dma_unmap_single(bp->sdev->dma_dev, mapping, len,
979 bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
983 mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
985 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
986 if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
987 dma_unmap_single(bp->sdev->dma_dev, mapping,
989 dev_kfree_skb_any(bounce_skb);
993 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
994 dev_kfree_skb_any(skb);
999 bp->tx_buffers[entry].skb = skb;
1000 bp->tx_buffers[entry].mapping = mapping;
1002 ctrl = (len & DESC_CTRL_LEN);
1003 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1004 if (entry == (B44_TX_RING_SIZE - 1))
1005 ctrl |= DESC_CTRL_EOT;
1007 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1008 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);
1010 if (bp->flags & B44_FLAG_TX_RING_HACK)
1011 b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
1012 entry * sizeof(bp->tx_ring[0]),
1015 entry = NEXT_TX(entry);
1017 bp->tx_prod = entry;
1021 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1022 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1023 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1024 if (bp->flags & B44_FLAG_REORDER_BUG)
1025 br32(bp, B44_DMATX_PTR);
1027 netdev_sent_queue(dev, skb->len);
1029 if (TX_BUFFS_AVAIL(bp) < 1)
1030 netif_stop_queue(dev);
1033 spin_unlock_irqrestore(&bp->lock, flags);
1038 rc = NETDEV_TX_BUSY;
1042 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1044 struct b44 *bp = netdev_priv(dev);
1046 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1049 if (!netif_running(dev)) {
1050 /* We'll just catch it later when the
1057 spin_lock_irq(&bp->lock);
1061 b44_init_hw(bp, B44_FULL_RESET);
1062 spin_unlock_irq(&bp->lock);
1064 b44_enable_ints(bp);
1069 /* Free up pending packets in all rx/tx rings.
1071 * The chip has been shut down and the driver detached from
1072 * the networking, so no interrupts or new tx packets will
1073 * end up in the driver. bp->lock is not held and we are not
1074 * in an interrupt context and thus may sleep.
1076 static void b44_free_rings(struct b44 *bp)
1078 struct ring_info *rp;
1081 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1082 rp = &bp->rx_buffers[i];
1084 if (rp->skb == NULL)
1086 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1088 dev_kfree_skb_any(rp->skb);
1092 /* XXX needs changes once NETIF_F_SG is set... */
1093 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1094 rp = &bp->tx_buffers[i];
1096 if (rp->skb == NULL)
1098 dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1100 dev_kfree_skb_any(rp->skb);
1105 /* Initialize tx/rx rings for packet processing.
1107 * The chip has been shut down and the driver detached from
1108 * the networking, so no interrupts or new tx packets will
1109 * end up in the driver.
1111 static void b44_init_rings(struct b44 *bp)
1117 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1118 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1120 if (bp->flags & B44_FLAG_RX_RING_HACK)
1121 dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
1122 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1124 if (bp->flags & B44_FLAG_TX_RING_HACK)
1125 dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
1126 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1128 for (i = 0; i < bp->rx_pending; i++) {
1129 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1135 * Must not be invoked with interrupt sources disabled and
1136 * the hardware shut down.
1138 static void b44_free_consistent(struct b44 *bp)
1140 kfree(bp->rx_buffers);
1141 bp->rx_buffers = NULL;
1142 kfree(bp->tx_buffers);
1143 bp->tx_buffers = NULL;
1145 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1146 dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
1147 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
1150 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1151 bp->rx_ring, bp->rx_ring_dma);
1153 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1156 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1157 dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
1158 DMA_TABLE_BYTES, DMA_TO_DEVICE);
1161 dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
1162 bp->tx_ring, bp->tx_ring_dma);
1164 bp->flags &= ~B44_FLAG_TX_RING_HACK;
1169 * Must not be invoked with interrupt sources disabled and
1170 * the hardware shut down. Can sleep.
1172 static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1176 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1177 bp->rx_buffers = kzalloc(size, gfp);
1178 if (!bp->rx_buffers)
1181 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1182 bp->tx_buffers = kzalloc(size, gfp);
1183 if (!bp->tx_buffers)
1186 size = DMA_TABLE_BYTES;
1187 bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1188 &bp->rx_ring_dma, gfp);
1190 /* Allocation may have failed due to dma_alloc_coherent
1191 insisting on use of GFP_DMA, which is more restrictive
1192 than necessary... */
1193 struct dma_desc *rx_ring;
1194 dma_addr_t rx_ring_dma;
1196 rx_ring = kzalloc(size, gfp);
1200 rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1204 if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1205 rx_ring_dma + size > DMA_BIT_MASK(30)) {
1210 bp->rx_ring = rx_ring;
1211 bp->rx_ring_dma = rx_ring_dma;
1212 bp->flags |= B44_FLAG_RX_RING_HACK;
1215 bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1216 &bp->tx_ring_dma, gfp);
1218 /* Allocation may have failed due to dma_alloc_coherent
1219 insisting on use of GFP_DMA, which is more restrictive
1220 than necessary... */
1221 struct dma_desc *tx_ring;
1222 dma_addr_t tx_ring_dma;
1224 tx_ring = kzalloc(size, gfp);
1228 tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1232 if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1233 tx_ring_dma + size > DMA_BIT_MASK(30)) {
1238 bp->tx_ring = tx_ring;
1239 bp->tx_ring_dma = tx_ring_dma;
1240 bp->flags |= B44_FLAG_TX_RING_HACK;
1246 b44_free_consistent(bp);
1250 /* bp->lock is held. */
1251 static void b44_clear_stats(struct b44 *bp)
1255 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1256 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1258 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1262 /* bp->lock is held. */
1263 static void b44_chip_reset(struct b44 *bp, int reset_kind)
1265 struct ssb_device *sdev = bp->sdev;
1268 was_enabled = ssb_device_is_enabled(bp->sdev);
1270 ssb_device_enable(bp->sdev, 0);
1271 ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
1274 bw32(bp, B44_RCV_LAZY, 0);
1275 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1276 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1277 bw32(bp, B44_DMATX_CTRL, 0);
1278 bp->tx_prod = bp->tx_cons = 0;
1279 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1280 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1283 bw32(bp, B44_DMARX_CTRL, 0);
1284 bp->rx_prod = bp->rx_cons = 0;
1287 b44_clear_stats(bp);
1290 * Don't enable the PHY if we are doing a partial reset;
1291 * we are probably going to power down.
1293 if (reset_kind == B44_CHIP_RESET_PARTIAL)
1296 switch (sdev->bus->bustype) {
1297 case SSB_BUSTYPE_SSB:
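/* Native SSB: derive the MDC divider from the backplane clock so
 * the MDIO bus stays within spec; the PCI-hosted case below uses a
 * fixed divider instead.
 */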
1298 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1299 (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
1301 & MDIO_CTRL_MAXF_MASK)));
1303 case SSB_BUSTYPE_PCI:
1304 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1305 (0x0d & MDIO_CTRL_MAXF_MASK)));
1307 case SSB_BUSTYPE_PCMCIA:
1308 case SSB_BUSTYPE_SDIO:
1309 WARN_ON(1); /* A device with this bus does not exist. */
1313 br32(bp, B44_MDIO_CTRL);
1315 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1316 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1317 br32(bp, B44_ENET_CTRL);
1318 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1320 u32 val = br32(bp, B44_DEVCTRL);
1322 if (val & DEVCTRL_EPR) {
1323 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1324 br32(bp, B44_DEVCTRL);
1327 bp->flags |= B44_FLAG_INTERNAL_PHY;
1331 /* bp->lock is held. */
1332 static void b44_halt(struct b44 *bp)
1334 b44_disable_ints(bp);
1337 /* power down PHY */
1338 netdev_info(bp->dev, "powering down PHY\n");
1339 bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
1340 /* now reset the chip, but without enabling the MAC & PHY
1341 * part of it. This has to be done _after_ we shut down the PHY */
1342 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1345 /* bp->lock is held. */
1346 static void __b44_set_mac_addr(struct b44 *bp)
1348 bw32(bp, B44_CAM_CTRL, 0);
1349 if (!(bp->dev->flags & IFF_PROMISC)) {
1352 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1353 val = br32(bp, B44_CAM_CTRL);
1354 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1358 static int b44_set_mac_addr(struct net_device *dev, void *p)
1360 struct b44 *bp = netdev_priv(dev);
1361 struct sockaddr *addr = p;
1364 if (netif_running(dev))
1367 if (!is_valid_ether_addr(addr->sa_data))
1370 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1372 spin_lock_irq(&bp->lock);
1374 val = br32(bp, B44_RXCONFIG);
1375 if (!(val & RXCONFIG_CAM_ABSENT))
1376 __b44_set_mac_addr(bp);
1378 spin_unlock_irq(&bp->lock);
1383 /* Called at device open time to get the chip ready for
1384 * packet processing. Invoked with bp->lock held.
1386 static void __b44_set_rx_mode(struct net_device *);
1387 static void b44_init_hw(struct b44 *bp, int reset_kind)
1391 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
1392 if (reset_kind == B44_FULL_RESET) {
1397 /* Enable CRC32, set proper LED modes and power on PHY */
1398 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1399 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1401 /* This sets the MAC address too. */
1402 __b44_set_rx_mode(bp->dev);
1404 /* MTU + eth header + possible VLAN tag + struct rx_header */
1405 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1406 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1408 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1409 if (reset_kind == B44_PARTIAL_RESET) {
1410 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1411 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1413 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1414 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1415 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1416 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1417 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1419 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1420 bp->rx_prod = bp->rx_pending;
1422 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1425 val = br32(bp, B44_ENET_CTRL);
1426 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1428 netdev_reset_queue(bp->dev);
1431 static int b44_open(struct net_device *dev)
1433 struct b44 *bp = netdev_priv(dev);
1436 err = b44_alloc_consistent(bp, GFP_KERNEL);
1440 napi_enable(&bp->napi);
1443 b44_init_hw(bp, B44_FULL_RESET);
1447 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1448 if (unlikely(err < 0)) {
1449 napi_disable(&bp->napi);
1450 b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
1452 b44_free_consistent(bp);
1456 init_timer(&bp->timer);
1457 bp->timer.expires = jiffies + HZ;
1458 bp->timer.data = (unsigned long) bp;
1459 bp->timer.function = b44_timer;
1460 add_timer(&bp->timer);
1462 b44_enable_ints(bp);
1463 netif_start_queue(dev);
1468 #ifdef CONFIG_NET_POLL_CONTROLLER
1470 * Polling receive - used by netconsole and other diagnostic tools
1471 * to allow network i/o with interrupts disabled.
1473 static void b44_poll_controller(struct net_device *dev)
1475 disable_irq(dev->irq);
1476 b44_interrupt(dev->irq, dev);
1477 enable_irq(dev->irq);
1481 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1484 u32 *pattern = (u32 *) pp;
1486 for (i = 0; i < bytes; i += sizeof(u32)) {
1487 bw32(bp, B44_FILT_ADDR, table_offset + i);
1488 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
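/* Build one Wake-on-LAN magic-packet match at 'offset' into the
 * pattern: sync bytes of 0xff followed by the station MAC repeated
 * 16 times, setting a mask bit for every byte the filter must
 * compare.  The value returned is one less than the pattern length,
 * ready for the B44_WKUP_LEN register.
 */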
1492 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1495 int k, j, len = offset;
1496 int ethaddr_bytes = ETH_ALEN;
1498 memset(ppattern + offset, 0xff, magicsync);
1499 for (j = 0; j < magicsync; j++)
1500 set_bit(len++, (unsigned long *) pmask);
1502 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1503 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1504 ethaddr_bytes = ETH_ALEN;
1506 ethaddr_bytes = B44_PATTERN_SIZE - len;
1507 if (ethaddr_bytes <= 0)
1509 for (k = 0; k < ethaddr_bytes; k++) {
1510 ppattern[offset + magicsync +
1511 (j * ETH_ALEN) + k] = macaddr[k];
1512 set_bit(len++, (unsigned long *) pmask);
1518 /* Setup magic packet patterns in the b44 WOL
1519 * pattern matching filter.
1521 static void b44_setup_pseudo_magicp(struct b44 *bp)
1525 int plen0, plen1, plen2;
1527 u8 pwol_mask[B44_PMASK_SIZE];
1529 pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1533 /* IPv4 magic packet pattern - pattern 0. */
1534 memset(pwol_mask, 0, B44_PMASK_SIZE);
1535 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1536 B44_ETHIPV4UDP_HLEN);
1538 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1539 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1541 /* Raw Ethernet II magic packet pattern - pattern 1 */
1542 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1543 memset(pwol_mask, 0, B44_PMASK_SIZE);
1544 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1547 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1548 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1549 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1550 B44_PMASK_BASE + B44_PMASK_SIZE);
1552 /* IPv6 magic packet pattern - pattern 2 */
1553 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1554 memset(pwol_mask, 0, B44_PMASK_SIZE);
1555 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1556 B44_ETHIPV6UDP_HLEN);
1558 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1559 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1560 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1561 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1563 kfree(pwol_pattern);
1565 /* set these patterns' lengths: one less than each real length */
1566 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1567 bw32(bp, B44_WKUP_LEN, val);
1569 /* enable wakeup pattern matching */
1570 val = br32(bp, B44_DEVCTRL);
1571 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1575 #ifdef CONFIG_B44_PCI
1576 static void b44_setup_wol_pci(struct b44 *bp)
1580 if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1581 bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1582 pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1583 pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1587 static inline void b44_setup_wol_pci(struct b44 *bp) { }
1588 #endif /* CONFIG_B44_PCI */
1590 static void b44_setup_wol(struct b44 *bp)
1594 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1596 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1598 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1600 val = bp->dev->dev_addr[2] << 24 |
1601 bp->dev->dev_addr[3] << 16 |
1602 bp->dev->dev_addr[4] << 8 |
1603 bp->dev->dev_addr[5];
1604 bw32(bp, B44_ADDR_LO, val);
1606 val = bp->dev->dev_addr[0] << 8 |
1607 bp->dev->dev_addr[1];
1608 bw32(bp, B44_ADDR_HI, val);
1610 val = br32(bp, B44_DEVCTRL);
1611 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1614 b44_setup_pseudo_magicp(bp);
1616 b44_setup_wol_pci(bp);
1619 static int b44_close(struct net_device *dev)
1621 struct b44 *bp = netdev_priv(dev);
1623 netif_stop_queue(dev);
1625 napi_disable(&bp->napi);
1627 del_timer_sync(&bp->timer);
1629 spin_lock_irq(&bp->lock);
1633 netif_carrier_off(dev);
1635 spin_unlock_irq(&bp->lock);
1637 free_irq(dev->irq, dev);
1639 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1640 b44_init_hw(bp, B44_PARTIAL_RESET);
1644 b44_free_consistent(bp);
1649 static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
1650 struct rtnl_link_stats64 *nstat)
1652 struct b44 *bp = netdev_priv(dev);
1653 struct b44_hw_stats *hwstat = &bp->hw_stats;
1657 start = u64_stats_fetch_begin_bh(&hwstat->syncp);
1659 /* Convert HW stats into rtnl_link_stats64 stats. */
1660 nstat->rx_packets = hwstat->rx_pkts;
1661 nstat->tx_packets = hwstat->tx_pkts;
1662 nstat->rx_bytes = hwstat->rx_octets;
1663 nstat->tx_bytes = hwstat->tx_octets;
1664 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1665 hwstat->tx_oversize_pkts +
1666 hwstat->tx_underruns +
1667 hwstat->tx_excessive_cols +
1668 hwstat->tx_late_cols);
1669 nstat->multicast = hwstat->rx_multicast_pkts;
1670 nstat->collisions = hwstat->tx_total_cols;
1672 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1673 hwstat->rx_undersize);
1674 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1675 nstat->rx_frame_errors = hwstat->rx_align_errs;
1676 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1677 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1678 hwstat->rx_oversize_pkts +
1679 hwstat->rx_missed_pkts +
1680 hwstat->rx_crc_align_errs +
1681 hwstat->rx_undersize +
1682 hwstat->rx_crc_errs +
1683 hwstat->rx_align_errs +
1684 hwstat->rx_symbol_errs);
1686 nstat->tx_aborted_errors = hwstat->tx_underruns;
1688 /* Carrier lost counter seems to be broken for some devices */
1689 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1691 } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
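/* Multicast filtering reuses the station-address CAM: slot 0 holds
 * the unicast address (see __b44_set_mac_addr), so multicast entries
 * are written starting at slot 1.
 */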
1696 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1698 struct netdev_hw_addr *ha;
1701 num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1703 netdev_for_each_mc_addr(ha, dev) {
1706 __b44_cam_write(bp, ha->addr, i++ + 1);
1711 static void __b44_set_rx_mode(struct net_device *dev)
1713 struct b44 *bp = netdev_priv(dev);
1716 val = br32(bp, B44_RXCONFIG);
1717 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1718 if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1719 val |= RXCONFIG_PROMISC;
1720 bw32(bp, B44_RXCONFIG, val);
1722 unsigned char zero[ETH_ALEN] = { 0 };
1725 __b44_set_mac_addr(bp);
1727 if ((dev->flags & IFF_ALLMULTI) ||
1728 (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
1729 val |= RXCONFIG_ALLMULTI;
1731 i = __b44_load_mcast(bp, dev);
1734 __b44_cam_write(bp, zero, i);
1736 bw32(bp, B44_RXCONFIG, val);
1737 val = br32(bp, B44_CAM_CTRL);
1738 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1742 static void b44_set_rx_mode(struct net_device *dev)
1744 struct b44 *bp = netdev_priv(dev);
1746 spin_lock_irq(&bp->lock);
1747 __b44_set_rx_mode(dev);
1748 spin_unlock_irq(&bp->lock);
1751 static u32 b44_get_msglevel(struct net_device *dev)
1753 struct b44 *bp = netdev_priv(dev);
1754 return bp->msg_enable;
1757 static void b44_set_msglevel(struct net_device *dev, u32 value)
1759 struct b44 *bp = netdev_priv(dev);
1760 bp->msg_enable = value;
1763 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1765 struct b44 *bp = netdev_priv(dev);
1766 struct ssb_bus *bus = bp->sdev->bus;
1768 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1769 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1770 switch (bus->bustype) {
1771 case SSB_BUSTYPE_PCI:
1772 strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1774 case SSB_BUSTYPE_SSB:
1775 strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
1777 case SSB_BUSTYPE_PCMCIA:
1778 case SSB_BUSTYPE_SDIO:
1779 WARN_ON(1); /* A device with this bus does not exist. */
1784 static int b44_nway_reset(struct net_device *dev)
1786 struct b44 *bp = netdev_priv(dev);
1790 spin_lock_irq(&bp->lock);
1791 b44_readphy(bp, MII_BMCR, &bmcr);
1792 b44_readphy(bp, MII_BMCR, &bmcr);
1794 if (bmcr & BMCR_ANENABLE) {
1795 b44_writephy(bp, MII_BMCR,
1796 bmcr | BMCR_ANRESTART);
1799 spin_unlock_irq(&bp->lock);
1804 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1806 struct b44 *bp = netdev_priv(dev);
1808 cmd->supported = (SUPPORTED_Autoneg);
1809 cmd->supported |= (SUPPORTED_100baseT_Half |
1810 SUPPORTED_100baseT_Full |
1811 SUPPORTED_10baseT_Half |
1812 SUPPORTED_10baseT_Full |
1815 cmd->advertising = 0;
1816 if (bp->flags & B44_FLAG_ADV_10HALF)
1817 cmd->advertising |= ADVERTISED_10baseT_Half;
1818 if (bp->flags & B44_FLAG_ADV_10FULL)
1819 cmd->advertising |= ADVERTISED_10baseT_Full;
1820 if (bp->flags & B44_FLAG_ADV_100HALF)
1821 cmd->advertising |= ADVERTISED_100baseT_Half;
1822 if (bp->flags & B44_FLAG_ADV_100FULL)
1823 cmd->advertising |= ADVERTISED_100baseT_Full;
1824 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1825 ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
1826 SPEED_100 : SPEED_10));
1827 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1828 DUPLEX_FULL : DUPLEX_HALF;
1830 cmd->phy_address = bp->phy_addr;
1831 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1832 XCVR_INTERNAL : XCVR_EXTERNAL;
1833 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1834 AUTONEG_DISABLE : AUTONEG_ENABLE;
1835 if (cmd->autoneg == AUTONEG_ENABLE)
1836 cmd->advertising |= ADVERTISED_Autoneg;
1837 if (!netif_running(dev)) {
1838 ethtool_cmd_speed_set(cmd, 0);
1846 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1848 struct b44 *bp = netdev_priv(dev);
1849 u32 speed = ethtool_cmd_speed(cmd);
1851 /* We do not support gigabit. */
1852 if (cmd->autoneg == AUTONEG_ENABLE) {
1853 if (cmd->advertising &
1854 (ADVERTISED_1000baseT_Half |
1855 ADVERTISED_1000baseT_Full))
1857 } else if ((speed != SPEED_100 &&
1858 speed != SPEED_10) ||
1859 (cmd->duplex != DUPLEX_HALF &&
1860 cmd->duplex != DUPLEX_FULL)) {
1864 spin_lock_irq(&bp->lock);
1866 if (cmd->autoneg == AUTONEG_ENABLE) {
1867 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1868 B44_FLAG_100_BASE_T |
1869 B44_FLAG_FULL_DUPLEX |
1870 B44_FLAG_ADV_10HALF |
1871 B44_FLAG_ADV_10FULL |
1872 B44_FLAG_ADV_100HALF |
1873 B44_FLAG_ADV_100FULL);
1874 if (cmd->advertising == 0) {
1875 bp->flags |= (B44_FLAG_ADV_10HALF |
1876 B44_FLAG_ADV_10FULL |
1877 B44_FLAG_ADV_100HALF |
1878 B44_FLAG_ADV_100FULL);
1880 if (cmd->advertising & ADVERTISED_10baseT_Half)
1881 bp->flags |= B44_FLAG_ADV_10HALF;
1882 if (cmd->advertising & ADVERTISED_10baseT_Full)
1883 bp->flags |= B44_FLAG_ADV_10FULL;
1884 if (cmd->advertising & ADVERTISED_100baseT_Half)
1885 bp->flags |= B44_FLAG_ADV_100HALF;
1886 if (cmd->advertising & ADVERTISED_100baseT_Full)
1887 bp->flags |= B44_FLAG_ADV_100FULL;
1890 bp->flags |= B44_FLAG_FORCE_LINK;
1891 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1892 if (speed == SPEED_100)
1893 bp->flags |= B44_FLAG_100_BASE_T;
1894 if (cmd->duplex == DUPLEX_FULL)
1895 bp->flags |= B44_FLAG_FULL_DUPLEX;
1898 if (netif_running(dev))
1901 spin_unlock_irq(&bp->lock);
1906 static void b44_get_ringparam(struct net_device *dev,
1907 struct ethtool_ringparam *ering)
1909 struct b44 *bp = netdev_priv(dev);
1911 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1912 ering->rx_pending = bp->rx_pending;
1914 /* XXX ethtool lacks a tx_max_pending, oops... */
1917 static int b44_set_ringparam(struct net_device *dev,
1918 struct ethtool_ringparam *ering)
1920 struct b44 *bp = netdev_priv(dev);
1922 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1923 (ering->rx_mini_pending != 0) ||
1924 (ering->rx_jumbo_pending != 0) ||
1925 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1928 spin_lock_irq(&bp->lock);
1930 bp->rx_pending = ering->rx_pending;
1931 bp->tx_pending = ering->tx_pending;
1935 b44_init_hw(bp, B44_FULL_RESET);
1936 netif_wake_queue(bp->dev);
1937 spin_unlock_irq(&bp->lock);
1939 b44_enable_ints(bp);
1944 static void b44_get_pauseparam(struct net_device *dev,
1945 struct ethtool_pauseparam *epause)
1947 struct b44 *bp = netdev_priv(dev);
1950 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1952 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1954 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1957 static int b44_set_pauseparam(struct net_device *dev,
1958 struct ethtool_pauseparam *epause)
1960 struct b44 *bp = netdev_priv(dev);
1962 spin_lock_irq(&bp->lock);
1963 if (epause->autoneg)
1964 bp->flags |= B44_FLAG_PAUSE_AUTO;
1966 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1967 if (epause->rx_pause)
1968 bp->flags |= B44_FLAG_RX_PAUSE;
1970 bp->flags &= ~B44_FLAG_RX_PAUSE;
1971 if (epause->tx_pause)
1972 bp->flags |= B44_FLAG_TX_PAUSE;
1974 bp->flags &= ~B44_FLAG_TX_PAUSE;
1975 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1978 b44_init_hw(bp, B44_FULL_RESET);
1980 __b44_set_flow_ctrl(bp, bp->flags);
1982 spin_unlock_irq(&bp->lock);
1984 b44_enable_ints(bp);
1989 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1993 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1998 static int b44_get_sset_count(struct net_device *dev, int sset)
2002 return ARRAY_SIZE(b44_gstrings);
2008 static void b44_get_ethtool_stats(struct net_device *dev,
2009 struct ethtool_stats *stats, u64 *data)
2011 struct b44 *bp = netdev_priv(dev);
2012 struct b44_hw_stats *hwstat = &bp->hw_stats;
2013 u64 *data_src, *data_dst;
2017 spin_lock_irq(&bp->lock);
2018 b44_stats_update(bp);
2019 spin_unlock_irq(&bp->lock);
2022 data_src = &hwstat->tx_good_octets;
2024 start = u64_stats_fetch_begin_bh(&hwstat->syncp);
2026 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
2027 *data_dst++ = *data_src++;
2029 } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
2032 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2034 struct b44 *bp = netdev_priv(dev);
2036 wol->supported = WAKE_MAGIC;
2037 if (bp->flags & B44_FLAG_WOL_ENABLE)
2038 wol->wolopts = WAKE_MAGIC;
2041 memset(&wol->sopass, 0, sizeof(wol->sopass));
2044 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2046 struct b44 *bp = netdev_priv(dev);
2048 spin_lock_irq(&bp->lock);
2049 if (wol->wolopts & WAKE_MAGIC)
2050 bp->flags |= B44_FLAG_WOL_ENABLE;
2052 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2053 spin_unlock_irq(&bp->lock);
2058 static const struct ethtool_ops b44_ethtool_ops = {
2059 .get_drvinfo = b44_get_drvinfo,
2060 .get_settings = b44_get_settings,
2061 .set_settings = b44_set_settings,
2062 .nway_reset = b44_nway_reset,
2063 .get_link = ethtool_op_get_link,
2064 .get_wol = b44_get_wol,
2065 .set_wol = b44_set_wol,
2066 .get_ringparam = b44_get_ringparam,
2067 .set_ringparam = b44_set_ringparam,
2068 .get_pauseparam = b44_get_pauseparam,
2069 .set_pauseparam = b44_set_pauseparam,
2070 .get_msglevel = b44_get_msglevel,
2071 .set_msglevel = b44_set_msglevel,
2072 .get_strings = b44_get_strings,
2073 .get_sset_count = b44_get_sset_count,
2074 .get_ethtool_stats = b44_get_ethtool_stats,
2077 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2079 struct mii_ioctl_data *data = if_mii(ifr);
2080 struct b44 *bp = netdev_priv(dev);
2083 if (!netif_running(dev))
2086 spin_lock_irq(&bp->lock);
2087 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2088 spin_unlock_irq(&bp->lock);
2093 static int b44_get_invariants(struct b44 *bp)
2095 struct ssb_device *sdev = bp->sdev;
2099 bp->dma_offset = ssb_dma_translation(sdev);
2101 if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2103 addr = sdev->bus->sprom.et1mac;
2104 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2106 addr = sdev->bus->sprom.et0mac;
2107 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2109 /* Some ROMs have buggy PHY addresses with the high
2110 * bits set (sign extension?). Truncate them to a
2111 * valid PHY address. */
2112 bp->phy_addr &= 0x1F;
2114 memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
2116 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2117 pr_err("Invalid MAC address found in EEPROM\n");
2121 bp->imask = IMASK_DEF;
2123 /* XXX - really required?
2124 bp->flags |= B44_FLAG_BUGGY_TXPTR;
2127 if (bp->sdev->id.revision >= 7)
2128 bp->flags |= B44_FLAG_B0_ANDLATER;
2133 static const struct net_device_ops b44_netdev_ops = {
2134 .ndo_open = b44_open,
2135 .ndo_stop = b44_close,
2136 .ndo_start_xmit = b44_start_xmit,
2137 .ndo_get_stats64 = b44_get_stats64,
2138 .ndo_set_rx_mode = b44_set_rx_mode,
2139 .ndo_set_mac_address = b44_set_mac_addr,
2140 .ndo_validate_addr = eth_validate_addr,
2141 .ndo_do_ioctl = b44_ioctl,
2142 .ndo_tx_timeout = b44_tx_timeout,
2143 .ndo_change_mtu = b44_change_mtu,
2144 #ifdef CONFIG_NET_POLL_CONTROLLER
2145 .ndo_poll_controller = b44_poll_controller,
2149 static int b44_init_one(struct ssb_device *sdev,
2150 const struct ssb_device_id *ent)
2152 struct net_device *dev;
2158 pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
2160 dev = alloc_etherdev(sizeof(*bp));
2166 SET_NETDEV_DEV(dev, sdev->dev);
2168 /* No interesting netdevice features in this card... */
2171 bp = netdev_priv(dev);
2174 bp->force_copybreak = 0;
2176 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2178 spin_lock_init(&bp->lock);
2180 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2181 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2183 dev->netdev_ops = &b44_netdev_ops;
2184 netif_napi_add(dev, &bp->napi, b44_poll, 64);
2185 dev->watchdog_timeo = B44_TX_TIMEOUT;
2186 dev->irq = sdev->irq;
2187 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2189 err = ssb_bus_powerup(sdev->bus, 0);
2192 "Failed to powerup the bus\n");
2193 goto err_out_free_dev;
2196 if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
2198 "Required 30BIT DMA mask unsupported by the system\n");
2199 goto err_out_powerdown;
2202 err = b44_get_invariants(bp);
2205 "Problem fetching invariants of chip, aborting\n");
2206 goto err_out_powerdown;
2209 bp->mii_if.dev = dev;
2210 bp->mii_if.mdio_read = b44_mii_read;
2211 bp->mii_if.mdio_write = b44_mii_write;
2212 bp->mii_if.phy_id = bp->phy_addr;
2213 bp->mii_if.phy_id_mask = 0x1f;
2214 bp->mii_if.reg_num_mask = 0x1f;
2216 /* By default, advertise all speed/duplex settings. */
2217 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2218 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2220 /* By default, auto-negotiate PAUSE. */
2221 bp->flags |= B44_FLAG_PAUSE_AUTO;
2223 err = register_netdev(dev);
2225 dev_err(sdev->dev, "Cannot register net device, aborting\n");
2226 goto err_out_powerdown;
2229 netif_carrier_off(dev);
2231 ssb_set_drvdata(sdev, dev);
2233 /* Chip reset provides power to the b44 MAC & PCI cores, which
2234 * is necessary for MAC register access.
2236 b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2238 /* do a phy reset to test if there is an active phy */
2239 if (b44_phy_reset(bp) < 0)
2240 bp->phy_addr = B44_PHY_ADDR_NO_PHY;
2242 netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2247 ssb_bus_may_powerdown(sdev->bus);
2256 static void b44_remove_one(struct ssb_device *sdev)
2258 struct net_device *dev = ssb_get_drvdata(sdev);
2260 unregister_netdev(dev);
2261 ssb_device_disable(sdev, 0);
2262 ssb_bus_may_powerdown(sdev->bus);
2264 ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2265 ssb_set_drvdata(sdev, NULL);
2268 static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2270 struct net_device *dev = ssb_get_drvdata(sdev);
2271 struct b44 *bp = netdev_priv(dev);
2273 if (!netif_running(dev))
2276 del_timer_sync(&bp->timer);
2278 spin_lock_irq(&bp->lock);
2281 netif_carrier_off(bp->dev);
2282 netif_device_detach(bp->dev);
2285 spin_unlock_irq(&bp->lock);
2287 free_irq(dev->irq, dev);
2288 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2289 b44_init_hw(bp, B44_PARTIAL_RESET);
2293 ssb_pcihost_set_power_state(sdev, PCI_D3hot);
2297 static int b44_resume(struct ssb_device *sdev)
2299 struct net_device *dev = ssb_get_drvdata(sdev);
2300 struct b44 *bp = netdev_priv(dev);
2303 rc = ssb_bus_powerup(sdev->bus, 0);
2306 "Failed to powerup the bus\n");
2310 if (!netif_running(dev))
2313 spin_lock_irq(&bp->lock);
2315 b44_init_hw(bp, B44_FULL_RESET);
2316 spin_unlock_irq(&bp->lock);
2319 * As a shared interrupt, the handler can be called immediately. To be
2320 * able to check the interrupt status the hardware must already be
2321 * powered back on (b44_init_hw).
2323 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2325 netdev_err(dev, "request_irq failed\n");
2326 spin_lock_irq(&bp->lock);
2329 spin_unlock_irq(&bp->lock);
2333 netif_device_attach(bp->dev);
2335 b44_enable_ints(bp);
2336 netif_wake_queue(dev);
2338 mod_timer(&bp->timer, jiffies + 1);
2343 static struct ssb_driver b44_ssb_driver = {
2344 .name = DRV_MODULE_NAME,
2345 .id_table = b44_ssb_tbl,
2346 .probe = b44_init_one,
2347 .remove = b44_remove_one,
2348 .suspend = b44_suspend,
2349 .resume = b44_resume,
2352 static inline int __init b44_pci_init(void)
2355 #ifdef CONFIG_B44_PCI
2356 err = ssb_pcihost_register(&b44_pci_driver);
2361 static inline void b44_pci_exit(void)
2363 #ifdef CONFIG_B44_PCI
2364 ssb_pcihost_unregister(&b44_pci_driver);
2368 static int __init b44_init(void)
2370 unsigned int dma_desc_align_size = dma_get_cache_alignment();
2373 /* Set up parameters for syncing RX/TX DMA descriptors */
2374 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
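/* Rings that end up streaming-mapped (the *_RING_HACK paths) are
 * synced one descriptor at a time; syncing at least a full cache
 * line keeps a partial-line flush from clobbering the neighbouring
 * descriptor on non-DMA-coherent systems.
 */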
2376 err = b44_pci_init();
2379 err = ssb_driver_register(&b44_ssb_driver);
2385 static void __exit b44_cleanup(void)
2387 ssb_driver_unregister(&b44_ssb_driver);
2391 module_init(b44_init);
2392 module_exit(b44_cleanup);