2 * Network device driver for the BMAC ethernet controller on
3 * Apple Powermacs. Assumes it's under a DBDMA controller.
5 * Copyright (C) 1998 Randy Gobbel.
7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
8 * dynamic procfs inode.
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/string.h>
17 #include <linux/timer.h>
18 #include <linux/proc_fs.h>
19 #include <linux/init.h>
20 #include <linux/spinlock.h>
21 #include <linux/crc32.h>
22 #include <linux/bitrev.h>
23 #include <linux/ethtool.h>
24 #include <linux/slab.h>
26 #include <asm/dbdma.h>
29 #include <asm/pgtable.h>
30 #include <asm/machdep.h>
31 #include <asm/pmac_feature.h>
32 #include <asm/macio.h>
37 #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
38 #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
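/*
 * trunc_page() rounds an address down to the start of its page;
 * round_page() rounds it up to the next page boundary (addresses that
 * are already page-aligned are left unchanged).
 */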
41 * CRC polynomial - used in working out multicast filter bits.
43 #define ENET_CRCPOLY 0x04c11db7
45 /* switch to use multicast code lifted from sunhme driver */
46 #define SUNHME_MULTICAST
50 #define MAX_TX_ACTIVE 1
52 #define ETHERMINPACKET 64
54 #define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2)
55 #define TX_TIMEOUT HZ /* 1 second */
57 /* Bits in transmit DMA status */
58 #define TX_DMA_ERR 0x80
63 /* volatile struct bmac *bmac; */
64 struct sk_buff_head *queue;
65 volatile struct dbdma_regs __iomem *tx_dma;
67 volatile struct dbdma_regs __iomem *rx_dma;
69 volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
70 volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
71 struct macio_dev *mdev;
73 struct sk_buff *rx_bufs[N_RX_RING];
76 struct sk_buff *tx_bufs[N_TX_RING];
79 unsigned char tx_fullup;
80 struct timer_list tx_timeout;
84 unsigned short hash_use_count[64];
85 unsigned short hash_table_mask[4];
89 #if 0 /* Move that to ethtool */
91 typedef struct bmac_reg_entry {
93 unsigned short reg_offset;
96 #define N_REG_ENTRIES 31
98 static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
100 {"MEMDATAHI", MEMDATAHI},
101 {"MEMDATALO", MEMDATALO},
134 static unsigned char *bmac_emergency_rxbuf;
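/*
 * Fallback receive buffer: when an rx skb cannot be allocated, the DMA
 * command is pointed at this buffer instead and the received frame is
 * counted as dropped (see bmac_construct_rxbuff() and the rx interrupt
 * handler below).
 */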
137 * Number of bytes of private data per BMAC: allow enough for
138 * the rx and tx dma commands plus a branch dma command each,
139 * and another 16 bytes to allow us to align the dma command
140 * buffers on a 16 byte boundary.
142 #define PRIV_BYTES (sizeof(struct bmac_data) \
143 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
144 + sizeof(struct sk_buff_head))
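/*
 * The private area is carved up in bmac_probe(): the bmac_data struct
 * comes first, then the tx and rx DBDMA command lists (N_TX_RING+1 and
 * N_RX_RING+1 entries each, the extra slot holding the branch command)
 * aligned to a 16-byte boundary, and finally the sk_buff_head used as
 * the output queue.
 */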
146 static int bmac_open(struct net_device *dev);
147 static int bmac_close(struct net_device *dev);
148 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
149 static void bmac_set_multicast(struct net_device *dev);
150 static void bmac_reset_and_enable(struct net_device *dev);
151 static void bmac_start_chip(struct net_device *dev);
152 static void bmac_init_chip(struct net_device *dev);
153 static void bmac_init_registers(struct net_device *dev);
154 static void bmac_enable_and_reset_chip(struct net_device *dev);
155 static int bmac_set_address(struct net_device *dev, void *addr);
156 static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
157 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
158 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
159 static void bmac_set_timeout(struct net_device *dev);
160 static void bmac_tx_timeout(unsigned long data);
161 static int bmac_output(struct sk_buff *skb, struct net_device *dev);
162 static void bmac_start(struct net_device *dev);
164 #define DBDMA_SET(x) ( ((x) | (x) << 16) )
165 #define DBDMA_CLEAR(x) ( (x) << 16)
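/*
 * Writes to a DBDMA channel's control register use the upper 16 bits as
 * a mask selecting which of the lower 16 control/status bits to change.
 * DBDMA_SET(x) selects the bits in x and sets them (e.g. DBDMA_SET(RUN|WAKE)
 * puts RUN|WAKE in both halves), while DBDMA_CLEAR(x) selects them and
 * clears them.  The accessors below use stwbrx/lwbrx because the DBDMA
 * registers are little-endian.
 */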
168 dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
170 __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
173 static inline unsigned long
174 dbdma_ld32(volatile __u32 __iomem *a)
177 __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
182 dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
184 dbdma_st32(&dmap->control,
185 DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
190 dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
192 dbdma_st32(&dmap->control,
193 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
195 while (dbdma_ld32(&dmap->status) & RUN)
200 dbdma_setcmd(volatile struct dbdma_cmd *cp,
201 unsigned short cmd, unsigned count, unsigned long addr,
202 unsigned long cmd_dep)
204 out_le16(&cp->command, cmd);
205 out_le16(&cp->req_count, count);
206 out_le32(&cp->phy_addr, addr);
207 out_le32(&cp->cmd_dep, cmd_dep);
208 out_le16(&cp->xfer_status, 0);
209 out_le16(&cp->res_count, 0);
213 void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
215 out_le16((void __iomem *)dev->base_addr + reg_offset, data);
220 unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
222 return in_le16((void __iomem *)dev->base_addr + reg_offset);
226 bmac_enable_and_reset_chip(struct net_device *dev)
228 struct bmac_data *bp = netdev_priv(dev);
229 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
230 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
237 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
240 #define MIFDELAY udelay(10)
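/*
 * The MIF routines below bit-bang the MII management (MDIO) interface to
 * the PHY through the MIFCSR register.  The framing appears to follow
 * standard clause-22 MDIO: a preamble of 32 ones, a start/opcode nibble
 * (6 = read, 5 = write), a 10-bit PHY/register address, then 16 data bits
 * plus turnaround bits.
 */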
243 bmac_mif_readbits(struct net_device *dev, int nb)
245 unsigned int val = 0;
248 bmwrite(dev, MIFCSR, 0);
250 if (bmread(dev, MIFCSR) & 8)
252 bmwrite(dev, MIFCSR, 1);
255 bmwrite(dev, MIFCSR, 0);
257 bmwrite(dev, MIFCSR, 1);
263 bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
268 b = (val & (1 << nb))? 6: 4;
269 bmwrite(dev, MIFCSR, b);
271 bmwrite(dev, MIFCSR, b|1);
277 bmac_mif_read(struct net_device *dev, unsigned int addr)
281 bmwrite(dev, MIFCSR, 4);
283 bmac_mif_writebits(dev, ~0U, 32);
284 bmac_mif_writebits(dev, 6, 4);
285 bmac_mif_writebits(dev, addr, 10);
286 bmwrite(dev, MIFCSR, 2);
288 bmwrite(dev, MIFCSR, 1);
290 val = bmac_mif_readbits(dev, 17);
291 bmwrite(dev, MIFCSR, 4);
297 bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
299 bmwrite(dev, MIFCSR, 4);
301 bmac_mif_writebits(dev, ~0U, 32);
302 bmac_mif_writebits(dev, 5, 4);
303 bmac_mif_writebits(dev, addr, 10);
304 bmac_mif_writebits(dev, 2, 2);
305 bmac_mif_writebits(dev, val, 16);
306 bmac_mif_writebits(dev, 3, 2);
310 bmac_init_registers(struct net_device *dev)
312 struct bmac_data *bp = netdev_priv(dev);
313 volatile unsigned short regValue;
314 unsigned short *pWord16;
317 /* XXDEBUG(("bmac: enter init_registers\n")); */
319 bmwrite(dev, RXRST, RxResetValue);
320 bmwrite(dev, TXRST, TxResetBit);
326 regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
327 } while ((regValue & TxResetBit) && i > 0);
329 if (!bp->is_bmac_plus) {
330 regValue = bmread(dev, XCVRIF);
331 regValue |= ClkBit | SerialMode | COLActiveLow;
332 bmwrite(dev, XCVRIF, regValue);
336 bmwrite(dev, RSEED, (unsigned short)0x1968);
338 regValue = bmread(dev, XIFC);
339 regValue |= TxOutputEnable;
340 bmwrite(dev, XIFC, regValue);
344 /* set collision counters to 0 */
345 bmwrite(dev, NCCNT, 0);
346 bmwrite(dev, NTCNT, 0);
347 bmwrite(dev, EXCNT, 0);
348 bmwrite(dev, LTCNT, 0);
350 /* set rx counters to 0 */
351 bmwrite(dev, FRCNT, 0);
352 bmwrite(dev, LECNT, 0);
353 bmwrite(dev, AECNT, 0);
354 bmwrite(dev, FECNT, 0);
355 bmwrite(dev, RXCV, 0);
357 /* set tx fifo information */
358 bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
360 bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
361 bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
363 /* set rx fifo information */
364 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
365 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
367 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
368 bmread(dev, STATUS); /* read it just to clear it */
370 /* zero out the chip Hash Filter registers */
371 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
372 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
373 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
374 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
375 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
377 pWord16 = (unsigned short *)dev->dev_addr;
378 bmwrite(dev, MADD0, *pWord16++);
379 bmwrite(dev, MADD1, *pWord16++);
380 bmwrite(dev, MADD2, *pWord16);
382 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
384 bmwrite(dev, INTDISABLE, EnableNormal);
389 bmac_disable_interrupts(struct net_device *dev)
391 bmwrite(dev, INTDISABLE, DisableAll);
395 bmac_enable_interrupts(struct net_device *dev)
397 bmwrite(dev, INTDISABLE, EnableNormal);
403 bmac_start_chip(struct net_device *dev)
405 struct bmac_data *bp = netdev_priv(dev);
406 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
407 unsigned short oldConfig;
409 /* enable rx dma channel */
412 oldConfig = bmread(dev, TXCFG);
413 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
415 /* turn on rx plus any other bits already on (promiscuous possibly) */
416 oldConfig = bmread(dev, RXCFG);
417 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
422 bmac_init_phy(struct net_device *dev)
425 struct bmac_data *bp = netdev_priv(dev);
427 printk(KERN_DEBUG "phy registers:");
428 for (addr = 0; addr < 32; ++addr) {
431 printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
433 printk(KERN_CONT "\n");
435 if (bp->is_bmac_plus) {
436 unsigned int capable, ctrl;
438 ctrl = bmac_mif_read(dev, 0);
439 capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
440 if (bmac_mif_read(dev, 4) != capable ||
441 (ctrl & 0x1000) == 0) {
442 bmac_mif_write(dev, 4, capable);
443 bmac_mif_write(dev, 0, 0x1200);
445 bmac_mif_write(dev, 0, 0x1000);
449 static void bmac_init_chip(struct net_device *dev)
452 bmac_init_registers(dev);
456 static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
458 struct net_device* dev = macio_get_drvdata(mdev);
459 struct bmac_data *bp = netdev_priv(dev);
461 unsigned short config;
464 netif_device_detach(dev);
465 	/* probably should wait for the DMA to finish and turn off the chip */
466 spin_lock_irqsave(&bp->lock, flags);
467 if (bp->timeout_active) {
468 del_timer(&bp->tx_timeout);
469 bp->timeout_active = 0;
471 disable_irq(dev->irq);
472 disable_irq(bp->tx_dma_intr);
473 disable_irq(bp->rx_dma_intr);
475 spin_unlock_irqrestore(&bp->lock, flags);
477 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
478 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
480 config = bmread(dev, RXCFG);
481 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
482 config = bmread(dev, TXCFG);
483 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
484 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
485 /* disable rx and tx dma */
486 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
487 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
488 /* free some skb's */
489 for (i=0; i<N_RX_RING; i++) {
490 if (bp->rx_bufs[i] != NULL) {
491 dev_kfree_skb(bp->rx_bufs[i]);
492 bp->rx_bufs[i] = NULL;
495 for (i = 0; i<N_TX_RING; i++) {
496 if (bp->tx_bufs[i] != NULL) {
497 dev_kfree_skb(bp->tx_bufs[i]);
498 bp->tx_bufs[i] = NULL;
502 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
506 static int bmac_resume(struct macio_dev *mdev)
508 struct net_device* dev = macio_get_drvdata(mdev);
509 struct bmac_data *bp = netdev_priv(dev);
511 /* see if this is enough */
513 bmac_reset_and_enable(dev);
515 enable_irq(dev->irq);
516 enable_irq(bp->tx_dma_intr);
517 enable_irq(bp->rx_dma_intr);
518 netif_device_attach(dev);
522 #endif /* CONFIG_PM */
524 static int bmac_set_address(struct net_device *dev, void *addr)
526 struct bmac_data *bp = netdev_priv(dev);
527 unsigned char *p = addr;
528 unsigned short *pWord16;
532 XXDEBUG(("bmac: enter set_address\n"));
533 spin_lock_irqsave(&bp->lock, flags);
535 for (i = 0; i < 6; ++i) {
536 dev->dev_addr[i] = p[i];
538 /* load up the hardware address */
539 pWord16 = (unsigned short *)dev->dev_addr;
540 bmwrite(dev, MADD0, *pWord16++);
541 bmwrite(dev, MADD1, *pWord16++);
542 bmwrite(dev, MADD2, *pWord16);
544 spin_unlock_irqrestore(&bp->lock, flags);
545 XXDEBUG(("bmac: exit set_address\n"));
549 static inline void bmac_set_timeout(struct net_device *dev)
551 struct bmac_data *bp = netdev_priv(dev);
554 spin_lock_irqsave(&bp->lock, flags);
555 if (bp->timeout_active)
556 del_timer(&bp->tx_timeout);
557 bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
558 bp->tx_timeout.function = bmac_tx_timeout;
559 bp->tx_timeout.data = (unsigned long) dev;
560 add_timer(&bp->tx_timeout);
561 bp->timeout_active = 1;
562 spin_unlock_irqrestore(&bp->lock, flags);
566 bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
574 baddr = virt_to_bus(vaddr);
576 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
580 bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
582 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
584 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
585 virt_to_bus(addr), 0);
589 bmac_init_tx_ring(struct bmac_data *bp)
591 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
593 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
599 /* put a branch at the end of the tx command list */
600 dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
601 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
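	/* The NOP + BR_ALWAYS command makes the list circular: after the last
	 * slot the channel branches back to tx_cmds[0], so the ring wraps
	 * without CPU intervention. */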
605 out_le32(&td->wait_sel, 0x00200020);
606 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
610 bmac_init_rx_ring(struct net_device *dev)
612 struct bmac_data *bp = netdev_priv(dev);
613 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
617 /* initialize list of sk_buffs for receiving and set up recv dma */
618 memset((char *)bp->rx_cmds, 0,
619 (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
620 for (i = 0; i < N_RX_RING; i++) {
621 if ((skb = bp->rx_bufs[i]) == NULL) {
622 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
626 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
632 /* Put a branch back to the beginning of the receive command list */
633 dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
634 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
638 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
644 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
646 struct bmac_data *bp = netdev_priv(dev);
647 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
650 /* see if there's a free slot in the tx ring */
651 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
652 /* bp->tx_empty, bp->tx_fill)); */
656 if (i == bp->tx_empty) {
657 netif_stop_queue(dev);
659 XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
660 return -1; /* can't take it at the moment */
663 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
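	/* The STOP written into the next slot fences the channel: it halts
	 * there after sending this packet instead of running into stale
	 * commands.  The old STOP in the current slot is overwritten by the
	 * OUTPUT_LAST command built below. */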
665 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
667 bp->tx_bufs[bp->tx_fill] = skb;
670 dev->stats.tx_bytes += skb->len;
677 static int rxintcount;
679 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
681 struct net_device *dev = (struct net_device *) dev_id;
682 struct bmac_data *bp = netdev_priv(dev);
683 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
684 volatile struct dbdma_cmd *cp;
687 unsigned int residual;
691 spin_lock_irqsave(&bp->lock, flags);
693 if (++rxintcount < 10) {
694 XXDEBUG(("bmac_rxdma_intr\n"));
701 cp = &bp->rx_cmds[i];
702 stat = ld_le16(&cp->xfer_status);
703 residual = ld_le16(&cp->res_count);
704 if ((stat & ACTIVE) == 0)
706 nb = RX_BUFLEN - residual - 2;
707 if (nb < (ETHERMINPACKET - ETHERCRC)) {
709 dev->stats.rx_length_errors++;
710 dev->stats.rx_errors++;
712 skb = bp->rx_bufs[i];
713 bp->rx_bufs[i] = NULL;
718 skb->protocol = eth_type_trans(skb, dev);
720 ++dev->stats.rx_packets;
721 dev->stats.rx_bytes += nb;
723 ++dev->stats.rx_dropped;
725 if ((skb = bp->rx_bufs[i]) == NULL) {
726 bp->rx_bufs[i] = skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
728 skb_reserve(bp->rx_bufs[i], 2);
730 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
731 st_le16(&cp->res_count, 0);
732 st_le16(&cp->xfer_status, 0);
734 if (++i >= N_RX_RING) i = 0;
743 spin_unlock_irqrestore(&bp->lock, flags);
745 if (rxintcount < 10) {
746 XXDEBUG(("bmac_rxdma_intr done\n"));
751 static int txintcount;
753 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
755 struct net_device *dev = (struct net_device *) dev_id;
756 struct bmac_data *bp = netdev_priv(dev);
757 volatile struct dbdma_cmd *cp;
761 spin_lock_irqsave(&bp->lock, flags);
763 if (txintcount++ < 10) {
764 XXDEBUG(("bmac_txdma_intr\n"));
767 /* del_timer(&bp->tx_timeout); */
768 /* bp->timeout_active = 0; */
771 cp = &bp->tx_cmds[bp->tx_empty];
772 stat = ld_le16(&cp->xfer_status);
773 if (txintcount < 10) {
774 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
776 if (!(stat & ACTIVE)) {
778 * status field might not have been filled by DBDMA
780 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
784 if (bp->tx_bufs[bp->tx_empty]) {
785 ++dev->stats.tx_packets;
786 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
788 bp->tx_bufs[bp->tx_empty] = NULL;
790 netif_wake_queue(dev);
791 if (++bp->tx_empty >= N_TX_RING)
793 if (bp->tx_empty == bp->tx_fill)
797 spin_unlock_irqrestore(&bp->lock, flags);
799 if (txintcount < 10) {
800 XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
807 #ifndef SUNHME_MULTICAST
808 /* Real fast bit-reversal algorithm, 6-bit values */
809 static int reverse6[64] = {
810 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
811 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
812 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
813 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
814 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
815 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
816 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
817 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
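/*
 * crc416() folds one 16-bit word of the address (byte-swapped) into a
 * running CRC-32 computed with ENET_CRCPOLY; bmac_crc() runs it over the
 * three 16-bit words of the Ethernet address.  bmac_addhash() then takes
 * the low six bits of the result, bit-reverses them via reverse6[], and
 * uses them to pick one of the 64 hash-filter bits.
 */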
821 crc416(unsigned int curval, unsigned short nxtval)
823 register unsigned int counter, cur = curval, next = nxtval;
824 register int high_crc_set, low_data_set;
827 next = ((next & 0x00FF) << 8) | (next >> 8);
829 /* Compute bit-by-bit */
830 for (counter = 0; counter < 16; ++counter) {
831 /* is high CRC bit set? */
832 if ((cur & 0x80000000) == 0) high_crc_set = 0;
833 else high_crc_set = 1;
837 if ((next & 0x0001) == 0) low_data_set = 0;
838 else low_data_set = 1;
843 if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
849 bmac_crc(unsigned short *address)
853 XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
854 newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */
855 newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */
856 newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */
862 * Add requested mcast addr to BMac's hash table filter.
867 bmac_addhash(struct bmac_data *bp, unsigned char *addr)
872 if (!(*addr)) return;
873 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
874 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
875 if (bp->hash_use_count[crc]++) return; /* This bit is already set */
877 mask = (unsigned char)1 << mask;
878 	bp->hash_table_mask[crc/16] |= mask;
882 bmac_removehash(struct bmac_data *bp, unsigned char *addr)
887 /* Now, delete the address from the filter copy, as indicated */
888 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
889 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
890 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
891 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
893 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
894 bp->hash_table_mask[crc/16] &= mask;
898 * Sync the adapter with the software copy of the multicast mask
899 * (logical address filter).
903 bmac_rx_off(struct net_device *dev)
905 unsigned short rx_cfg;
907 rx_cfg = bmread(dev, RXCFG);
908 rx_cfg &= ~RxMACEnable;
909 bmwrite(dev, RXCFG, rx_cfg);
911 rx_cfg = bmread(dev, RXCFG);
912 } while (rx_cfg & RxMACEnable);
916 bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
918 unsigned short rx_cfg;
920 rx_cfg = bmread(dev, RXCFG);
921 rx_cfg |= RxMACEnable;
922 if (hash_enable) rx_cfg |= RxHashFilterEnable;
923 else rx_cfg &= ~RxHashFilterEnable;
924 if (promisc_enable) rx_cfg |= RxPromiscEnable;
925 else rx_cfg &= ~RxPromiscEnable;
926 bmwrite(dev, RXRST, RxResetValue);
927 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
928 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
929 bmwrite(dev, RXCFG, rx_cfg );
934 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
936 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
937 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
938 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
939 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
944 bmac_add_multi(struct net_device *dev,
945 struct bmac_data *bp, unsigned char *addr)
947 /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
948 bmac_addhash(bp, addr);
950 bmac_update_hash_table_mask(dev, bp);
951 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
952 /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
956 bmac_remove_multi(struct net_device *dev,
957 struct bmac_data *bp, unsigned char *addr)
959 bmac_removehash(bp, addr);
961 bmac_update_hash_table_mask(dev, bp);
962 bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
966 /* Set or clear the multicast filter for this adaptor.
967 num_addrs == -1 Promiscuous mode, receive all packets
968 num_addrs == 0 Normal mode, clear multicast list
969 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
970 best-effort filtering.
972 static void bmac_set_multicast(struct net_device *dev)
974 struct netdev_hw_addr *ha;
975 struct bmac_data *bp = netdev_priv(dev);
976 int num_addrs = netdev_mc_count(dev);
977 unsigned short rx_cfg;
983 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
985 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
986 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
987 bmac_update_hash_table_mask(dev, bp);
988 rx_cfg = bmac_rx_on(dev, 1, 0);
989 		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
990 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
991 rx_cfg = bmread(dev, RXCFG);
992 rx_cfg |= RxPromiscEnable;
993 bmwrite(dev, RXCFG, rx_cfg);
994 rx_cfg = bmac_rx_on(dev, 0, 1);
995 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
997 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
998 for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
999 if (num_addrs == 0) {
1000 rx_cfg = bmac_rx_on(dev, 0, 0);
1001 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
1003 netdev_for_each_mc_addr(ha, dev)
1004 bmac_addhash(bp, ha->addr);
1005 bmac_update_hash_table_mask(dev, bp);
1006 rx_cfg = bmac_rx_on(dev, 1, 0);
1007 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
1010 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
1012 #else /* ifdef SUNHME_MULTICAST */
1014 /* The version of set_multicast below was lifted from sunhme.c */
1016 static void bmac_set_multicast(struct net_device *dev)
1018 struct netdev_hw_addr *ha;
1020 unsigned short rx_cfg;
1023 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
1024 bmwrite(dev, BHASH0, 0xffff);
1025 bmwrite(dev, BHASH1, 0xffff);
1026 bmwrite(dev, BHASH2, 0xffff);
1027 bmwrite(dev, BHASH3, 0xffff);
1028 } else if(dev->flags & IFF_PROMISC) {
1029 rx_cfg = bmread(dev, RXCFG);
1030 rx_cfg |= RxPromiscEnable;
1031 bmwrite(dev, RXCFG, rx_cfg);
1035 rx_cfg = bmread(dev, RXCFG);
1036 rx_cfg &= ~RxPromiscEnable;
1037 bmwrite(dev, RXCFG, rx_cfg);
1039 for(i = 0; i < 4; i++) hash_table[i] = 0;
1041 netdev_for_each_mc_addr(ha, dev) {
1042 crc = ether_crc_le(6, ha->addr);
1044 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1046 bmwrite(dev, BHASH0, hash_table[0]);
1047 bmwrite(dev, BHASH1, hash_table[1]);
1048 bmwrite(dev, BHASH2, hash_table[2]);
1049 bmwrite(dev, BHASH3, hash_table[3]);
1052 #endif /* SUNHME_MULTICAST */
1054 static int miscintcount;
1056 static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
1058 struct net_device *dev = (struct net_device *) dev_id;
1059 unsigned int status = bmread(dev, STATUS);
1060 if (miscintcount++ < 10) {
1061 XXDEBUG(("bmac_misc_intr\n"));
1063 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
1064 /* bmac_txdma_intr_inner(irq, dev_id); */
1065 /* if (status & FrameReceived) dev->stats.rx_dropped++; */
1066 if (status & RxErrorMask) dev->stats.rx_errors++;
1067 if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
1068 if (status & RxLenCntExp) dev->stats.rx_length_errors++;
1069 if (status & RxOverFlow) dev->stats.rx_over_errors++;
1070 if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
1072 /* if (status & FrameSent) dev->stats.tx_dropped++; */
1073 if (status & TxErrorMask) dev->stats.tx_errors++;
1074 if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
1075 if (status & TxNormalCollExp) dev->stats.collisions++;
1080 * Procedure for reading EEPROM
1082 #define SROMAddressLength 5
1083 #define DataInOn 0x0008
1084 #define DataInOff 0x0000
1086 #define ChipSelect 0x0001
1087 #define SDIShiftCount 3
1088 #define SD0ShiftCount 2
1089 #define DelayValue 1000 /* number of microseconds */
1090 #define SROMStartOffset 10 /* this is in words */
1091 #define SROMReadCount 3 /* number of words to read from SROM */
1092 #define SROMAddressBits 6
1093 #define EnetAddressOffset 20
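/*
 * These constants drive a bit-banged read of the serial EEPROM (SROM)
 * through the SROMCSR register: reset_and_select_srom() selects the part
 * and clocks out the read opcode (110), read_srom() then shifts out the
 * address bits and clocks 16 data bits back in.  The station address is
 * stored starting at word offset EnetAddressOffset/2.
 */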
1095 static unsigned char
1096 bmac_clock_out_bit(struct net_device *dev)
1098 unsigned short data;
1101 bmwrite(dev, SROMCSR, ChipSelect | Clk);
1104 data = bmread(dev, SROMCSR);
1106 val = (data >> SD0ShiftCount) & 1;
1108 bmwrite(dev, SROMCSR, ChipSelect);
1115 bmac_clock_in_bit(struct net_device *dev, unsigned int val)
1117 unsigned short data;
1119 if (val != 0 && val != 1) return;
1121 data = (val << SDIShiftCount);
1122 bmwrite(dev, SROMCSR, data | ChipSelect );
1125 bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
1128 bmwrite(dev, SROMCSR, data | ChipSelect);
1133 reset_and_select_srom(struct net_device *dev)
1136 bmwrite(dev, SROMCSR, 0);
1139 /* send it the read command (110) */
1140 bmac_clock_in_bit(dev, 1);
1141 bmac_clock_in_bit(dev, 1);
1142 bmac_clock_in_bit(dev, 0);
1145 static unsigned short
1146 read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
1148 unsigned short data, val;
1151 /* send out the address we want to read from */
1152 for (i = 0; i < addr_len; i++) {
1153 val = addr >> (addr_len-i-1);
1154 bmac_clock_in_bit(dev, val & 1);
1157 /* Now read in the 16-bit data */
1159 for (i = 0; i < 16; i++) {
1160 val = bmac_clock_out_bit(dev);
1164 bmwrite(dev, SROMCSR, 0);
1170 * It looks like Cogent and SMC use different methods for calculating
1171 * checksums. What a pain..
1175 bmac_verify_checksum(struct net_device *dev)
1177 unsigned short data, storedCS;
1179 reset_and_select_srom(dev);
1180 data = read_srom(dev, 3, SROMAddressBits);
1181 storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);
1188 bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1191 unsigned short data;
1193 for (i = 0; i < 6; i++)
1195 reset_and_select_srom(dev);
1196 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1197 ea[2*i] = bitrev8(data & 0x0ff);
1198 ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
1202 static void bmac_reset_and_enable(struct net_device *dev)
1204 struct bmac_data *bp = netdev_priv(dev);
1205 unsigned long flags;
1206 struct sk_buff *skb;
1207 unsigned char *data;
1209 spin_lock_irqsave(&bp->lock, flags);
1210 bmac_enable_and_reset_chip(dev);
1211 bmac_init_tx_ring(bp);
1212 bmac_init_rx_ring(dev);
1213 bmac_init_chip(dev);
1214 bmac_start_chip(dev);
1215 bmwrite(dev, INTDISABLE, EnableNormal);
1219 * It seems that the bmac can't receive until it's transmitted
1220 * a packet. So we give it a dummy packet to transmit.
1222 skb = netdev_alloc_skb(dev, ETHERMINPACKET);
1224 data = skb_put(skb, ETHERMINPACKET);
1225 memset(data, 0, ETHERMINPACKET);
1226 memcpy(data, dev->dev_addr, 6);
1227 memcpy(data+6, dev->dev_addr, 6);
1228 bmac_transmit_packet(skb, dev);
1230 spin_unlock_irqrestore(&bp->lock, flags);
1233 static const struct ethtool_ops bmac_ethtool_ops = {
1234 .get_link = ethtool_op_get_link,
1237 static const struct net_device_ops bmac_netdev_ops = {
1238 .ndo_open = bmac_open,
1239 .ndo_stop = bmac_close,
1240 .ndo_start_xmit = bmac_output,
1241 .ndo_set_rx_mode = bmac_set_multicast,
1242 .ndo_set_mac_address = bmac_set_address,
1243 .ndo_change_mtu = eth_change_mtu,
1244 .ndo_validate_addr = eth_validate_addr,
1247 static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
1250 struct bmac_data *bp;
1251 const unsigned char *prop_addr;
1252 unsigned char addr[6];
1253 struct net_device *dev;
1254 int is_bmac_plus = ((int)match->data) != 0;
1256 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
1257 printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
1260 prop_addr = of_get_property(macio_get_of_node(mdev),
1261 "mac-address", NULL);
1262 if (prop_addr == NULL) {
1263 prop_addr = of_get_property(macio_get_of_node(mdev),
1264 "local-mac-address", NULL);
1265 if (prop_addr == NULL) {
1266 printk(KERN_ERR "BMAC: Can't get mac-address\n");
1270 memcpy(addr, prop_addr, sizeof(addr));
1272 dev = alloc_etherdev(PRIV_BYTES);
1276 bp = netdev_priv(dev);
1277 SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
1278 macio_set_drvdata(mdev, dev);
1281 spin_lock_init(&bp->lock);
1283 if (macio_request_resources(mdev, "bmac")) {
1284 printk(KERN_ERR "BMAC: can't request IO resource !\n");
1288 dev->base_addr = (unsigned long)
1289 ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
1290 if (dev->base_addr == 0)
1293 dev->irq = macio_irq(mdev, 0);
1295 bmac_enable_and_reset_chip(dev);
1296 bmwrite(dev, INTDISABLE, DisableAll);
1298 rev = addr[0] == 0 && addr[1] == 0xA0;
1299 for (j = 0; j < 6; ++j)
1300 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
1302 /* Enable chip without interrupts for now */
1303 bmac_enable_and_reset_chip(dev);
1304 bmwrite(dev, INTDISABLE, DisableAll);
1306 dev->netdev_ops = &bmac_netdev_ops;
1307 dev->ethtool_ops = &bmac_ethtool_ops;
1309 bmac_get_station_address(dev, addr);
1310 if (bmac_verify_checksum(dev) != 0)
1311 goto err_out_iounmap;
1313 bp->is_bmac_plus = is_bmac_plus;
1314 bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
1316 goto err_out_iounmap;
1317 bp->tx_dma_intr = macio_irq(mdev, 1);
1318 bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
1320 goto err_out_iounmap_tx;
1321 bp->rx_dma_intr = macio_irq(mdev, 2);
1323 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
1324 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
1326 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
1327 skb_queue_head_init(bp->queue);
1329 init_timer(&bp->tx_timeout);
1331 ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
1333 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
1334 goto err_out_iounmap_rx;
1336 ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
1338 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
1341 ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
1343 printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
1347 	/* Mask chip interrupts and disable the chip; they will be
1348 	 * re-enabled on open()
1350 disable_irq(dev->irq);
1351 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1353 if (register_netdev(dev) != 0) {
1354 printk(KERN_ERR "BMAC: Ethernet registration failed\n");
1358 printk(KERN_INFO "%s: BMAC%s at %pM",
1359 dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
1360 XXDEBUG((", base_addr=%#0lx", dev->base_addr));
1366 free_irq(bp->rx_dma_intr, dev);
1368 free_irq(bp->tx_dma_intr, dev);
1370 free_irq(dev->irq, dev);
1372 iounmap(bp->rx_dma);
1374 iounmap(bp->tx_dma);
1376 iounmap((void __iomem *)dev->base_addr);
1378 macio_release_resources(mdev);
1380 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1386 static int bmac_open(struct net_device *dev)
1388 struct bmac_data *bp = netdev_priv(dev);
1389 /* XXDEBUG(("bmac: enter open\n")); */
1390 /* reset the chip */
1392 bmac_reset_and_enable(dev);
1393 enable_irq(dev->irq);
1397 static int bmac_close(struct net_device *dev)
1399 struct bmac_data *bp = netdev_priv(dev);
1400 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
1401 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
1402 unsigned short config;
1407 /* disable rx and tx */
1408 config = bmread(dev, RXCFG);
1409 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1411 config = bmread(dev, TXCFG);
1412 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1414 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
1416 /* disable rx and tx dma */
1417 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
1418 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
1420 /* free some skb's */
1421 XXDEBUG(("bmac: free rx bufs\n"));
1422 for (i=0; i<N_RX_RING; i++) {
1423 if (bp->rx_bufs[i] != NULL) {
1424 dev_kfree_skb(bp->rx_bufs[i]);
1425 bp->rx_bufs[i] = NULL;
1428 XXDEBUG(("bmac: free tx bufs\n"));
1429 for (i = 0; i<N_TX_RING; i++) {
1430 if (bp->tx_bufs[i] != NULL) {
1431 dev_kfree_skb(bp->tx_bufs[i]);
1432 bp->tx_bufs[i] = NULL;
1435 XXDEBUG(("bmac: all bufs freed\n"));
1438 disable_irq(dev->irq);
1439 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1445 bmac_start(struct net_device *dev)
1447 struct bmac_data *bp = netdev_priv(dev);
1449 struct sk_buff *skb;
1450 unsigned long flags;
1455 spin_lock_irqsave(&bp->lock, flags);
1457 i = bp->tx_fill + 1;
1460 if (i == bp->tx_empty)
1462 skb = skb_dequeue(bp->queue);
1465 bmac_transmit_packet(skb, dev);
1467 spin_unlock_irqrestore(&bp->lock, flags);
1471 bmac_output(struct sk_buff *skb, struct net_device *dev)
1473 struct bmac_data *bp = netdev_priv(dev);
1474 skb_queue_tail(bp->queue, skb);
1476 return NETDEV_TX_OK;
1479 static void bmac_tx_timeout(unsigned long data)
1481 struct net_device *dev = (struct net_device *) data;
1482 struct bmac_data *bp = netdev_priv(dev);
1483 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
1484 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
1485 volatile struct dbdma_cmd *cp;
1486 unsigned long flags;
1487 unsigned short config, oldConfig;
1490 XXDEBUG(("bmac: tx_timeout called\n"));
1491 spin_lock_irqsave(&bp->lock, flags);
1492 bp->timeout_active = 0;
1494 /* update various counters */
1495 /* bmac_handle_misc_intrs(bp, 0); */
1497 cp = &bp->tx_cmds[bp->tx_empty];
1498 /* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
1499 /* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
1500 /* mb->pr, mb->xmtfs, mb->fifofc)); */
1502 /* turn off both tx and rx and reset the chip */
1503 config = bmread(dev, RXCFG);
1504 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1505 config = bmread(dev, TXCFG);
1506 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1507 out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
1508 printk(KERN_ERR "bmac: transmit timeout - resetting\n");
1509 bmac_enable_and_reset_chip(dev);
1511 /* restart rx dma */
1512 cp = bus_to_virt(ld_le32(&rd->cmdptr));
1513 out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
1514 out_le16(&cp->xfer_status, 0);
1515 out_le32(&rd->cmdptr, virt_to_bus(cp));
1516 out_le32(&rd->control, DBDMA_SET(RUN|WAKE));
1518 /* fix up the transmit side */
1519 XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
1520 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
1522 ++dev->stats.tx_errors;
1523 if (i != bp->tx_fill) {
1524 dev_kfree_skb(bp->tx_bufs[i]);
1525 bp->tx_bufs[i] = NULL;
1526 if (++i >= N_TX_RING) i = 0;
1530 netif_wake_queue(dev);
1531 if (i != bp->tx_fill) {
1532 cp = &bp->tx_cmds[i];
1533 out_le16(&cp->xfer_status, 0);
1534 out_le16(&cp->command, OUTPUT_LAST);
1535 out_le32(&td->cmdptr, virt_to_bus(cp));
1536 out_le32(&td->control, DBDMA_SET(RUN));
1537 /* bmac_set_timeout(dev); */
1538 XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
1541 /* turn it back on */
1542 oldConfig = bmread(dev, RXCFG);
1543 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
1544 oldConfig = bmread(dev, TXCFG);
1545 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
1547 spin_unlock_irqrestore(&bp->lock, flags);
1551 static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
1555 for (i=0;i< count;i++) {
1558 printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
1570 bmac_proc_info(char *buffer, char **start, off_t offset, int length)
1577 if (bmac_devs == NULL)
1580 len += sprintf(buffer, "BMAC counters & registers\n");
1582 for (i = 0; i<N_REG_ENTRIES; i++) {
1583 len += sprintf(buffer + len, "%s: %#08x\n",
1584 reg_entries[i].name,
1585 bmread(bmac_devs, reg_entries[i].reg_offset));
1593 if (pos > offset+length) break;
1596 *start = buffer + (offset - begin);
1597 len -= (offset - begin);
1599 if (len > length) len = length;
1605 static int bmac_remove(struct macio_dev *mdev)
1607 struct net_device *dev = macio_get_drvdata(mdev);
1608 struct bmac_data *bp = netdev_priv(dev);
1610 unregister_netdev(dev);
1612 free_irq(dev->irq, dev);
1613 free_irq(bp->tx_dma_intr, dev);
1614 free_irq(bp->rx_dma_intr, dev);
1616 iounmap((void __iomem *)dev->base_addr);
1617 iounmap(bp->tx_dma);
1618 iounmap(bp->rx_dma);
1620 macio_release_resources(mdev);
1627 static struct of_device_id bmac_match[] =
1635 .compatible = "bmac+",
1640 MODULE_DEVICE_TABLE (of, bmac_match);
1642 static struct macio_driver bmac_driver =
1646 .owner = THIS_MODULE,
1647 .of_match_table = bmac_match,
1649 .probe = bmac_probe,
1650 .remove = bmac_remove,
1652 .suspend = bmac_suspend,
1653 .resume = bmac_resume,
1658 static int __init bmac_init(void)
1660 if (bmac_emergency_rxbuf == NULL) {
1661 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
1662 if (bmac_emergency_rxbuf == NULL)
1666 return macio_register_driver(&bmac_driver);
1669 static void __exit bmac_exit(void)
1671 macio_unregister_driver(&bmac_driver);
1673 kfree(bmac_emergency_rxbuf);
1674 bmac_emergency_rxbuf = NULL;
1677 MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
1678 MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
1679 MODULE_LICENSE("GPL");
1681 module_init(bmac_init);
1682 module_exit(bmac_exit);