/*
 * Agere Systems Inc.
 * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 *------------------------------------------------------------------------------
 *
 * et1310_tx.c - Routines used to perform data transmission.
 *
 *------------------------------------------------------------------------------
 *
 * SOFTWARE LICENSE
 *
 * This software is provided subject to the following terms and conditions,
 * which you should read carefully before using the software. Using this
 * software indicates your acceptance of these terms and conditions. If you do
 * not agree with these terms and conditions, do not use the software.
 *
 * Copyright © 2005 Agere Systems Inc.
 * All rights reserved.
 *
 * Redistribution and use in source or binary forms, with or without
 * modifications, are permitted provided that the following conditions are met:
 *
 * . Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following Disclaimer as comments in the code as
 *   well as in the documentation and/or other materials provided with the
 *   distribution.
 *
 * . Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following Disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * . Neither the name of Agere Systems Inc. nor the names of the contributors
 *   may be used to endorse or promote products derived from this software
 *   without specific prior written permission.
 *
 * Disclaimer
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
 * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
 * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "et131x_version.h"
#include "et131x_debug.h"
#include "et131x_defs.h"

#include <linux/pci.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>

#include <linux/delay.h>

#include <linux/bitops.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include "et1310_phy.h"
#include "et1310_pm.h"
#include "et1310_jagcore.h"

#include "et131x_adapter.h"
#include "et131x_initpci.h"
#include "et131x_isr.h"

#include "et1310_tx.h"

/* Data for debugging facilities */
#ifdef CONFIG_ET131X_DEBUG
extern dbg_info_t *et131x_dbginfo;
#endif /* CONFIG_ET131X_DEBUG */

static void et131x_update_tcb_list(struct et131x_adapter *etdev);
static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
					   PMP_TCB pMpTcb);
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev);
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb);

/**
 * et131x_tx_dma_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h).
 *
 * Allocates memory that will be visible both to the device and to the CPU.
 * The OS will pass us packets, pointers to which we will insert in the Tx
 * Descriptor queue. The device will read this queue to find the packets in
 * memory. The device will update the "status" in memory each time it xmits a
 * packet.
 */
int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	TX_RING_t *tx_ring = &adapter->TxRing;

	DBG_ENTER(et131x_dbginfo);

	/* Allocate memory for the TCBs (Transmit Control Blocks) */
	adapter->TxRing.MpTcbMem = kcalloc(NUM_TCB, sizeof(MP_TCB),
					   GFP_ATOMIC | GFP_DMA);
	if (!adapter->TxRing.MpTcbMem) {
		DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Allocate enough memory for the Tx descriptor ring, and allocate
	 * some extra so that the ring can be aligned on a 4k boundary.
	 */
	desc_size = (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;
	tx_ring->pTxDescRingVa =
	    (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
						    &tx_ring->pTxDescRingPa);
	if (!adapter->TxRing.pTxDescRingVa) {
		DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Save physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	tx_ring->pTxDescRingAdjustedPa = tx_ring->pTxDescRingPa;

	/* Align Tx Descriptor Ring on a 4k (0x1000) byte boundary */
	et131x_align_allocated_memory(adapter,
				      &tx_ring->pTxDescRingAdjustedPa,
				      &tx_ring->TxDescOffset, 0x0FFF);
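
	/* A sketch of what et131x_align_allocated_memory() presumably does
	 * with the 0x0FFF mask (an assumption; the helper lives elsewhere in
	 * the driver and is not shown here):
	 *
	 *   adjusted = (pa + 0x0FFF) & ~(uint64_t)0x0FFF;  round up to 4k
	 *   offset   = adjusted - pa;                      bytes skipped
	 *
	 * The same offset is applied to the virtual address below so the CPU
	 * and device views of the ring stay in step.
	 */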
	tx_ring->pTxDescRingVa += tx_ring->TxDescOffset;

	/* Allocate memory for the Tx status block */
	tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
						    sizeof(TX_STATUS_BLOCK_t),
						    &tx_ring->pTxStatusPa);
	if (!adapter->TxRing.pTxStatusVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Tx status block\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	/* Allocate memory for a dummy buffer */
	tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
						      NIC_MIN_PACKET_SIZE,
						      &tx_ring->pTxDummyBlkPa);
	if (!adapter->TxRing.pTxDummyBlkVa) {
		DBG_ERROR(et131x_dbginfo,
			  "Cannot alloc memory for Tx dummy buffer\n");
		DBG_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	DBG_LEAVE(et131x_dbginfo);
	return 0;
}

/**
 * et131x_tx_dma_memory_free - Free all memory allocated within this module
 * @adapter: pointer to our private adapter structure
 */
void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;

	DBG_ENTER(et131x_dbginfo);

	if (adapter->TxRing.pTxDescRingVa) {
		/* Free memory relating to Tx rings here */
		adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;

		desc_size =
		    (sizeof(TX_DESC_ENTRY_t) * NUM_DESC_PER_RING_TX) + 4096 - 1;

		pci_free_consistent(adapter->pdev,
				    desc_size,
				    adapter->TxRing.pTxDescRingVa,
				    adapter->TxRing.pTxDescRingPa);

		adapter->TxRing.pTxDescRingVa = NULL;
	}

	/* Free memory for the Tx status block */
	if (adapter->TxRing.pTxStatusVa) {
		pci_free_consistent(adapter->pdev,
				    sizeof(TX_STATUS_BLOCK_t),
				    adapter->TxRing.pTxStatusVa,
				    adapter->TxRing.pTxStatusPa);

		adapter->TxRing.pTxStatusVa = NULL;
	}

	/* Free memory for the dummy buffer */
	if (adapter->TxRing.pTxDummyBlkVa) {
		pci_free_consistent(adapter->pdev,
				    NIC_MIN_PACKET_SIZE,
				    adapter->TxRing.pTxDummyBlkVa,
				    adapter->TxRing.pTxDummyBlkPa);

		adapter->TxRing.pTxDummyBlkVa = NULL;
	}

	/* Free the memory for the MP_TCB structures */
	kfree(adapter->TxRing.MpTcbMem);

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * ConfigTxDmaRegs - Set up the tx dma section of the JAGCore.
 * @etdev: pointer to our private adapter structure
 */
void ConfigTxDmaRegs(struct et131x_adapter *etdev)
{
	struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;

	DBG_ENTER(et131x_dbginfo);

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
	       &txdma->pr_base_hi);
	writel((uint32_t) etdev->TxRing.pTxDescRingAdjustedPa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);

	/* Load the completion writeback physical address
	 *
	 * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here before
	 * storing the adjusted address.
	 */
	writel(0, &txdma->dma_wb_base_hi);
	writel(etdev->TxRing.pTxStatusPa, &txdma->dma_wb_base_lo);

	memset(etdev->TxRing.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
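
	/* The block just cleared is the completion-writeback target that
	 * dma_wb_base_hi/lo were programmed with above: the device DMAs its
	 * transmit completion state into it, so it must start out zeroed or
	 * stale completion data could be seen after a reconfiguration.
	 */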
	writel(0, &txdma->service_request);
	etdev->TxRing.txDmaReadyToSend = 0;

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_tx_dma_disable - Stop Tx DMA on the ET1310
 * @etdev: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *etdev)
{
	DBG_ENTER(et131x_dbginfo);

	/* Setup the transmit dma configuration register */
	writel(0x101, &etdev->regs->txdma.csr.value);
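
	/* Reading the 0x101 written above against the enable path below,
	 * it plausibly combines the halt request bit with the
	 * single-packet-mode bit (csr.bits.sngl_epkt_mode); this is an
	 * inference from usage in this file, not a documented register value.
	 */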
	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_tx_dma_enable - re-start Tx DMA on the ET1310.
 * @etdev: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *etdev)
{
	DBG_ENTER(et131x_dbginfo);

	if (etdev->RegistryPhyLoopbk) {
		/* TxDMA is disabled for loopback operation. */
		writel(0x101, &etdev->regs->txdma.csr.value);
	} else {
		TXDMA_CSR_t csr = { 0 };

		/* Setup the transmit dma configuration register for normal
		 * operation
		 */
		csr.bits.sngl_epkt_mode = 1;
		csr.bits.halt = 0;
		csr.bits.cache_thrshld = PARM_DMA_CACHE_DEF;
		writel(csr.value, &etdev->regs->txdma.csr.value);
	}

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	PMP_TCB pMpTcb;
	uint32_t TcbCount;
	TX_RING_t *tx_ring;

	DBG_ENTER(et131x_dbginfo);

	/* Setup some convenience pointers */
	tx_ring = &adapter->TxRing;
	pMpTcb = adapter->TxRing.MpTcbMem;

	tx_ring->TCBReadyQueueHead = pMpTcb;

	/* Go through and set up each TCB */
	for (TcbCount = 0; TcbCount < NUM_TCB; TcbCount++) {
		memset(pMpTcb, 0, sizeof(MP_TCB));

		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain. If this is the last TCB in the chain, also set the
		 * tail pointer.
		 */
		if (TcbCount < NUM_TCB - 1) {
			pMpTcb->Next = pMpTcb + 1;
		} else {
			tx_ring->TCBReadyQueueTail = pMpTcb;
			pMpTcb->Next = (PMP_TCB) NULL;
		}

		pMpTcb++;
	}

	/* Curr send queue should now be empty */
	tx_ring->CurrSendHead = (PMP_TCB) NULL;
	tx_ring->CurrSendTail = (PMP_TCB) NULL;
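
	/* TCB lifecycle, as used throughout this file: a TCB is popped from
	 * the ready queue (TCBReadyQueueHead/Tail) when a packet is sent,
	 * sits on the current send queue (CurrSendHead/Tail) while the
	 * hardware owns its descriptors, and is recycled back onto the ready
	 * queue by et131x_free_send_packet(). SendWaitQueue, initialized
	 * below, is meant to hold packets that arrive while no TCB is free.
	 */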
	INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *etdev = NULL;

	DBG_TX_ENTER(et131x_dbginfo);

	etdev = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so a PacketCount and its associated array make no sense here
	 */

	/* Queue is not empty or TCB is not available */
	if (!list_empty(&etdev->TxRing.SendWaitQueue) ||
	    MP_TCB_RESOURCES_NOT_AVAILABLE(etdev)) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n");
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 *
		 * if( MP_SHOULD_FAIL_SEND( etdev ) ||
		 *     etdev->DriverNoPhyAccess )
		 */
		if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess
		    || !netif_carrier_ok(netdev)) {
			DBG_VERBOSE(et131x_dbginfo,
				    "Can't Tx, Link is DOWN; drop the packet\n");

			dev_kfree_skb_any(skb);
			skb = NULL;

			etdev->net_stats.tx_dropped++;
		} else {
			status = et131x_send_packet(skb, etdev);

			if (status == -ENOMEM) {
				/* NOTE: If there's an error on send, no need
				 * to queue the packet under Linux; if we just
				 * send an error up to the netif layer, it
				 * will resend the skb to us.
				 */
				DBG_WARNING(et131x_dbginfo,
					    "Resources problem, Queue tx packet\n");
			} else if (status != 0) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				DBG_WARNING(et131x_dbginfo,
					    "General error, drop packet\n");

				dev_kfree_skb_any(skb);
				skb = NULL;

				etdev->net_stats.tx_dropped++;
			}
		}
	}

	DBG_TX_LEAVE(et131x_dbginfo);
	return status;
}

/**
 * et131x_send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @etdev: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int et131x_send_packet(struct sk_buff *skb,
			      struct et131x_adapter *etdev)
{
	int status = 0;
	PMP_TCB pMpTcb = NULL;
	uint16_t *shbufva;
	unsigned long flags;

	DBG_TX_ENTER(et131x_dbginfo);

	/* Is our buffer scattered, or contiguous? */
	if (skb_shinfo(skb)->nr_frags == 0) {
		DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n");
	} else {
		DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n",
		       skb_shinfo(skb)->nr_frags);
	}

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN) {
		DBG_ERROR(et131x_dbginfo,
			  "Packet size < ETH_HLEN (14 bytes)\n");
		DBG_LEAVE(et131x_dbginfo);
		return -EIO;
	}

	/* Get a TCB for this packet */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	pMpTcb = etdev->TxRing.TCBReadyQueueHead;

	if (pMpTcb == NULL) {
		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

		DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
		DBG_TX_LEAVE(et131x_dbginfo);
		return -ENOMEM;
	}

	etdev->TxRing.TCBReadyQueueHead = pMpTcb->Next;

	if (etdev->TxRing.TCBReadyQueueHead == NULL)
		etdev->TxRing.TCBReadyQueueTail = NULL;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	pMpTcb->PacketLength = skb->len;
	pMpTcb->Packet = skb;

	if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
		shbufva = (uint16_t *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			pMpTcb->Flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			pMpTcb->Flags |= fMP_DEST_MULTI;
		}
	}
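
	/* The classification above reads the destination MAC as three
	 * little-endian uint16_t words: all-ones means broadcast, and the
	 * I/G (multicast) bit of the first octet lands in bit 0 of
	 * shbufva[0] on little-endian hosts. The 0x3 mask additionally
	 * requires the U/L bit to be clear; that behaviour is inherited
	 * from the original code as-is.
	 */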

	/* Call the NIC specific send handler. */
	status = nic_send_packet(etdev, pMpTcb);

	if (status != 0) {
		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

		if (etdev->TxRing.TCBReadyQueueTail) {
			etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
		} else {
			/* Apparently ready Q is empty. */
			etdev->TxRing.TCBReadyQueueHead = pMpTcb;
		}

		etdev->TxRing.TCBReadyQueueTail = pMpTcb;

		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

		DBG_TX_LEAVE(et131x_dbginfo);
		return status;
	}

	DBG_ASSERT(etdev->TxRing.nBusySend <= NUM_TCB);

	DBG_TX_LEAVE(et131x_dbginfo);
	return 0;
}

/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
	uint32_t loopIndex;
	TX_DESC_ENTRY_t CurDesc[24];
	uint32_t FragmentNumber = 0;
	uint32_t thiscopy, remainder;
	unsigned long flags;
	struct sk_buff *pPacket = pMpTcb->Packet;
	uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
	struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
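
	/* Note: FragListCount is nr_frags + 1 because the skb's linear
	 * (header) area is mapped as its own fragment in the loop below.
	 */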

	DBG_TX_ENTER(et131x_dbginfo);

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments in a single pass. In practice we should never
	 * see more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */
	if (FragListCount > 23) {
		DBG_TX_LEAVE(et131x_dbginfo);
		return -EIO;
	}

	memset(CurDesc, 0, sizeof(TX_DESC_ENTRY_t) * (FragListCount + 1));

	for (loopIndex = 0; loopIndex < FragListCount; loopIndex++) {
		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		if (loopIndex == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((pPacket->len - pPacket->data_len) <= 1514) {
				DBG_TX(et131x_dbginfo,
				       "Got packet of length %d, "
				       "filling desc entry %d, "
				       "TCB: 0x%p\n",
				       (pPacket->len - pPacket->data_len),
				       etdev->TxRing.txDmaReadyToSend, pMpTcb);

				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

				CurDesc[FragmentNumber].word2.bits.
				    length_in_bytes =
				    pPacket->len - pPacket->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * uint32_t. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				CurDesc[FragmentNumber++].DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data,
						   pPacket->len -
						   pPacket->data_len,
						   PCI_DMA_TODEVICE);
			} else {
				DBG_TX(et131x_dbginfo,
				       "Got packet of length %d, "
				       "filling desc entry %d, "
				       "TCB: 0x%p\n",
				       (pPacket->len - pPacket->data_len),
				       etdev->TxRing.txDmaReadyToSend, pMpTcb);

				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

				CurDesc[FragmentNumber].word2.bits.
				    length_in_bytes =
				    ((pPacket->len - pPacket->data_len) / 2);

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * uint32_t. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				CurDesc[FragmentNumber++].DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data,
						   ((pPacket->len -
						     pPacket->data_len) / 2),
						   PCI_DMA_TODEVICE);

				CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

				CurDesc[FragmentNumber].word2.bits.
				    length_in_bytes =
				    ((pPacket->len - pPacket->data_len) / 2);

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * uint32_t. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				CurDesc[FragmentNumber++].DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data +
						   ((pPacket->len -
						     pPacket->data_len) / 2),
						   ((pPacket->len -
						     pPacket->data_len) / 2),
						   PCI_DMA_TODEVICE);
			}
		} else {
			DBG_TX(et131x_dbginfo,
			       "Got packet of length %d, "
			       "filling desc entry %d\n"
			       "TCB: 0x%p\n",
			       pFragList[loopIndex - 1].size,
			       etdev->TxRing.txDmaReadyToSend,
			       pMpTcb);

			CurDesc[FragmentNumber].DataBufferPtrHigh = 0;

			CurDesc[FragmentNumber].word2.bits.length_in_bytes =
			    pFragList[loopIndex - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * pci_map_page() is implicitly cast as a uint32_t.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by pci_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			CurDesc[FragmentNumber++].DataBufferPtrLow =
			    pci_map_page(etdev->pdev,
					 pFragList[loopIndex - 1].page,
					 pFragList[loopIndex - 1].page_offset,
					 pFragList[loopIndex - 1].size,
					 PCI_DMA_TODEVICE);
		}
	}

	if (FragmentNumber == 0) {
		DBG_WARNING(et131x_dbginfo, "No. frags is 0\n");
		return -EIO;
	}

	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
		    PARM_TX_NUM_BUFS_DEF) {
			CurDesc[FragmentNumber - 1].word3.value = 0x5;
			etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
		} else {
			CurDesc[FragmentNumber - 1].word3.value = 0x1;
		}
	} else {
		CurDesc[FragmentNumber - 1].word3.value = 0x5;
	}
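
	/* On the word3 values used above (an inference from usage in this
	 * file, not from a register spec): bit 0 appears to mark the last
	 * descriptor of a packet and bit 2 to request an interrupt on
	 * completion, so 0x5 = last + interrupt and 0x1 = last only. At
	 * gigabit speed an interrupt is requested only once every
	 * PARM_TX_NUM_BUFS_DEF packets, to coalesce Tx interrupts.
	 */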
	CurDesc[0].word3.bits.f = 1;

	pMpTcb->WrIndexStart = etdev->TxRing.txDmaReadyToSend;
	pMpTcb->PacketStaleCount = 0;

	spin_lock_irqsave(&etdev->SendHWLock, flags);

	thiscopy = NUM_DESC_PER_RING_TX -
	    INDEX10(etdev->TxRing.txDmaReadyToSend);

	if (thiscopy >= FragmentNumber) {
		remainder = 0;
		thiscopy = FragmentNumber;
	} else {
		remainder = FragmentNumber - thiscopy;
	}

	memcpy(etdev->TxRing.pTxDescRingVa +
	       INDEX10(etdev->TxRing.txDmaReadyToSend), CurDesc,
	       sizeof(TX_DESC_ENTRY_t) * thiscopy);

	add_10bit(&etdev->TxRing.txDmaReadyToSend, thiscopy);

	if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0 ||
	    INDEX10(etdev->TxRing.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
		etdev->TxRing.txDmaReadyToSend &= ~ET_DMA10_MASK;
		etdev->TxRing.txDmaReadyToSend ^= ET_DMA10_WRAP;
	}
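
	/* txDmaReadyToSend is a 10-bit ring index (ET_DMA10_MASK) carrying an
	 * extra wrap bit (ET_DMA10_WRAP) that is toggled each time the index
	 * rolls over, as above. The wrap bit lets the driver and hardware
	 * tell a completely full ring apart from a completely empty one when
	 * the two indices are numerically equal.
	 */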

	if (remainder) {
		memcpy(etdev->TxRing.pTxDescRingVa,
		       CurDesc + thiscopy,
		       sizeof(TX_DESC_ENTRY_t) * remainder);

		add_10bit(&etdev->TxRing.txDmaReadyToSend, remainder);
	}

	if (INDEX10(etdev->TxRing.txDmaReadyToSend) == 0) {
		if (etdev->TxRing.txDmaReadyToSend)
			pMpTcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
		else
			pMpTcb->WrIndex =
			    ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
	} else
		pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend - 1;

	spin_lock(&etdev->TCBSendQLock);

	if (etdev->TxRing.CurrSendTail)
		etdev->TxRing.CurrSendTail->Next = pMpTcb;
	else
		etdev->TxRing.CurrSendHead = pMpTcb;

	etdev->TxRing.CurrSendTail = pMpTcb;

	DBG_ASSERT(pMpTcb->Next == NULL);

	etdev->TxRing.nBusySend++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->TxRing.txDmaReadyToSend,
	       &etdev->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	}

	spin_unlock_irqrestore(&etdev->SendHWLock, flags);

	DBG_TX_LEAVE(et131x_dbginfo);
	return 0;
}

/*
 * NOTE: For now, keep this older version of NICSendPacket around for
 * reference, even though it's not used
 */
#if 0

/**
 * NICSendPacket - NIC specific send handler.
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Returns 0 on success, errno on failure.
 *
 * This version of the send routine is designed for version A silicon.
 * Assumption - Send spinlock has been acquired.
 */
static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
{
	uint32_t loopIndex, fragIndex, loopEnd;
	uint32_t splitfirstelem = 0;
	uint32_t SegmentSize = 0;
	TX_DESC_ENTRY_t CurDesc;
	TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
	uint32_t SlotsAvailable;
	DMA10W_t ServiceComplete;
	unsigned long flags;
	struct sk_buff *pPacket = pMpTcb->Packet;
	uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
	struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];

	DBG_TX_ENTER(et131x_dbginfo);

	ServiceComplete.value =
	    readl(&etdev->regs->txdma.NewServiceComplete.value);

	/*
	 * Attempt to fix TWO hardware bugs:
	 * 1) NEVER write an odd number of descriptors.
	 * 2) If packet length is less than NIC_MIN_PACKET_SIZE, then pad the
	 *    packet to NIC_MIN_PACKET_SIZE bytes by adding a new last
	 *    descriptor IN HALF DUPLEX MODE ONLY
	 * NOTE that (2) interacts with (1). If the packet is less than
	 * NIC_MIN_PACKET_SIZE bytes then we will append a descriptor.
	 * Therefore if it is even now, it will eventually end up odd, and
	 * so will need adjusting.
	 *
	 * VLAN tags get involved since VLAN tags add another one or two
	 * segments.
	 */
	DBG_TX(et131x_dbginfo,
	       "pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength);

	if ((etdev->duplex_mode == 0)
	    && (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) {
		DBG_TX(et131x_dbginfo,
		       "HALF DUPLEX mode AND len < MIN_PKT_SIZE\n");
		if ((FragListCount & 0x1) == 0) {
			DBG_TX(et131x_dbginfo,
			       "Even number of descs, split 1st elem\n");
			splitfirstelem = 1;
			/* SegmentSize = pFragList[0].size / 2; */
			SegmentSize = (pPacket->len - pPacket->data_len) / 2;
		}
	} else if (FragListCount & 0x1) {
		DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n");

		splitfirstelem = 1;
		/* SegmentSize = pFragList[0].size / 2; */
		SegmentSize = (pPacket->len - pPacket->data_len) / 2;
	}

	spin_lock_irqsave(&etdev->SendHWLock, flags);

	if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
	    ServiceComplete.bits.serv_cpl_wrap) {
		/* The ring hasn't wrapped. Slots available should be
		 * (RING_SIZE) - the difference between the two pointers.
		 */
		SlotsAvailable = NUM_DESC_PER_RING_TX -
		    (etdev->TxRing.txDmaReadyToSend.bits.serv_req -
		     ServiceComplete.bits.serv_cpl);
	} else {
		/* The ring has wrapped. Slots available should be the
		 * difference between the two pointers.
		 */
		SlotsAvailable = ServiceComplete.bits.serv_cpl -
		    etdev->TxRing.txDmaReadyToSend.bits.serv_req;
	}

	if ((FragListCount + splitfirstelem) > SlotsAvailable) {
		DBG_WARNING(et131x_dbginfo,
			    "Not Enough Space in Tx Desc Ring\n");
		spin_unlock_irqrestore(&etdev->SendHWLock, flags);
		return -ENOMEM;
	}

	loopEnd = (FragListCount) + splitfirstelem;
	fragIndex = 0;

	DBG_TX(et131x_dbginfo,
	       "TCB           : 0x%p\n"
	       "Packet (SKB)  : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n"
	       "FragListCount : %d\t splitfirstelem: %d\t loopEnd:%d\n",
	       pMpTcb,
	       pPacket, pPacket->len, pPacket->data_len,
	       FragListCount, splitfirstelem, loopEnd);

	for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) {
		if (loopIndex > splitfirstelem)
			fragIndex++;

		DBG_TX(et131x_dbginfo,
		       "In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex,
		       fragIndex);

		/* If there is something in this element, let's get a
		 * descriptor from the ring and get the necessary data
		 */
		DBG_TX(et131x_dbginfo,
		       "Packet Length %d, "
		       "filling desc entry %d\n",
		       pPacket->len,
		       etdev->TxRing.txDmaReadyToSend.bits.serv_req);

		/*
		 * NOTE - Should we do a paranoia check here to make sure the fragment
		 * actually has a length? It's HIGHLY unlikely the fragment would
		 * contain no data...
		 */

		/* NOTE - Currently always getting 32-bit addrs, and
		 * dma_addr_t is only 32-bit, so leave "high" ptr value out of
		 * the descriptor for now
		 * CurDesc.DataBufferPtrHigh = 0;
		 */

		CurDesc.word2.value = 0;
		CurDesc.word3.value = 0;

		if (fragIndex == 0) {
			if (splitfirstelem) {
				DBG_TX(et131x_dbginfo,
				       "Split first element: YES\n");

				if (loopIndex == 0) {
					DBG_TX(et131x_dbginfo,
					       "Got fragment of length %d, fragIndex: %d\n",
					       pPacket->len -
					       pPacket->data_len,
					       fragIndex);
					DBG_TX(et131x_dbginfo,
					       "SegmentSize: %d\n",
					       SegmentSize);

					CurDesc.word2.bits.length_in_bytes =
					    SegmentSize;
					CurDesc.DataBufferPtrLow =
					    pci_map_single(etdev->pdev,
							   pPacket->data,
							   SegmentSize,
							   PCI_DMA_TODEVICE);
					DBG_TX(et131x_dbginfo,
					       "pci_map_single() returns: 0x%08x\n",
					       CurDesc.DataBufferPtrLow);
				} else {
					DBG_TX(et131x_dbginfo,
					       "Got fragment of length %d, fragIndex: %d\n",
					       pPacket->len -
					       pPacket->data_len,
					       fragIndex);
					DBG_TX(et131x_dbginfo,
					       "Leftover Size: %d\n",
					       (pPacket->len -
						pPacket->data_len -
						SegmentSize));

					CurDesc.word2.bits.length_in_bytes =
					    ((pPacket->len -
					      pPacket->data_len) -
					     SegmentSize);
					CurDesc.DataBufferPtrLow =
					    pci_map_single(etdev->pdev,
							   pPacket->data +
							   SegmentSize,
							   ((pPacket->len -
							     pPacket->data_len) -
							    SegmentSize),
							   PCI_DMA_TODEVICE);
					DBG_TX(et131x_dbginfo,
					       "pci_map_single() returns: 0x%08x\n",
					       CurDesc.DataBufferPtrLow);
				}
			} else {
				DBG_TX(et131x_dbginfo,
				       "Split first element: NO\n");

				CurDesc.word2.bits.length_in_bytes =
				    pPacket->len - pPacket->data_len;

				CurDesc.DataBufferPtrLow =
				    pci_map_single(etdev->pdev,
						   pPacket->data,
						   pPacket->len -
						   pPacket->data_len,
						   PCI_DMA_TODEVICE);
				DBG_TX(et131x_dbginfo,
				       "pci_map_single() returns: 0x%08x\n",
				       CurDesc.DataBufferPtrLow);
			}
		} else {
			CurDesc.word2.bits.length_in_bytes =
			    pFragList[fragIndex - 1].size;
			CurDesc.DataBufferPtrLow =
			    pci_map_page(etdev->pdev,
					 pFragList[fragIndex - 1].page,
					 pFragList[fragIndex - 1].page_offset,
					 pFragList[fragIndex - 1].size,
					 PCI_DMA_TODEVICE);
			DBG_TX(et131x_dbginfo,
			       "pci_map_page() returns: 0x%08x\n",
			       CurDesc.DataBufferPtrLow);
		}

		if (loopIndex == 0) {
			/* This is the first descriptor of the packet
			 *
			 * Set the "f" bit to indicate this is the
			 * first descriptor in the packet.
			 */
			DBG_TX(et131x_dbginfo,
			       "This is our FIRST descriptor\n");
			CurDesc.word3.bits.f = 1;

			pMpTcb->WrIndexStart =
			    etdev->TxRing.txDmaReadyToSend;
		}

		if ((loopIndex == (loopEnd - 1)) &&
		    (etdev->duplex_mode ||
		     (pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) {
			/* This is the Last descriptor of the packet */
			DBG_TX(et131x_dbginfo,
			       "THIS is our LAST descriptor\n");

			if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
				if (++etdev->TxRing.
				    TxPacketsSinceLastinterrupt >=
				    PARM_TX_NUM_BUFS_DEF) {
					CurDesc.word3.value = 0x5;
					etdev->TxRing.
					    TxPacketsSinceLastinterrupt = 0;
				} else {
					CurDesc.word3.value = 0x1;
				}
			} else {
				CurDesc.word3.value = 0x5;
			}

			/* Following index will be used during freeing
			 * of resources
			 */
			pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend;
			pMpTcb->PacketStaleCount = 0;
		}

		/* Copy the descriptor (filled above) into the
		 * descriptor ring at the next free entry. Advance
		 * the "next free entry" variable
		 */
		memcpy(etdev->TxRing.pTxDescRingVa +
		       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
		       &CurDesc, sizeof(TX_DESC_ENTRY_t));

		CurDescPostCopy =
		    etdev->TxRing.pTxDescRingVa +
		    etdev->TxRing.txDmaReadyToSend.bits.serv_req;

		DBG_TX(et131x_dbginfo,
		       "CURRENT DESCRIPTOR\n"
		       "\tAddress           : 0x%p\n"
		       "\tDataBufferPtrHigh : 0x%08x\n"
		       "\tDataBufferPtrLow  : 0x%08x\n"
		       "\tword2             : 0x%08x\n"
		       "\tword3             : 0x%08x\n",
		       CurDescPostCopy,
		       CurDescPostCopy->DataBufferPtrHigh,
		       CurDescPostCopy->DataBufferPtrLow,
		       CurDescPostCopy->word2.value,
		       CurDescPostCopy->word3.value);

		if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
		    NUM_DESC_PER_RING_TX) {
			if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap)
				etdev->TxRing.txDmaReadyToSend.value = 0;
			else
				etdev->TxRing.txDmaReadyToSend.value = 0x400;
		}
	}

	if (etdev->duplex_mode == 0 &&
	    pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) {
		/* NOTE - Same 32/64-bit issue as above... */
		CurDesc.DataBufferPtrHigh = 0x0;
		CurDesc.DataBufferPtrLow = etdev->TxRing.pTxDummyBlkPa;
		CurDesc.word2.value = 0;

		if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
			if (++etdev->TxRing.TxPacketsSinceLastinterrupt >=
			    PARM_TX_NUM_BUFS_DEF) {
				CurDesc.word3.value = 0x5;
				etdev->TxRing.TxPacketsSinceLastinterrupt = 0;
			} else {
				CurDesc.word3.value = 0x1;
			}
		} else {
			CurDesc.word3.value = 0x5;
		}

		CurDesc.word2.bits.length_in_bytes =
		    NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength;

		pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend;

		memcpy(etdev->TxRing.pTxDescRingVa +
		       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
		       &CurDesc, sizeof(TX_DESC_ENTRY_t));

		CurDescPostCopy =
		    etdev->TxRing.pTxDescRingVa +
		    etdev->TxRing.txDmaReadyToSend.bits.serv_req;

		DBG_TX(et131x_dbginfo,
		       "CURRENT DESCRIPTOR\n"
		       "\tAddress           : 0x%p\n"
		       "\tDataBufferPtrHigh : 0x%08x\n"
		       "\tDataBufferPtrLow  : 0x%08x\n"
		       "\tword2             : 0x%08x\n"
		       "\tword3             : 0x%08x\n",
		       CurDescPostCopy,
		       CurDescPostCopy->DataBufferPtrHigh,
		       CurDescPostCopy->DataBufferPtrLow,
		       CurDescPostCopy->word2.value,
		       CurDescPostCopy->word3.value);

		if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
		    NUM_DESC_PER_RING_TX) {
			if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap)
				etdev->TxRing.txDmaReadyToSend.value = 0;
			else
				etdev->TxRing.txDmaReadyToSend.value = 0x400;
		}

		DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n",
		       /* etdev->TxRing.txDmaReadyToSend.value, */
		       etdev->TxRing.txDmaReadyToSend.bits.serv_req,
		       NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
	}

	spin_lock(&etdev->TCBSendQLock);

	if (etdev->TxRing.CurrSendTail)
		etdev->TxRing.CurrSendTail->Next = pMpTcb;
	else
		etdev->TxRing.CurrSendHead = pMpTcb;

	etdev->TxRing.CurrSendTail = pMpTcb;

	DBG_ASSERT(pMpTcb->Next == NULL);

	etdev->TxRing.nBusySend++;

	spin_unlock(&etdev->TCBSendQLock);

	/* Write the new write pointer back to the device. */
	writel(etdev->TxRing.txDmaReadyToSend.value,
	       &etdev->regs->txdma.service_request.value);

#ifdef CONFIG_ET131X_DEBUG
	DumpDeviceBlock(DBG_TX_ON, etdev, 1);
#endif

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &etdev->regs->global.watchdog_timer);
	}

	spin_unlock_irqrestore(&etdev->SendHWLock, flags);

	DBG_TX_LEAVE(et131x_dbginfo);
	return 0;
}

#endif /* 0 */

/**
 * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
 * @etdev: pointer to our adapter
 * @pMpTcb: pointer to MP_TCB
 *
 * Assumption - Send spinlock has been acquired
 */
static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
					   PMP_TCB pMpTcb)
{
	unsigned long flags;
	TX_DESC_ENTRY_t *desc = NULL;
	struct net_device_stats *stats = &etdev->net_stats;

	if (pMpTcb->Flags & fMP_DEST_BROAD)
		atomic_inc(&etdev->Stats.brdcstxmt);
	else if (pMpTcb->Flags & fMP_DEST_MULTI)
		atomic_inc(&etdev->Stats.multixmt);
	else
		atomic_inc(&etdev->Stats.unixmt);

	if (pMpTcb->Packet) {
		stats->tx_bytes += pMpTcb->Packet->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		DBG_TX(et131x_dbginfo,
		       "Unmap descriptors Here\n"
		       "TCB                  : 0x%p\n"
		       "TCB Next             : 0x%p\n"
		       "TCB PacketLength     : %d\n"
		       "TCB WrIndexS.value   : 0x%08x\n"
		       "TCB WrIndex.value    : 0x%08x\n",
		       pMpTcb,
		       pMpTcb->Next,
		       pMpTcb->PacketLength,
		       pMpTcb->WrIndexStart,
		       pMpTcb->WrIndex);

		do {
			desc =
			    (TX_DESC_ENTRY_t *)(etdev->TxRing.pTxDescRingVa +
						INDEX10(pMpTcb->WrIndexStart));

			DBG_TX(et131x_dbginfo,
			       "CURRENT DESCRIPTOR\n"
			       "\tAddress           : 0x%p\n"
			       "\tDataBufferPtrHigh : 0x%08x\n"
			       "\tDataBufferPtrLow  : 0x%08x\n"
			       "\tword2             : 0x%08x\n"
			       "\tword3             : 0x%08x\n",
			       desc,
			       desc->DataBufferPtrHigh,
			       desc->DataBufferPtrLow,
			       desc->word2.value,
			       desc->word3.value);

			pci_unmap_single(etdev->pdev,
					 desc->DataBufferPtrLow,
					 desc->word2.value, PCI_DMA_TODEVICE);
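
			/* Passing desc->word2.value as the unmap length above
			 * works because word2 was zeroed before the length
			 * was written into its length_in_bytes field when the
			 * descriptor was built, so no other bits are set.
			 */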

			add_10bit(&pMpTcb->WrIndexStart, 1);
			if (INDEX10(pMpTcb->WrIndexStart) >=
			    NUM_DESC_PER_RING_TX) {
				pMpTcb->WrIndexStart &= ~ET_DMA10_MASK;
				pMpTcb->WrIndexStart ^= ET_DMA10_WRAP;
			}
		} while (desc != (etdev->TxRing.pTxDescRingVa +
				  INDEX10(pMpTcb->WrIndex)));

		DBG_TX(et131x_dbginfo,
		       "Free Packet (SKB)    : 0x%p\n", pMpTcb->Packet);

		dev_kfree_skb_any(pMpTcb->Packet);
	}

	memset(pMpTcb, 0, sizeof(MP_TCB));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);

	etdev->Stats.opackets++;

	if (etdev->TxRing.TCBReadyQueueTail) {
		etdev->TxRing.TCBReadyQueueTail->Next = pMpTcb;
	} else {
		/* Apparently ready Q is empty. */
		etdev->TxRing.TCBReadyQueueHead = pMpTcb;
	}

	etdev->TxRing.TCBReadyQueueTail = pMpTcb;

	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);

	DBG_ASSERT(etdev->TxRing.nBusySend >= 0);
}

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @etdev: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
{
	PMP_TCB pMpTcb;
	struct list_head *entry;
	unsigned long flags;
	uint32_t FreeCounter = 0;

	DBG_ENTER(et131x_dbginfo);

	while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
		spin_lock_irqsave(&etdev->SendWaitLock, flags);

		etdev->TxRing.nWaitSend--;
		spin_unlock_irqrestore(&etdev->SendWaitLock, flags);

		entry = etdev->TxRing.SendWaitQueue.next;
	}

	etdev->TxRing.nWaitSend = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	pMpTcb = etdev->TxRing.CurrSendHead;

	while ((pMpTcb != NULL) && (FreeCounter < NUM_TCB)) {
		PMP_TCB pNext = pMpTcb->Next;

		etdev->TxRing.CurrSendHead = pNext;

		if (pNext == NULL)
			etdev->TxRing.CurrSendTail = NULL;

		etdev->TxRing.nBusySend--;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

		DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);

		FreeCounter++;
		et131x_free_send_packet(etdev, pMpTcb);

		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		pMpTcb = etdev->TxRing.CurrSendHead;
	}

	if (FreeCounter == NUM_TCB) {
		DBG_ERROR(et131x_dbginfo,
			  "MpFreeBusySendPackets exited loop for a bad reason\n");
		BUG();
	}

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);

	etdev->TxRing.nBusySend = 0;

	DBG_LEAVE(et131x_dbginfo);
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @etdev: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
{
	DBG_TX_ENTER(et131x_dbginfo);

	/* Mark as completed any packets which have been sent by the device. */
	et131x_update_tcb_list(etdev);

	/* If we queued any transmits because we didn't have any TCBs earlier,
	 * dequeue and send those packets now, as long as we have free TCBs.
	 */
	et131x_check_send_wait_list(etdev);

	DBG_TX_LEAVE(et131x_dbginfo);
}

/**
 * et131x_update_tcb_list - Helper routine for Send Interrupt handler
 * @etdev: pointer to our adapter
 *
 * Re-claims the send resources and completes sends. Can also be called as
 * part of the NIC send routine when the "ServiceComplete" indication has
 * wrapped.
 */
static void et131x_update_tcb_list(struct et131x_adapter *etdev)
{
	unsigned long flags;
	u32 ServiceComplete;
	PMP_TCB pMpTcb;
	u32 index;

	ServiceComplete = readl(&etdev->regs->txdma.NewServiceComplete);
	index = INDEX10(ServiceComplete);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&etdev->TCBSendQLock, flags);

	pMpTcb = etdev->TxRing.CurrSendHead;

	while (pMpTcb &&
	       ((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP) &&
	       index < INDEX10(pMpTcb->WrIndex)) {
		etdev->TxRing.nBusySend--;
		etdev->TxRing.CurrSendHead = pMpTcb->Next;
		if (pMpTcb->Next == NULL)
			etdev->TxRing.CurrSendTail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, pMpTcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Goto the next packet */
		pMpTcb = etdev->TxRing.CurrSendHead;
	}
	while (pMpTcb &&
	       !((ServiceComplete ^ pMpTcb->WrIndex) & ET_DMA10_WRAP)
	       && index > (pMpTcb->WrIndex & ET_DMA10_MASK)) {
		etdev->TxRing.nBusySend--;
		etdev->TxRing.CurrSendHead = pMpTcb->Next;
		if (pMpTcb->Next == NULL)
			etdev->TxRing.CurrSendTail = NULL;

		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
		et131x_free_send_packet(etdev, pMpTcb);
		spin_lock_irqsave(&etdev->TCBSendQLock, flags);

		/* Goto the next packet */
		pMpTcb = etdev->TxRing.CurrSendHead;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (etdev->TxRing.nBusySend <= (NUM_TCB / 3))
		netif_wake_queue(etdev->netdev);
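
	/* NUM_TCB / 3 above is a low-water mark: the queue was presumably
	 * stopped elsewhere (via netif_stop_queue()) when TCBs ran out, and
	 * is only restarted once a comfortable number of TCBs have been
	 * recycled, avoiding flapping the queue state on every completion.
	 */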

	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
}

/**
 * et131x_check_send_wait_list - Helper routine for the interrupt handler
 * @etdev: pointer to our adapter
 *
 * Takes packets from the send wait queue and posts them to the device (if
 * room is available).
 */
static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
{
	unsigned long flags;

	spin_lock_irqsave(&etdev->SendWaitLock, flags);

	while (!list_empty(&etdev->TxRing.SendWaitQueue) &&
	       MP_TCB_RESOURCES_AVAILABLE(etdev)) {
		struct list_head *entry;

		DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n");

		entry = etdev->TxRing.SendWaitQueue.next;

		etdev->TxRing.nWaitSend--;

		DBG_WARNING(et131x_dbginfo,
			    "MpHandleSendInterrupt - sent a queued pkt. Waiting %d\n",
			    etdev->TxRing.nWaitSend);
	}
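
	/* FIXME: the loop above fetches 'entry' but never removes it from
	 * SendWaitQueue or hands it to the send path, so if anything were
	 * ever queued it would spin here. In practice, nothing in this file
	 * appears to add entries to the wait queue.
	 */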

	spin_unlock_irqrestore(&etdev->SendWaitLock, flags);
}