ACPI video: introduce module parameter video.use_bios_initial_backlight
[firefly-linux-kernel-4.4.55.git] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2004 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
28                 issue. Hopefully 3Com will fix it.
29         *) Waiting for a command response takes 8ms due to non-preemptable
30                 polling. Only significant for getting stats and creating
31                 SAs, but an ugly wart never the less.
32
33         TODO:
34         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
35         *) Add more support for ethtool (especially for NIC stats)
36         *) Allow disabling of RX checksum offloading
37         *) Fix MAC changing to work while the interface is up
38                 (Need to put commands on the TX ring, which changes
39                 the locking)
40         *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See
41                 http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org
42 */
43
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 * Overridable at load time via the rx_copybreak module parameter.
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 * Overridable at load time via the use_mmio module parameter.
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;
61
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * There are no ill effects from too-large receive rings.
 *
 * We don't currently use the Hi Tx ring so, don't make it very big.
 *
 * Beware that if we start using the Hi Tx ring, we will need to change
 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
 */
#define TXHI_ENTRIES		2
#define TXLO_ENTRIES		128
#define RX_ENTRIES		32
#define COMMAND_ENTRIES		16
#define RESPONSE_ENTRIES	32

/* Ring sizes in bytes; cmd_desc and resp_desc come from typhoon.h. */
#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES		128
#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536
#define FIRMWARE_NAME		"3com/typhoon.bin"

/* Must be defined before the printk helpers are pulled in below so all
 * log messages get the module-name prefix.
 */
#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
100
101 #include <linux/module.h>
102 #include <linux/kernel.h>
103 #include <linux/sched.h>
104 #include <linux/string.h>
105 #include <linux/timer.h>
106 #include <linux/errno.h>
107 #include <linux/ioport.h>
108 #include <linux/interrupt.h>
109 #include <linux/pci.h>
110 #include <linux/netdevice.h>
111 #include <linux/etherdevice.h>
112 #include <linux/skbuff.h>
113 #include <linux/mm.h>
114 #include <linux/init.h>
115 #include <linux/delay.h>
116 #include <linux/ethtool.h>
117 #include <linux/if_vlan.h>
118 #include <linux/crc32.h>
119 #include <linux/bitops.h>
120 #include <asm/processor.h>
121 #include <asm/io.h>
122 #include <asm/uaccess.h>
123 #include <linux/in6.h>
124 #include <linux/dma-mapping.h>
125 #include <linux/firmware.h>
126 #include <generated/utsrelease.h>
127
128 #include "typhoon.h"
129
/* Module metadata and load-time parameters (perm 0: not visible in sysfs). */
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_VERSION(UTS_RELEASE);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
			       "the buffer given back to the NIC. Default "
			       "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
			   "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
/* NOTE(review): use_mmio is declared unsigned int but registered here with
 * type "int" -- confirm this passes __param_check on current kernels.
 */
module_param(use_mmio, int, 0);

/* The hardware can only scatter/gather across 32 descriptors per TSO
 * packet, so disable TSO if the stack could hand us more fragments.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
151
/* Static description of one card variant: marketing name plus a bitmask
 * of TYPHOON_* capability flags below.
 */
struct typhoon_card_info {
	const char *name;
	const int capabilities;
};

/* Capability flag bits for typhoon_card_info.capabilities. */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10

/* Indexes into typhoon_card_info[]; also used as driver_data in the
 * PCI device table below.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
170
/* directly indexed by enum typhoon_cards, above -- keep entries in the
 * same order as the enum.
 */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
200
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 *
 * Entries with a specific subdevice ID precede the PCI_ANY_ID catch-alls;
 * the last field is the enum typhoon_cards index (driver_data).
 */
static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
237
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 * The struct is __packed so the layout the card sees matches exactly.
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __packed;

/* Host-side bookkeeping for one posted Rx buffer: the skb and the DMA
 * address it was mapped at.
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};
260
/* Per-adapter private state. Fields are grouped into cache-line sections
 * by the path that touches them (Tx, Irq/Rx, general/command).
 */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring 	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;
	u8			awaiting_resp;
	u8			duplex;
	u8			speed;
	u8			card_state;
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring 	txHiRing;
};

/* How typhoon_reset()/typhoon_issue_command() callers may wait:
 * not at all, busy-wait, or sleep between polls.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};
311
/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 * The read is only meaningful over MMIO; it is skipped for Port IO.
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* TSO helpers: compile to no-ops when the kernel has no NETIF_F_TSO. */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO 		0
#define skb_tso_size(x) 	0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
335
336 static inline void
337 typhoon_inc_index(u32 *index, const int count, const int num_entries)
338 {
339         /* Increment a ring index -- we can use this for all rings execept
340          * the Rx rings, as they use different size descriptors
341          * otherwise, everything is the same size as a cmd_desc
342          */
343         *index += count * sizeof(struct cmd_desc);
344         *index %= num_entries * sizeof(struct cmd_desc);
345 }
346
347 static inline void
348 typhoon_inc_cmd_index(u32 *index, const int count)
349 {
350         typhoon_inc_index(index, count, COMMAND_ENTRIES);
351 }
352
353 static inline void
354 typhoon_inc_resp_index(u32 *index, const int count)
355 {
356         typhoon_inc_index(index, count, RESPONSE_ENTRIES);
357 }
358
359 static inline void
360 typhoon_inc_rxfree_index(u32 *index, const int count)
361 {
362         typhoon_inc_index(index, count, RXFREE_ENTRIES);
363 }
364
365 static inline void
366 typhoon_inc_tx_index(u32 *index, const int count)
367 {
368         /* if we start using the Hi Tx ring, this needs updateing */
369         typhoon_inc_index(index, count, TXLO_ENTRIES);
370 }
371
372 static inline void
373 typhoon_inc_rx_index(u32 *index, const int count)
374 {
375         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
376         *index += count * sizeof(struct rx_desc);
377         *index %= RX_ENTRIES * sizeof(struct rx_desc);
378 }
379
/* Soft-reset the 3XP and optionally wait for it to signal
 * WAITING_FOR_HOST. wait_type selects no wait, busy-wait, or sleeping
 * wait (see enum completion_wait_values). Returns 0 on success or
 * -ETIMEDOUT if the card never came back.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* Mask and ack all interrupts before touching the reset register. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* Pulse the reset: assert, let the write post, hold briefly,
	 * then deassert.
	 */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* Mask and ack again -- the reset itself may have raised status. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've seen it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
434
435 static int
436 typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
437 {
438         int i, err = 0;
439
440         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
441                 if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
442                         goto out;
443                 udelay(TYPHOON_UDELAY);
444         }
445
446         err = -ETIMEDOUT;
447
448 out:
449         return err;
450 }
451
452 static inline void
453 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
454 {
455         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
456                 netif_carrier_off(dev);
457         else
458                 netif_carrier_on(dev);
459 }
460
/* Answer the 3XP's keep-alive by posting a HELLO_RESP command with no
 * response expected.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* Make the descriptor globally visible before telling the
		 * card it is ready.
		 */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
481
/* Drain the response ring, copying the sequenced response the caller is
 * waiting for (if any) into resp_save and handling unsolicited
 * responses in place.
 *
 * @tp: adapter state
 * @resp_size: capacity of @resp_save, in resp_desc entries
 * @resp_save: buffer for the awaited response, or NULL if none awaited
 *
 * Returns non-zero once the awaited response has been captured (or when
 * no response was awaited at all), zero if we are still waiting.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
				struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			/* This is the response the caller is waiting for.
			 * If it doesn't fit the caller's buffer, flag the
			 * error but still consume the descriptors.
			 */
			if(count > resp_size) {
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* The response may wrap past the end of the ring;
			 * copy it out in (up to) two pieces.
			 */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* NULL marks the awaited response as captured. */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* Publish how far we've read before any subsequent card writes. */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return resp_save == NULL;
}
540
541 static inline int
542 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
543 {
544         /* this works for all descriptors but rx_desc, as they are a
545          * different size than the cmd_desc -- everyone else is the same
546          */
547         lastWrite /= sizeof(struct cmd_desc);
548         lastRead /= sizeof(struct cmd_desc);
549         return (ringSize + lastRead - lastWrite - 1) % ringSize;
550 }
551
552 static inline int
553 typhoon_num_free_cmd(struct typhoon *tp)
554 {
555         int lastWrite = tp->cmdRing.lastWrite;
556         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
557
558         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
559 }
560
561 static inline int
562 typhoon_num_free_resp(struct typhoon *tp)
563 {
564         int respReady = le32_to_cpu(tp->indexes->respReady);
565         int respCleared = le32_to_cpu(tp->indexes->respCleared);
566
567         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
568 }
569
570 static inline int
571 typhoon_num_free_tx(struct transmit_ring *ring)
572 {
573         /* if we start using the Hi Tx ring, this needs updating */
574         return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
575 }
576
/* Copy one or more command descriptors into the command ring, kick the
 * 3XP, and -- if the command expects a response -- busy-wait (roughly
 * 8ms worst case) for it.
 *
 * @tp: adapter state
 * @num_cmd: number of cmd_desc entries at @cmd
 * @cmd: command plus any extension descriptors
 * @num_resp: capacity of @resp, in resp_desc entries
 * @resp: response buffer, or NULL to discard the response data
 *
 * Returns 0 on success, -ENOMEM if the rings lack space, -ETIMEDOUT if
 * no response arrived in time, or -EIO if the card flagged an error.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* If we're expecting a response, but the caller hasn't given
		 * us a place to put it, we'll provide one.
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* The command may wrap past the end of the ring; copy it in
	 * (up to) two pieces.
	 */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* "I feel a presence... another warrior is on the mesa."
	 * Make the descriptors globally visible, then tell the card.
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
	 * preempt or do anything other than take interrupts. So, don't
	 * wait for a response unless you have to.
	 *
	 * I've thought about trying to sleep here, but we're called
	 * from many contexts that don't allow that. Also, given the way
	 * 3Com has implemented irq coalescing, we would likely timeout --
	 * this has been observed in real life!
	 *
	 * The big killer is we have to wait to get stats from the card,
	 * though we could go to a periodic refresh of those if we don't
	 * mind them getting somewhat stale. The rest of the waiting
	 * commands occur during open/close/suspend/resume, so they aren't
	 * time critical. Creating SAs in the future will also have to
	 * wait here.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Collect the error response even if we don't care about the
	 * rest of the response
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Ugh. If a response was added to the ring between
		 * the call to typhoon_process_response() and the clearing
		 * of tp->awaiting_resp, we could have missed the interrupt
		 * and it could hang in the ring an indeterminate amount of
		 * time. So, check for it, and interrupt ourselves if this
		 * is the case.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
691
/* Write a TCP-segmentation option descriptor into the Tx ring so the
 * 3XP will segment the following packet at the skb's gso size.
 * ring_dma is the bus address of the start of this Tx ring.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	/* Bus address of this descriptor's bytesTx field -- presumably
	 * where the card posts TSO completion status (TODO confirm against
	 * the 3XP descriptor spec).
	 */
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
712
713 static netdev_tx_t
714 typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
715 {
716         struct typhoon *tp = netdev_priv(dev);
717         struct transmit_ring *txRing;
718         struct tx_desc *txd, *first_txd;
719         dma_addr_t skb_dma;
720         int numDesc;
721
722         /* we have two rings to choose from, but we only use txLo for now
723          * If we start using the Hi ring as well, we'll need to update
724          * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
725          * and TXHI_ENTRIES to match, as well as update the TSO code below
726          * to get the right DMA address
727          */
728         txRing = &tp->txLoRing;
729
730         /* We need one descriptor for each fragment of the sk_buff, plus the
731          * one for the ->data area of it.
732          *
733          * The docs say a maximum of 16 fragment descriptors per TCP option
734          * descriptor, then make a new packet descriptor and option descriptor
735          * for the next 16 fragments. The engineers say just an option
736          * descriptor is needed. I've tested up to 26 fragments with a single
737          * packet descriptor/option descriptor combo, so I use that for now.
738          *
739          * If problems develop with TSO, check this first.
740          */
741         numDesc = skb_shinfo(skb)->nr_frags + 1;
742         if (skb_is_gso(skb))
743                 numDesc++;
744
745         /* When checking for free space in the ring, we need to also
746          * account for the initial Tx descriptor, and we always must leave
747          * at least one descriptor unused in the ring so that it doesn't
748          * wrap and look empty.
749          *
750          * The only time we should loop here is when we hit the race
751          * between marking the queue awake and updating the cleared index.
752          * Just loop and it will appear. This comes from the acenic driver.
753          */
754         while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
755                 smp_rmb();
756
757         first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
758         typhoon_inc_tx_index(&txRing->lastWrite, 1);
759
760         first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
761         first_txd->numDesc = 0;
762         first_txd->len = 0;
763         first_txd->tx_addr = (u64)((unsigned long) skb);
764         first_txd->processFlags = 0;
765
766         if(skb->ip_summed == CHECKSUM_PARTIAL) {
767                 /* The 3XP will figure out if this is UDP/TCP */
768                 first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
769                 first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
770                 first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
771         }
772
773         if(vlan_tx_tag_present(skb)) {
774                 first_txd->processFlags |=
775                     TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
776                 first_txd->processFlags |=
777                     cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
778                                 TYPHOON_TX_PF_VLAN_TAG_SHIFT);
779         }
780
781         if (skb_is_gso(skb)) {
782                 first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
783                 first_txd->numDesc++;
784
785                 typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
786         }
787
788         txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
789         typhoon_inc_tx_index(&txRing->lastWrite, 1);
790
791         /* No need to worry about padding packet -- the firmware pads
792          * it with zeros to ETH_ZLEN for us.
793          */
794         if(skb_shinfo(skb)->nr_frags == 0) {
795                 skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
796                                        PCI_DMA_TODEVICE);
797                 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
798                 txd->len = cpu_to_le16(skb->len);
799                 txd->frag.addr = cpu_to_le32(skb_dma);
800                 txd->frag.addrHi = 0;
801                 first_txd->numDesc++;
802         } else {
803                 int i, len;
804
805                 len = skb_headlen(skb);
806                 skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
807                                          PCI_DMA_TODEVICE);
808                 txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
809                 txd->len = cpu_to_le16(len);
810                 txd->frag.addr = cpu_to_le32(skb_dma);
811                 txd->frag.addrHi = 0;
812                 first_txd->numDesc++;
813
814                 for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
815                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
816                         void *frag_addr;
817
818                         txd = (struct tx_desc *) (txRing->ringBase +
819                                                 txRing->lastWrite);
820                         typhoon_inc_tx_index(&txRing->lastWrite, 1);
821
822                         len = frag->size;
823                         frag_addr = (void *) page_address(frag->page) +
824                                                 frag->page_offset;
825                         skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
826                                          PCI_DMA_TODEVICE);
827                         txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
828                         txd->len = cpu_to_le16(len);
829                         txd->frag.addr = cpu_to_le32(skb_dma);
830                         txd->frag.addrHi = 0;
831                         first_txd->numDesc++;
832                 }
833         }
834
835         /* Kick the 3XP
836          */
837         wmb();
838         iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
839
840         /* If we don't have room to put the worst case packet on the
841          * queue, then we must stop the queue. We need 2 extra
842          * descriptors -- one to prevent ring wrap, and one for the
843          * Tx header.
844          */
845         numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;
846
847         if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
848                 netif_stop_queue(dev);
849
850                 /* A Tx complete IRQ could have gotten inbetween, making
851                  * the ring free again. Only need to recheck here, since
852                  * Tx is serialized.
853                  */
854                 if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
855                         netif_wake_queue(dev);
856         }
857
858         return NETDEV_TX_OK;
859 }
860
861 static void
862 typhoon_set_rx_mode(struct net_device *dev)
863 {
864         struct typhoon *tp = netdev_priv(dev);
865         struct cmd_desc xp_cmd;
866         u32 mc_filter[2];
867         __le16 filter;
868
869         filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
870         if(dev->flags & IFF_PROMISC) {
871                 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
872         } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
873                   (dev->flags & IFF_ALLMULTI)) {
874                 /* Too many to match, or accept all multicasts. */
875                 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
876         } else if (!netdev_mc_empty(dev)) {
877                 struct netdev_hw_addr *ha;
878
879                 memset(mc_filter, 0, sizeof(mc_filter));
880                 netdev_for_each_mc_addr(ha, dev) {
881                         int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
882                         mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
883                 }
884
885                 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
886                                          TYPHOON_CMD_SET_MULTICAST_HASH);
887                 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
888                 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
889                 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
890                 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
891
892                 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
893         }
894
895         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
896         xp_cmd.parm1 = filter;
897         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
898 }
899
/* Issue a READ_STATS command to the 3XP and refresh tp->stats from the
 * response. Each counter is added to the snapshot in tp->stats_saved,
 * and link speed/duplex are updated from the reported link status.
 * Returns 0 on success or the negative error from the command exchange.
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	/* The 7-descriptor response is overlaid as one stats structure. */
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* 3Com's Linux driver uses txMultipleCollisions as it's
	 * collisions value, but there is some other collision info as well...
	 *
	 * The extra status reported would be a good candidate for
	 * ethtool_ops->get_{strings,stats}()
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets) +
			saved->tx_packets;
	stats->tx_bytes = le64_to_cpu(s->txBytes) +
			saved->tx_bytes;
	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_errors;
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_carrier_errors;
	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
			saved->collisions;
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
			saved->rx_packets;
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
			saved->rx_bytes;
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
			saved->rx_fifo_errors;
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
			saved->rx_errors;
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
			saved->rx_crc_errors;
	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
			saved->rx_length_errors;
	/* The card only does 10/100, half or full duplex. */
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}
951
952 static struct net_device_stats *
953 typhoon_get_stats(struct net_device *dev)
954 {
955         struct typhoon *tp = netdev_priv(dev);
956         struct net_device_stats *stats = &tp->stats;
957         struct net_device_stats *saved = &tp->stats_saved;
958
959         smp_rmb();
960         if(tp->card_state == Sleeping)
961                 return saved;
962
963         if(typhoon_do_get_stats(tp) < 0) {
964                 netdev_err(dev, "error getting stats\n");
965                 return saved;
966         }
967
968         return stats;
969 }
970
971 static int
972 typhoon_set_mac_address(struct net_device *dev, void *addr)
973 {
974         struct sockaddr *saddr = (struct sockaddr *) addr;
975
976         if(netif_running(dev))
977                 return -EBUSY;
978
979         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
980         return 0;
981 }
982
983 static void
984 typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
985 {
986         struct typhoon *tp = netdev_priv(dev);
987         struct pci_dev *pci_dev = tp->pdev;
988         struct cmd_desc xp_cmd;
989         struct resp_desc xp_resp[3];
990
991         smp_rmb();
992         if(tp->card_state == Sleeping) {
993                 strcpy(info->fw_version, "Sleep image");
994         } else {
995                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
996                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
997                         strcpy(info->fw_version, "Unknown runtime");
998                 } else {
999                         u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
1000                         snprintf(info->fw_version, 32, "%02x.%03x.%03x",
1001                                  sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
1002                                  sleep_ver & 0xfff);
1003                 }
1004         }
1005
1006         strcpy(info->driver, KBUILD_MODNAME);
1007         strcpy(info->version, UTS_RELEASE);
1008         strcpy(info->bus_info, pci_name(pci_dev));
1009 }
1010
1011 static int
1012 typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1013 {
1014         struct typhoon *tp = netdev_priv(dev);
1015
1016         cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1017                                 SUPPORTED_Autoneg;
1018
1019         switch (tp->xcvr_select) {
1020         case TYPHOON_XCVR_10HALF:
1021                 cmd->advertising = ADVERTISED_10baseT_Half;
1022                 break;
1023         case TYPHOON_XCVR_10FULL:
1024                 cmd->advertising = ADVERTISED_10baseT_Full;
1025                 break;
1026         case TYPHOON_XCVR_100HALF:
1027                 cmd->advertising = ADVERTISED_100baseT_Half;
1028                 break;
1029         case TYPHOON_XCVR_100FULL:
1030                 cmd->advertising = ADVERTISED_100baseT_Full;
1031                 break;
1032         case TYPHOON_XCVR_AUTONEG:
1033                 cmd->advertising = ADVERTISED_10baseT_Half |
1034                                             ADVERTISED_10baseT_Full |
1035                                             ADVERTISED_100baseT_Half |
1036                                             ADVERTISED_100baseT_Full |
1037                                             ADVERTISED_Autoneg;
1038                 break;
1039         }
1040
1041         if(tp->capabilities & TYPHOON_FIBER) {
1042                 cmd->supported |= SUPPORTED_FIBRE;
1043                 cmd->advertising |= ADVERTISED_FIBRE;
1044                 cmd->port = PORT_FIBRE;
1045         } else {
1046                 cmd->supported |= SUPPORTED_10baseT_Half |
1047                                         SUPPORTED_10baseT_Full |
1048                                         SUPPORTED_TP;
1049                 cmd->advertising |= ADVERTISED_TP;
1050                 cmd->port = PORT_TP;
1051         }
1052
1053         /* need to get stats to make these link speed/duplex valid */
1054         typhoon_do_get_stats(tp);
1055         cmd->speed = tp->speed;
1056         cmd->duplex = tp->duplex;
1057         cmd->phy_address = 0;
1058         cmd->transceiver = XCVR_INTERNAL;
1059         if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1060                 cmd->autoneg = AUTONEG_ENABLE;
1061         else
1062                 cmd->autoneg = AUTONEG_DISABLE;
1063         cmd->maxtxpkt = 1;
1064         cmd->maxrxpkt = 1;
1065
1066         return 0;
1067 }
1068
1069 static int
1070 typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1071 {
1072         struct typhoon *tp = netdev_priv(dev);
1073         struct cmd_desc xp_cmd;
1074         __le16 xcvr;
1075         int err;
1076
1077         err = -EINVAL;
1078         if(cmd->autoneg == AUTONEG_ENABLE) {
1079                 xcvr = TYPHOON_XCVR_AUTONEG;
1080         } else {
1081                 if(cmd->duplex == DUPLEX_HALF) {
1082                         if(cmd->speed == SPEED_10)
1083                                 xcvr = TYPHOON_XCVR_10HALF;
1084                         else if(cmd->speed == SPEED_100)
1085                                 xcvr = TYPHOON_XCVR_100HALF;
1086                         else
1087                                 goto out;
1088                 } else if(cmd->duplex == DUPLEX_FULL) {
1089                         if(cmd->speed == SPEED_10)
1090                                 xcvr = TYPHOON_XCVR_10FULL;
1091                         else if(cmd->speed == SPEED_100)
1092                                 xcvr = TYPHOON_XCVR_100FULL;
1093                         else
1094                                 goto out;
1095                 } else
1096                         goto out;
1097         }
1098
1099         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1100         xp_cmd.parm1 = xcvr;
1101         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1102         if(err < 0)
1103                 goto out;
1104
1105         tp->xcvr_select = xcvr;
1106         if(cmd->autoneg == AUTONEG_ENABLE) {
1107                 tp->speed = 0xff;       /* invalid */
1108                 tp->duplex = 0xff;      /* invalid */
1109         } else {
1110                 tp->speed = cmd->speed;
1111                 tp->duplex = cmd->duplex;
1112         }
1113
1114 out:
1115         return err;
1116 }
1117
1118 static void
1119 typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1120 {
1121         struct typhoon *tp = netdev_priv(dev);
1122
1123         wol->supported = WAKE_PHY | WAKE_MAGIC;
1124         wol->wolopts = 0;
1125         if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1126                 wol->wolopts |= WAKE_PHY;
1127         if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1128                 wol->wolopts |= WAKE_MAGIC;
1129         memset(&wol->sopass, 0, sizeof(wol->sopass));
1130 }
1131
1132 static int
1133 typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1134 {
1135         struct typhoon *tp = netdev_priv(dev);
1136
1137         if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1138                 return -EINVAL;
1139
1140         tp->wol_events = 0;
1141         if(wol->wolopts & WAKE_PHY)
1142                 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1143         if(wol->wolopts & WAKE_MAGIC)
1144                 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1145
1146         return 0;
1147 }
1148
/* ethtool get_rx_csum hook -- RX checksum offload is reported as
 * permanently enabled; there is no way to turn it off on this hardware.
 */
static u32
typhoon_get_rx_csum(struct net_device *dev)
{
	/* For now, we don't allow turning off RX checksums.
	 */
	return 1;
}
1156
1157 static int
1158 typhoon_set_flags(struct net_device *dev, u32 data)
1159 {
1160         /* There's no way to turn off the RX VLAN offloading and stripping
1161          * on the current 3XP firmware -- it does not respect the offload
1162          * settings -- so we only allow the user to toggle the TX processing.
1163          */
1164         if (!(data & ETH_FLAG_RXVLAN))
1165                 return -EINVAL;
1166
1167         return ethtool_op_set_flags(dev, data,
1168                                     ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
1169 }
1170
1171 static void
1172 typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1173 {
1174         ering->rx_max_pending = RXENT_ENTRIES;
1175         ering->rx_mini_max_pending = 0;
1176         ering->rx_jumbo_max_pending = 0;
1177         ering->tx_max_pending = TXLO_ENTRIES - 1;
1178
1179         ering->rx_pending = RXENT_ENTRIES;
1180         ering->rx_mini_pending = 0;
1181         ering->rx_jumbo_pending = 0;
1182         ering->tx_pending = TXLO_ENTRIES - 1;
1183 }
1184
/* ethtool entry points. Hooks with no driver-specific state use the
 * generic ethtool_op_* helpers.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_rx_csum		= typhoon_get_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= ethtool_op_set_tso,
	.get_ringparam		= typhoon_get_ringparam,
	.set_flags		= typhoon_set_flags,
	.get_flags		= ethtool_op_get_flags,
};
1200
1201 static int
1202 typhoon_wait_interrupt(void __iomem *ioaddr)
1203 {
1204         int i, err = 0;
1205
1206         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1207                 if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1208                    TYPHOON_INTR_BOOTCMD)
1209                         goto out;
1210                 udelay(TYPHOON_UDELAY);
1211         }
1212
1213         err = -ETIMEDOUT;
1214
1215 out:
1216         iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1217         return err;
1218 }
1219
1220 #define shared_offset(x)        offsetof(struct typhoon_shared, x)
1221
/* Fill in the typhoon_interface structure in host shared memory with
 * the bus address and size of every ring, and set up the host-side
 * view of the rings. The card reads this structure at boot time
 * (see typhoon_boot_3XP) to learn where everything lives.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side ring bases point into the same shared area. */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	/* Default offloads: all checksums, TSO, and VLAN tagging. */
	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
	tp->offload |= TYPHOON_OFFLOAD_VLAN;

	spin_lock_init(&tp->command_lock);

	/* Force the writes to the shared memory area out before continuing. */
	wmb();
}
1291
1292 static void
1293 typhoon_init_rings(struct typhoon *tp)
1294 {
1295         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1296
1297         tp->txLoRing.lastWrite = 0;
1298         tp->txHiRing.lastWrite = 0;
1299         tp->rxLoRing.lastWrite = 0;
1300         tp->rxHiRing.lastWrite = 0;
1301         tp->rxBuffRing.lastWrite = 0;
1302         tp->cmdRing.lastWrite = 0;
1303         tp->respRing.lastWrite = 0;
1304
1305         tp->txLoRing.lastRead = 0;
1306         tp->txHiRing.lastRead = 0;
1307 }
1308
1309 static const struct firmware *typhoon_fw;
1310
1311 static int
1312 typhoon_request_firmware(struct typhoon *tp)
1313 {
1314         const struct typhoon_file_header *fHdr;
1315         const struct typhoon_section_header *sHdr;
1316         const u8 *image_data;
1317         u32 numSections;
1318         u32 section_len;
1319         u32 remaining;
1320         int err;
1321
1322         if (typhoon_fw)
1323                 return 0;
1324
1325         err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1326         if (err) {
1327                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1328                            FIRMWARE_NAME);
1329                 return err;
1330         }
1331
1332         image_data = (u8 *) typhoon_fw->data;
1333         remaining = typhoon_fw->size;
1334         if (remaining < sizeof(struct typhoon_file_header))
1335                 goto invalid_fw;
1336
1337         fHdr = (struct typhoon_file_header *) image_data;
1338         if (memcmp(fHdr->tag, "TYPHOON", 8))
1339                 goto invalid_fw;
1340
1341         numSections = le32_to_cpu(fHdr->numSections);
1342         image_data += sizeof(struct typhoon_file_header);
1343         remaining -= sizeof(struct typhoon_file_header);
1344
1345         while (numSections--) {
1346                 if (remaining < sizeof(struct typhoon_section_header))
1347                         goto invalid_fw;
1348
1349                 sHdr = (struct typhoon_section_header *) image_data;
1350                 image_data += sizeof(struct typhoon_section_header);
1351                 section_len = le32_to_cpu(sHdr->len);
1352
1353                 if (remaining < section_len)
1354                         goto invalid_fw;
1355
1356                 image_data += section_len;
1357                 remaining -= section_len;
1358         }
1359
1360         return 0;
1361
1362 invalid_fw:
1363         netdev_err(tp->dev, "Invalid firmware image\n");
1364         release_firmware(typhoon_fw);
1365         typhoon_fw = NULL;
1366         return -EINVAL;
1367 }
1368
/* Download the (already validated) firmware image to the 3XP, one
 * PAGE_SIZE chunk at a time through a DMA bounce buffer, following the
 * card's boot handshake: announce the image with its HMAC digest, then
 * for each chunk wait for WAITING_FOR_SEGMENT, program length/checksum/
 * addresses, and issue SEG_AVAILABLE. The boot-command interrupt is
 * temporarily enabled and unmasked for the handshake and restored on
 * exit. Returns 0 on success or a negative error.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* Save the interrupt state so it can be restored on exit. */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Announce the runtime image: entry point plus its HMAC digest. */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			/* At most one bounce-buffer page per handshake. */
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* Restore the interrupt enable/mask state saved above. */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1509
/* Boot the 3XP once its firmware is loaded: wait for the expected
 * initial status, hand the card the bus address of the shared boot
 * record (set up in typhoon_init_interface), wait for it to report
 * RUNNING, then clear the ready registers and issue the final boot
 * command. Returns 0 or -ETIMEDOUT.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* Point the card at the shared boot record (low 32 bits only;
	 * the high half is written as zero).
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1545
/* Reclaim completed transmit descriptors.
 *
 * Walk the tx ring from txRing->lastRead up to the card's cleared index
 * (*index, little-endian, updated by the NIC).  TYPHOON_TX_DESC entries
 * carry the skb pointer stashed in tx_addr and are freed here;
 * TYPHOON_FRAG_DESC entries carry a DMA mapping that is unmapped here.
 *
 * Returns the new lastRead value; the caller is responsible for storing
 * it back into the ring (see typhoon_tx_complete()).
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* Mark the slot free for reuse by the transmit path. */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1581
/* Clean the tx ring and restart the queue when enough room has opened up.
 *
 * The wake threshold is a worst-case packet (MAX_SKB_FRAGS + 1
 * descriptors) plus slack, so a restarted queue can always post at
 * least one packet.  The smp_wmb() publishes the updated lastRead
 * before other CPUs (the xmit path) observe the woken queue.
 */
static void
typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead;
	int numDesc = MAX_SKB_FRAGS + 1;

	/* This will need changing if we start to use the Hi Tx ring. */
	lastRead = typhoon_clean_tx(tp, txRing, index);
	if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
				lastRead, TXLO_ENTRIES) > (numDesc + 2))
		netif_wake_queue(tp->dev);

	txRing->lastRead = lastRead;
	smp_wmb();
}
1598
/* Return the (still DMA-mapped) rx buffer at @idx to the free-buffer ring.
 *
 * If the free ring has no room (next write slot would collide with the
 * card's cleared index), the skb is dropped instead of recycled.  The
 * wmb() orders the descriptor fill before rxBuffReady is advanced, since
 * the card consumes the entry as soon as the index moves.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1625
/* Allocate and DMA-map a fresh receive skb for slot @idx and post it on
 * the free-buffer ring.
 *
 * Returns 0 on success, -ENOMEM if the free ring is full or the skb
 * allocation fails (rxb->skb is left NULL in that case so
 * typhoon_fill_free_ring() can retry later).  The wmb() orders the
 * descriptor fill before the rxBuffReady index is advanced.
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* Bail if the next write slot would collide with the card's
	 * cleared index -- the free ring is full.
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1672
/* Process up to @budget received packets from @rxRing.
 *
 * @ready / @cleared are the shared producer/consumer indexes for this
 * ring (little-endian, ready written by the card, cleared written back
 * here when we finish).  For each good descriptor:
 *   - packets shorter than rx_copybreak are copied into a fresh skb and
 *     the original buffer is recycled (avoids remapping);
 *   - larger packets hand the mapped skb up directly and a replacement
 *     buffer is allocated.
 * Error descriptors just recycle the buffer.  Hardware IP+TCP/UDP
 * checksum-good bits set CHECKSUM_UNNECESSARY; VLAN-tagged frames get
 * the tag from the descriptor.
 *
 * Returns the number of packets delivered to the stack.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr holds the rxbuffers[] index we posted in
		 * typhoon_alloc_rx_skb() / typhoon_recycle_rx_skb().
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* Copybreak: small packet, copy into an aligned
			 * skb (2-byte reserve) and keep the DMA mapping.
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Hand the mapped buffer up as-is and replenish
			 * the slot with a new allocation.
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Only trust the hardware checksum when both the IP check
		 * and exactly one of TCP/UDP check passed.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			skb_checksum_none_assert(new_skb);

		if (rx->rxStatus & TYPHOON_RX_VLAN)
			__vlan_hwaccel_put_tag(new_skb,
					       ntohl(rx->vlanTag) & 0xffff);
		netif_receive_skb(new_skb);

		received++;
		budget--;
	}
	/* Publish how far we consumed so the card can reuse the slots. */
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1749
1750 static void
1751 typhoon_fill_free_ring(struct typhoon *tp)
1752 {
1753         u32 i;
1754
1755         for(i = 0; i < RXENT_ENTRIES; i++) {
1756                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1757                 if(rxb->skb)
1758                         continue;
1759                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1760                         break;
1761         }
1762 }
1763
/* NAPI poll handler.
 *
 * Drains pending command responses (unless a synchronous command is
 * waiting for them), reclaims completed transmits, then services both
 * rx rings within @budget.  The Hi ring is polled with the full budget
 * and the Lo ring with the remainder.  If the free-buffer ring ran dry
 * it is refilled here.  When less than the budget was used we complete
 * NAPI and unmask the card's interrupt, with a posting read to flush
 * the unmask write.
 *
 * Returns the number of rx packets processed.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Order reading the shared indexes against the card's DMA writes. */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		napi_complete(napi);
		/* Re-enable interrupts now that we are done polling. */
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1804
/* Interrupt handler (shared line).
 *
 * Returns IRQ_NONE if the card is not asserting TYPHOON_INTR_HOST_INT.
 * Otherwise acks the status, masks all card interrupts (posted before
 * scheduling), and kicks NAPI; typhoon_poll() unmasks when done.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* Write back the status bits to acknowledge them. */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		/* Poll already scheduled -- should not happen since the
		 * interrupt stays masked while NAPI is active.
		 */
		netdev_err(dev, "Error, poll already scheduled\n");
	}
	return IRQ_HANDLED;
}
1828
1829 static void
1830 typhoon_free_rx_rings(struct typhoon *tp)
1831 {
1832         u32 i;
1833
1834         for(i = 0; i < RXENT_ENTRIES; i++) {
1835                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1836                 if(rxb->skb) {
1837                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1838                                        PCI_DMA_FROMDEVICE);
1839                         dev_kfree_skb(rxb->skb);
1840                         rxb->skb = NULL;
1841                 }
1842         }
1843 }
1844
/* Put the card into its low-power sleep state.
 *
 * @state:  PCI power state to enter (e.g. PCI_D3hot).
 * @events: wake event mask (little-endian) passed to the firmware.
 *
 * Arms the requested wake events, issues the sleep command, waits for
 * the card to report TYPHOON_STATUS_SLEEPING, then turns off the
 * carrier, enables PCI wake and powers the device down.
 *
 * Returns 0 on success, a negative command error, -ETIMEDOUT if the
 * card never reports sleeping, or the pci_set_power_state() result.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1881
/* Wake the card from sleep back to the "waiting for host" state.
 *
 * Restores PCI power state and config space, then issues the wakeup
 * boot command.  If the wait for TYPHOON_STATUS_WAITING_FOR_HOST times
 * out, or the firmware requires it (TYPHOON_WAKEUP_NEEDS_RESET), fall
 * back to a full reset using @wait_type.
 *
 * Returns 0 on success or the typhoon_reset() result.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
			(tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1902
/* Bring the card up into the running state.
 *
 * Re-initializes the rings, downloads and boots the runtime firmware,
 * then configures it via the command ring: max packet size, MAC
 * address, IRQ coalescing (disabled), transceiver, VLAN ethertype,
 * offload tasks, rx filter, and finally enables Tx and Rx.  On success
 * the card state is set to Running and interrupts are enabled.
 *
 * On any failure the card is reset and the rings are reinitialized so
 * the caller is left with a quiesced device; returns the negative error.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Tell the firmware our station address (split across parm1/parm2). */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Program the current rx filter / multicast list. */
	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Publish the Running state before enabling interrupts. */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1994
/* Take the running card down to a reset, quiesced state.
 *
 * Masks interrupts, disables Rx, drains outstanding Tx (bounded poll of
 * up to TYPHOON_WAIT_TIMEOUT * TYPHOON_UDELAY usec), disables Tx, saves
 * the statistics (so counters survive the next bring-up), halts the
 * firmware and resets the chip with @wait_type.  Any Tx packets still
 * outstanding after the reset are reclaimed by forcing the cleared
 * index and running typhoon_clean_tx().
 *
 * Returns 0 on success or -ETIMEDOUT if the reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2055
/* Transmit watchdog: try to recover the card after a Tx hang.
 *
 * Resets the chip, reclaims all outstanding Tx and Rx buffers, and
 * restarts the runtime.  If either the reset or restart fails, the
 * hardware is reset once more and the carrier turned off so the stack
 * stops generating further timeouts on a dead card.
 */
static void
typhoon_tx_timeout(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
		netdev_warn(dev, "could not reset in tx timeout\n");
		goto truly_dead;
	}

	/* If we ever start using the Hi ring, it will need cleaning too */
	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
	typhoon_free_rx_rings(tp);

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "could not start runtime in tx timeout\n");
		goto truly_dead;
	}

	netif_wake_queue(dev);
	return;

truly_dead:
	/* Reset the hardware, and turn off carrier to avoid more timeouts */
	typhoon_reset(tp->ioaddr, NoWait);
	netif_carrier_off(dev);
}
2083
/* net_device open: bring the interface up.
 *
 * Loads the firmware image, wakes the card, grabs the (shared) IRQ,
 * enables NAPI and starts the runtime.  The unwind path on failure
 * releases the IRQ and then tries to reboot the sleep image and put
 * the card back to sleep; if the reboot fails the card is simply reset.
 *
 * Returns 0 on success or a negative errno.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Try to leave the card in its low-power sleep image. */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2132
/* net_device stop: take the interface down.
 *
 * Stops the queue and NAPI, halts the runtime, releases the IRQ (after
 * which no handler can be running), frees the rx buffers, then boots
 * the sleep image and puts the card into D3hot.  Always returns 0;
 * failures along the way are only logged.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2158
2159 #ifdef CONFIG_PM
/* PCI resume callback: restore a suspended, running interface.
 *
 * A closed interface needs nothing here (it will be woken on open).
 * Otherwise wake the card and restart the runtime; on failure the chip
 * is reset and -EBUSY returned, leaving the device detached.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "critical: could not wake up in resume\n");
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "critical: could not start runtime in resume\n");
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2188
/* PCI suspend callback.
 *
 * A closed interface is already asleep, so return immediately.
 * Otherwise detach the device, stop the runtime, reboot the sleep
 * image, reprogram the MAC address and a minimal rx filter
 * (directed + broadcast) for wake-on-LAN matching, and put the card
 * to sleep with the configured wake events.  Any failure rolls back
 * via typhoon_resume() and returns -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2246 #endif
2247
/* Probe whether MMIO register access works on this card.
 *
 * Maps BAR 1, verifies the card is in the waiting-for-host state, then
 * fires a self-interrupt and checks whether the status register
 * reflects it.  All interrupt state is restored (acked, enable cleared)
 * before unmapping.
 *
 * Returns 1 if MMIO works, 0 to fall back to port IO (also logged).
 */
static int __devinit
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if(!ioaddr)
		goto out;

	if(ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	/* Mask everything, ack any pending bits, and enable all sources
	 * so the self-interrupt can show up in the status register.
	 */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Ok, see if we can change our interrupt status register by
	 * sending ourselves an interrupt. If so, then MMIO works.
	 * The 50usec delay is arbitrary -- it could probably be smaller.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if(val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Restore: mask and ack everything, disable interrupt sources. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if(!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2293
/* net_device operations for the typhoon driver. */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
};
2305
2306 static int __devinit
2307 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2308 {
2309         struct net_device *dev;
2310         struct typhoon *tp;
2311         int card_id = (int) ent->driver_data;
2312         void __iomem *ioaddr;
2313         void *shared;
2314         dma_addr_t shared_dma;
2315         struct cmd_desc xp_cmd;
2316         struct resp_desc xp_resp[3];
2317         int err = 0;
2318         const char *err_msg;
2319
2320         dev = alloc_etherdev(sizeof(*tp));
2321         if(dev == NULL) {
2322                 err_msg = "unable to alloc new net device";
2323                 err = -ENOMEM;
2324                 goto error_out;
2325         }
2326         SET_NETDEV_DEV(dev, &pdev->dev);
2327
2328         err = pci_enable_device(pdev);
2329         if(err < 0) {
2330                 err_msg = "unable to enable device";
2331                 goto error_out_dev;
2332         }
2333
2334         err = pci_set_mwi(pdev);
2335         if(err < 0) {
2336                 err_msg = "unable to set MWI";
2337                 goto error_out_disable;
2338         }
2339
2340         err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2341         if(err < 0) {
2342                 err_msg = "No usable DMA configuration";
2343                 goto error_out_mwi;
2344         }
2345
2346         /* sanity checks on IO and MMIO BARs
2347          */
2348         if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2349                 err_msg = "region #1 not a PCI IO resource, aborting";
2350                 err = -ENODEV;
2351                 goto error_out_mwi;
2352         }
2353         if(pci_resource_len(pdev, 0) < 128) {
2354                 err_msg = "Invalid PCI IO region size, aborting";
2355                 err = -ENODEV;
2356                 goto error_out_mwi;
2357         }
2358         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2359                 err_msg = "region #1 not a PCI MMIO resource, aborting";
2360                 err = -ENODEV;
2361                 goto error_out_mwi;
2362         }
2363         if(pci_resource_len(pdev, 1) < 128) {
2364                 err_msg = "Invalid PCI MMIO region size, aborting";
2365                 err = -ENODEV;
2366                 goto error_out_mwi;
2367         }
2368
2369         err = pci_request_regions(pdev, KBUILD_MODNAME);
2370         if(err < 0) {
2371                 err_msg = "could not request regions";
2372                 goto error_out_mwi;
2373         }
2374
2375         /* map our registers
2376          */
2377         if(use_mmio != 0 && use_mmio != 1)
2378                 use_mmio = typhoon_test_mmio(pdev);
2379
2380         ioaddr = pci_iomap(pdev, use_mmio, 128);
2381         if (!ioaddr) {
2382                 err_msg = "cannot remap registers, aborting";
2383                 err = -EIO;
2384                 goto error_out_regions;
2385         }
2386
2387         /* allocate pci dma space for rx and tx descriptor rings
2388          */
2389         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2390                                       &shared_dma);
2391         if(!shared) {
2392                 err_msg = "could not allocate DMA memory";
2393                 err = -ENOMEM;
2394                 goto error_out_remap;
2395         }
2396
2397         dev->irq = pdev->irq;
2398         tp = netdev_priv(dev);
2399         tp->shared = (struct typhoon_shared *) shared;
2400         tp->shared_dma = shared_dma;
2401         tp->pdev = pdev;
2402         tp->tx_pdev = pdev;
2403         tp->ioaddr = ioaddr;
2404         tp->tx_ioaddr = ioaddr;
2405         tp->dev = dev;
2406
2407         /* Init sequence:
2408          * 1) Reset the adapter to clear any bad juju
2409          * 2) Reload the sleep image
2410          * 3) Boot the sleep image
2411          * 4) Get the hardware address.
2412          * 5) Put the card to sleep.
2413          */
2414         if (typhoon_reset(ioaddr, WaitSleep) < 0) {
2415                 err_msg = "could not reset 3XP";
2416                 err = -EIO;
2417                 goto error_out_dma;
2418         }
2419
2420         /* Now that we've reset the 3XP and are sure it's not going to
2421          * write all over memory, enable bus mastering, and save our
2422          * state for resuming after a suspend.
2423          */
2424         pci_set_master(pdev);
2425         pci_save_state(pdev);
2426
2427         typhoon_init_interface(tp);
2428         typhoon_init_rings(tp);
2429
2430         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2431                 err_msg = "cannot boot 3XP sleep image";
2432                 err = -EIO;
2433                 goto error_out_reset;
2434         }
2435
2436         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2437         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
2438                 err_msg = "cannot read MAC address";
2439                 err = -EIO;
2440                 goto error_out_reset;
2441         }
2442
2443         *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2444         *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2445
2446         if(!is_valid_ether_addr(dev->dev_addr)) {
2447                 err_msg = "Could not obtain valid ethernet address, aborting";
2448                 goto error_out_reset;
2449         }
2450
2451         /* Read the Sleep Image version last, so the response is valid
2452          * later when we print out the version reported.
2453          */
2454         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2455         if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
2456                 err_msg = "Could not get Sleep Image version";
2457                 goto error_out_reset;
2458         }
2459
2460         tp->capabilities = typhoon_card_info[card_id].capabilities;
2461         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2462
2463         /* Typhoon 1.0 Sleep Images return one response descriptor to the
2464          * READ_VERSIONS command. Those versions are OK after waking up
2465          * from sleep without needing a reset. Typhoon 1.1+ Sleep Images
2466          * seem to need a little extra help to get started. Since we don't
2467          * know how to nudge it along, just kick it.
2468          */
2469         if(xp_resp[0].numDesc != 0)
2470                 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2471
2472         if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
2473                 err_msg = "cannot put adapter to sleep";
2474                 err = -EIO;
2475                 goto error_out_reset;
2476         }
2477
2478         /* The chip-specific entries in the device structure. */
2479         dev->netdev_ops         = &typhoon_netdev_ops;
2480         netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2481         dev->watchdog_timeo     = TX_TIMEOUT;
2482
2483         SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
2484
2485         /* We can handle scatter gather, up to 16 entries, and
2486          * we can do IP checksumming (only version 4, doh...)
2487          */
2488         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2489         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2490         dev->features |= NETIF_F_TSO;
2491
2492         if(register_netdev(dev) < 0) {
2493                 err_msg = "unable to register netdev";
2494                 goto error_out_reset;
2495         }
2496
2497         pci_set_drvdata(pdev, dev);
2498
2499         netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2500                     typhoon_card_info[card_id].name,
2501                     use_mmio ? "MMIO" : "IO",
2502                     (unsigned long long)pci_resource_start(pdev, use_mmio),
2503                     dev->dev_addr);
2504
2505         /* xp_resp still contains the response to the READ_VERSIONS command.
2506          * For debugging, let the user know what version he has.
2507          */
2508         if(xp_resp[0].numDesc == 0) {
2509                 /* This is the Typhoon 1.0 type Sleep Image, last 16 bits
2510                  * of version is Month/Day of build.
2511                  */
2512                 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2513                 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2514                             monthday >> 8, monthday & 0xff);
2515         } else if(xp_resp[0].numDesc == 2) {
2516                 /* This is the Typhoon 1.1+ type Sleep Image
2517                  */
2518                 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2519                 u8 *ver_string = (u8 *) &xp_resp[1];
2520                 ver_string[25] = 0;
2521                 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2522                             sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2523                             sleep_ver & 0xfff, ver_string);
2524         } else {
2525                 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2526                             xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2527         }
2528
2529         return 0;
2530
2531 error_out_reset:
2532         typhoon_reset(ioaddr, NoWait);
2533
2534 error_out_dma:
2535         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2536                             shared, shared_dma);
2537 error_out_remap:
2538         pci_iounmap(pdev, ioaddr);
2539 error_out_regions:
2540         pci_release_regions(pdev);
2541 error_out_mwi:
2542         pci_clear_mwi(pdev);
2543 error_out_disable:
2544         pci_disable_device(pdev);
2545 error_out_dev:
2546         free_netdev(dev);
2547 error_out:
2548         pr_err("%s: %s\n", pci_name(pdev), err_msg);
2549         return err;
2550 }
2551
static void __devexit
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* Tear down in reverse order of probe: stop new traffic first, wake
	 * the card (probe left it asleep in D3hot) so register accesses work,
	 * quiesce the 3XP, then release MMIO/DMA/PCI resources before freeing
	 * the netdev. The order below is load-bearing -- do not reshuffle.
	 */
	unregister_netdev(dev);
	pci_set_power_state(pdev, PCI_D0);	/* wake from D3hot sleep */
	pci_restore_state(pdev);		/* config space saved at probe */
	typhoon_reset(tp->ioaddr, NoWait);	/* stop the 3XP processor */
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
2571
2572 static struct pci_driver typhoon_driver = {
2573         .name           = KBUILD_MODNAME,
2574         .id_table       = typhoon_pci_tbl,
2575         .probe          = typhoon_init_one,
2576         .remove         = __devexit_p(typhoon_remove_one),
2577 #ifdef CONFIG_PM
2578         .suspend        = typhoon_suspend,
2579         .resume         = typhoon_resume,
2580 #endif
2581 };
2582
2583 static int __init
2584 typhoon_init(void)
2585 {
2586         return pci_register_driver(&typhoon_driver);
2587 }
2588
2589 static void __exit
2590 typhoon_cleanup(void)
2591 {
2592         if (typhoon_fw)
2593                 release_firmware(typhoon_fw);
2594         pci_unregister_driver(&typhoon_driver);
2595 }
2596
module_init(typhoon_init);	/* registered at module load / boot */
module_exit(typhoon_cleanup);	/* run at module unload */