1 /********************************************************************
2  Filename:      via-ircc.c
3  Version:       1.0 
4  Description:   Driver for the VIA VT8231/VT8233 IrDA chipsets
5  Author:        VIA Technologies,inc
6  Date  :        08/06/2003
7
8 Copyright (c) 1998-2003 VIA Technologies, Inc.
9
10 This program is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free Software
12 Foundation; either version 2, or (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTIES OR REPRESENTATIONS; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
17 See the GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22
23 F01 Oct/02/02: Modify code for V0.11(move out back to back transfer)
24 F02 Oct/28/02: Add SB device ID for 3147 and 3177.
25  Comment :
26        jul/09/2002 : only two kinds of dongle implemented so far.
27        Oct/02/2002 : works on the VT8231 and VT8233.
28        Aug/06/2003 : changed the driver format to a PCI driver.
29
30 2004-02-16: <sda@bdit.de>
31 - Removed unneeded 'legacy' pci stuff.
32 - Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
33 - On speed change from core, don't send SIR frame with new speed. 
34   Use current speed and change speeds later.
35 - Make module-param dongle_id actually work.
36 - New dongle_id 17 (0x11): TDFS4500. Single-ended SIR only. 
37   Tested with home-grown PCB on EPIA boards.
38 - Code cleanup.
39        
40  ********************************************************************/
41 #include <linux/module.h>
42 #include <linux/kernel.h>
43 #include <linux/types.h>
44 #include <linux/skbuff.h>
45 #include <linux/netdevice.h>
46 #include <linux/ioport.h>
47 #include <linux/delay.h>
48 #include <linux/init.h>
49 #include <linux/interrupt.h>
50 #include <linux/rtnetlink.h>
51 #include <linux/pci.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/gfp.h>
54
55 #include <asm/io.h>
56 #include <asm/dma.h>
57 #include <asm/byteorder.h>
58
59 #include <linux/pm.h>
60
61 #include <net/irda/wrapper.h>
62 #include <net/irda/irda.h>
63 #include <net/irda/irda_device.h>
64
65 #include "via-ircc.h"
66
67 #define VIA_MODULE_NAME "via-ircc"
68 #define CHIP_IO_EXTENT 0x40
69
70 static char *driver_name = VIA_MODULE_NAME;
71
72 /* Module parameters */
73 static int qos_mtt_bits = 0x07; /* 1 ms or more */
74 static int dongle_id = 0;       /* default: probe */
75
76 /* We can't guess the type of connected dongle, user *must* supply it. */
77 module_param(dongle_id, int, 0);
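/*
 * Example (assuming the home-grown TFDS4500 setup mentioned in the
 * changelog above): load the module with
 *
 *   modprobe via-ircc dongle_id=17
 *
 * If dongle_id is left at 0, via_ircc_read_dongle_id() below simply
 * falls back to the IBM default, since probing is not implemented.
 */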
78
79 /* Some prototypes */
80 static int via_ircc_open(struct pci_dev *pdev, chipio_t *info,
81                          unsigned int id);
82 static int via_ircc_dma_receive(struct via_ircc_cb *self);
83 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
84                                          int iobase);
85 static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
86                                                 struct net_device *dev);
87 static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
88                                                 struct net_device *dev);
89 static void via_hw_init(struct via_ircc_cb *self);
90 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
91 static irqreturn_t via_ircc_interrupt(int irq, void *dev_id);
92 static int via_ircc_is_receiving(struct via_ircc_cb *self);
93 static int via_ircc_read_dongle_id(int iobase);
94
95 static int via_ircc_net_open(struct net_device *dev);
96 static int via_ircc_net_close(struct net_device *dev);
97 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
98                               int cmd);
99 static void via_ircc_change_dongle_speed(int iobase, int speed,
100                                          int dongle_id);
101 static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
102 static void hwreset(struct via_ircc_cb *self);
103 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
104 static int upload_rxdata(struct via_ircc_cb *self, int iobase);
105 static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id);
106 static void via_remove_one(struct pci_dev *pdev);
107
108 /* FIXME : Should use udelay() instead, even if we are x86 only - Jean II */
109 static void iodelay(int udelay)
110 {
111         u8 data;
112         int i;
113
114         for (i = 0; i < udelay; i++) {
115                 data = inb(0x80);
116         }
117 }
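/*
 * Note: each inb(0x80) above is the traditional dummy read of the
 * diagnostic port and takes roughly one microsecond on ISA/LPC-class
 * hardware, so iodelay(n) approximates udelay(n), hence the FIXME above.
 */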
118
119 static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
120         { PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
121         { PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
122         { PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
123         { PCI_VENDOR_ID_VIA, 0x3147, PCI_ANY_ID, PCI_ANY_ID,0,0,3 },
124         { PCI_VENDOR_ID_VIA, 0x3177, PCI_ANY_ID, PCI_ANY_ID,0,0,4 },
125         { 0, }
126 };
127
128 MODULE_DEVICE_TABLE(pci,via_pci_tbl);
129
130
131 static struct pci_driver via_driver = {
132         .name           = VIA_MODULE_NAME,
133         .id_table       = via_pci_tbl,
134         .probe          = via_init_one,
135         .remove         = via_remove_one,
136 };
137
138
139 /*
140  * Function via_ircc_init ()
141  *
142  *    Initialize the chip. Just find out the chip type and resources.
143  */
144 static int __init via_ircc_init(void)
145 {
146         int rc;
147
148         IRDA_DEBUG(3, "%s()\n", __func__);
149
150         rc = pci_register_driver(&via_driver);
151         if (rc < 0) {
152                 IRDA_DEBUG(0, "%s(): pci_register_driver failed, rc = %d\n",
153                            __func__, rc);
154                 return rc;
155         }
156         return 0;
157 }
158
159 static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
160 {
161         int rc;
162         u8 temp,oldPCI_40,oldPCI_44,bTmp,bTmp1;
163         u16 Chipset,FirDRQ1,FirDRQ0,FirIRQ,FirIOBase;
164         chipio_t info;
165
166         IRDA_DEBUG(2, "%s(): Device ID=(0X%X)\n", __func__, id->device);
167
168         rc = pci_enable_device (pcidev);
169         if (rc) {
170                 IRDA_DEBUG(0, "%s(): error rc = %d\n", __func__, rc);
171                 return rc;
172         }
173
174         // Identify the south bridge
175         if ( ReadLPCReg(0x20) != 0x3C )
176                 Chipset=0x3096;
177         else
178                 Chipset=0x3076;
179
180         if (Chipset==0x3076) {
181                 IRDA_DEBUG(2, "%s(): Chipset = 3076\n", __func__);
182
183                 WriteLPCReg(7,0x0c );
184                 temp=ReadLPCReg(0x30); //check whether the BIOS enabled FIR
185                 if((temp&0x01)==1) {   // BIOS close or no FIR
186                         WriteLPCReg(0x1d, 0x82 );
187                         WriteLPCReg(0x23,0x18);
188                         temp=ReadLPCReg(0xF0);
189                         if((temp&0x01)==0) {
190                                 temp=(ReadLPCReg(0x74)&0x03);    //DMA
191                                 FirDRQ0=temp + 4;
192                                 temp=(ReadLPCReg(0x74)&0x0C) >> 2;
193                                 FirDRQ1=temp + 4;
194                         } else {
195                                 temp=(ReadLPCReg(0x74)&0x0C) >> 2;    //DMA
196                                 FirDRQ0=temp + 4;
197                                 FirDRQ1=FirDRQ0;
198                         }
199                         FirIRQ=(ReadLPCReg(0x70)&0x0f);         //IRQ
200                         FirIOBase=ReadLPCReg(0x60) << 8;        //IO space: high byte
201                         FirIOBase=FirIOBase | ReadLPCReg(0x61); //low byte
203                         info.fir_base=FirIOBase;
204                         info.irq=FirIRQ;
205                         info.dma=FirDRQ1;
206                         info.dma2=FirDRQ0;
207                         pci_read_config_byte(pcidev,0x40,&bTmp);
208                         pci_write_config_byte(pcidev,0x40,((bTmp | 0x08) & 0xfe));
209                         pci_read_config_byte(pcidev,0x42,&bTmp);
210                         pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
211                         pci_write_config_byte(pcidev,0x5a,0xc0);
212                         WriteLPCReg(0x28, 0x70 );
213                         rc = via_ircc_open(pcidev, &info, 0x3076);
215                 } else
216                         rc = -ENODEV; //IR not turned on
217         } else { //Not VT1211
218                 IRDA_DEBUG(2, "%s(): Chipset = 3096\n", __func__);
219
220                 pci_read_config_byte(pcidev,0x67,&bTmp); //check whether the BIOS enabled FIR
221                 if((bTmp&0x01)==1) {  // BIOS enabled FIR
222                         //Enable Double DMA clock
223                         pci_read_config_byte(pcidev,0x42,&oldPCI_40);
224                         pci_write_config_byte(pcidev,0x42,oldPCI_40 | 0x80);
225                         pci_read_config_byte(pcidev,0x40,&oldPCI_40);
226                         pci_write_config_byte(pcidev,0x40,oldPCI_40 & 0xf7);
227                         pci_read_config_byte(pcidev,0x44,&oldPCI_44);
228                         pci_write_config_byte(pcidev,0x44,0x4e);
229   //---------- read configuration from Function0 of south bridge
230                         if((bTmp&0x02)==0) {
231                                 pci_read_config_byte(pcidev,0x44,&bTmp1); //DMA
232                                 FirDRQ0 = (bTmp1 & 0x30) >> 4;
233                                 pci_read_config_byte(pcidev,0x44,&bTmp1);
234                                 FirDRQ1 = (bTmp1 & 0xc0) >> 6;
235                         } else  {
236                                 pci_read_config_byte(pcidev,0x44,&bTmp1);    //DMA
237                                 FirDRQ0 = (bTmp1 & 0x30) >> 4 ;
238                                 FirDRQ1=0;
239                         }
240                         pci_read_config_byte(pcidev,0x47,&bTmp1);  //IRQ
241                         FirIRQ = bTmp1 & 0x0f;
242
243                         pci_read_config_byte(pcidev,0x69,&bTmp);
244                         FirIOBase = bTmp << 8; //high byte
245                         pci_read_config_byte(pcidev,0x68,&bTmp);
246                         FirIOBase = (FirIOBase | bTmp ) & 0xfff0;
247   //-------------------------
248                         info.fir_base=FirIOBase;
249                         info.irq=FirIRQ;
250                         info.dma=FirDRQ1;
251                         info.dma2=FirDRQ0;
252                         rc = via_ircc_open(pcidev, &info, 0x3096);
254                 } else
255                         rc = -ENODEV; //IR not turned on
256         }//Not VT1211
257
258         IRDA_DEBUG(2, "%s(): End - rc = %d\n", __func__, rc);
259         return rc;
260 }
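/*
 * Summary of the resource discovery above (a reading of the code, not
 * authoritative hardware documentation): on the VT1211/0x3076 path the
 * FIR I/O base, IRQ and DMA channels are read from LPC configuration
 * registers (ReadLPCReg), while on the 0x3096 path they come from the
 * south bridge's PCI configuration space (pci_read_config_byte).
 * Either way the results end up in the chipio_t passed to
 * via_ircc_open().
 */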
261
262 static void __exit via_ircc_cleanup(void)
263 {
264         IRDA_DEBUG(3, "%s()\n", __func__);
265
266         /* Cleanup all instances of the driver */
267         pci_unregister_driver (&via_driver); 
268 }
269
270 static const struct net_device_ops via_ircc_sir_ops = {
271         .ndo_start_xmit = via_ircc_hard_xmit_sir,
272         .ndo_open = via_ircc_net_open,
273         .ndo_stop = via_ircc_net_close,
274         .ndo_do_ioctl = via_ircc_net_ioctl,
275 };
276 static const struct net_device_ops via_ircc_fir_ops = {
277         .ndo_start_xmit = via_ircc_hard_xmit_fir,
278         .ndo_open = via_ircc_net_open,
279         .ndo_stop = via_ircc_net_close,
280         .ndo_do_ioctl = via_ircc_net_ioctl,
281 };
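/*
 * The device starts out with via_ircc_sir_ops (see via_ircc_open());
 * via_ircc_change_speed() swaps in via_ircc_fir_ops for speeds above
 * 115200 and switches back to the SIR ops otherwise, so the xmit path
 * always matches the current controller mode.
 */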
282
283 /*
284  * Function via_ircc_open(pdev, iobase, irq)
285  *
286  *    Open driver instance
287  *
288  */
289 static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
290 {
291         struct net_device *dev;
292         struct via_ircc_cb *self;
293         int err;
294
295         IRDA_DEBUG(3, "%s()\n", __func__);
296
297         /* Allocate new instance of the driver */
298         dev = alloc_irdadev(sizeof(struct via_ircc_cb));
299         if (dev == NULL) 
300                 return -ENOMEM;
301
302         self = netdev_priv(dev);
303         self->netdev = dev;
304         spin_lock_init(&self->lock);
305
306         pci_set_drvdata(pdev, self);
307
308         /* Initialize Resource */
309         self->io.cfg_base = info->cfg_base;
310         self->io.fir_base = info->fir_base;
311         self->io.irq = info->irq;
312         self->io.fir_ext = CHIP_IO_EXTENT;
313         self->io.dma = info->dma;
314         self->io.dma2 = info->dma2;
315         self->io.fifo_size = 32;
316         self->chip_id = id;
317         self->st_fifo.len = 0;
318         self->RxDataReady = 0;
319
320         /* Reserve the ioports that we need */
321         if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) {
322                 IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
323                            __func__, self->io.fir_base);
324                 err = -ENODEV;
325                 goto err_out1;
326         }
327         
328         /* Initialize QoS for this device */
329         irda_init_max_qos_capabilies(&self->qos);
330
331         /* Check if user has supplied the dongle id or not */
332         if (!dongle_id)
333                 dongle_id = via_ircc_read_dongle_id(self->io.fir_base);
334         self->io.dongle_id = dongle_id;
335
336         /* The only value we must override is the baudrate */
337         /* Maximum speeds and capabilities are dongle-dependent. */
338         switch( self->io.dongle_id ){
339         case 0x0d:
340                 self->qos.baud_rate.bits =
341                     IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200 |
342                     IR_576000 | IR_1152000 | (IR_4000000 << 8);
343                 break;
344         default:
345                 self->qos.baud_rate.bits =
346                     IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
347                 break;
348         }
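        /*
         * Note on the encoding (as defined by the IrDA QoS layer, not by
         * this driver): baud_rate.bits is a 16-bit field whose low byte
         * holds the SIR/MIR rates (IR_9600 .. IR_1152000) and whose high
         * byte holds the FIR rates, which is why IR_4000000 is shifted
         * left by 8 above.
         */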
349
350         /* Following was used for testing:
351          *
352          *   self->qos.baud_rate.bits = IR_9600;
353          *
354          * It is no good, as it prohibits (error-prone) speed changes.
355          */
356
357         self->qos.min_turn_time.bits = qos_mtt_bits;
358         irda_qos_bits_to_value(&self->qos);
359
360         /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
361         self->rx_buff.truesize = 14384 + 2048;
362         self->tx_buff.truesize = 14384 + 2048;
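        /*
         * Worked example of the sizing formula in the comment above,
         * assuming data_size = 2048 and window_size = 7:
         * (2048 + 6) * 7 + 6 = 14384 bytes; the extra 2048 appears to
         * be headroom on top of that.
         */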
363
364         /* Allocate memory if needed */
365         self->rx_buff.head =
366                 dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
367                                    &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
368         if (self->rx_buff.head == NULL) {
369                 err = -ENOMEM;
370                 goto err_out2;
371         }
372
373         self->tx_buff.head =
374                 dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
375                                    &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
376         if (self->tx_buff.head == NULL) {
377                 err = -ENOMEM;
378                 goto err_out3;
379         }
380
381         self->rx_buff.in_frame = FALSE;
382         self->rx_buff.state = OUTSIDE_FRAME;
383         self->tx_buff.data = self->tx_buff.head;
384         self->rx_buff.data = self->rx_buff.head;
385
386         /* Reset Tx queue info */
387         self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
388         self->tx_fifo.tail = self->tx_buff.head;
389
390         /* Override the network functions we need to use */
391         dev->netdev_ops = &via_ircc_sir_ops;
392
393         err = register_netdev(dev);
394         if (err)
395                 goto err_out4;
396
397         IRDA_MESSAGE("IrDA: Registered device %s (via-ircc)\n", dev->name);
398
399         /* Initialise the hardware..
400         */
401         self->io.speed = 9600;
402         via_hw_init(self);
403         return 0;
404  err_out4:
405         dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
406                           self->tx_buff.head, self->tx_buff_dma);
407  err_out3:
408         dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
409                           self->rx_buff.head, self->rx_buff_dma);
410  err_out2:
411         release_region(self->io.fir_base, self->io.fir_ext);
412  err_out1:
413         pci_set_drvdata(pdev, NULL);
414         free_netdev(dev);
415         return err;
416 }
417
418 /*
419  * Function via_remove_one(pdev)
420  *
421  *    Close driver instance
422  *
423  */
424 static void via_remove_one(struct pci_dev *pdev)
425 {
426         struct via_ircc_cb *self = pci_get_drvdata(pdev);
427         int iobase;
428
429         IRDA_DEBUG(3, "%s()\n", __func__);
430
431         iobase = self->io.fir_base;
432
433         ResetChip(iobase, 5);   //hardware reset.
434         /* Remove netdevice */
435         unregister_netdev(self->netdev);
436
437         /* Release the PORT that this driver is using */
438         IRDA_DEBUG(2, "%s(), Releasing Region %03x\n",
439                    __func__, self->io.fir_base);
440         release_region(self->io.fir_base, self->io.fir_ext);
441         if (self->tx_buff.head)
442                 dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
443                                   self->tx_buff.head, self->tx_buff_dma);
444         if (self->rx_buff.head)
445                 dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
446                                   self->rx_buff.head, self->rx_buff_dma);
447         pci_set_drvdata(pdev, NULL);
448
449         free_netdev(self->netdev);
450
451         pci_disable_device(pdev);
452 }
453
454 /*
455  * Function via_hw_init(self)
456  *
457  *    Bring the hardware up in its default 9600 bps SIR state.
458  *
459  * Formerly via_ircc_setup 
460  */
461 static void via_hw_init(struct via_ircc_cb *self)
462 {
463         int iobase = self->io.fir_base;
464
465         IRDA_DEBUG(3, "%s()\n", __func__);
466
467         SetMaxRxPacketSize(iobase, 0x0fff);     //set to max:4095
468         // FIFO Init
469         EnRXFIFOReadyInt(iobase, OFF);
470         EnRXFIFOHalfLevelInt(iobase, OFF);
471         EnTXFIFOHalfLevelInt(iobase, OFF);
472         EnTXFIFOUnderrunEOMInt(iobase, ON);
473         EnTXFIFOReadyInt(iobase, OFF);
474         InvertTX(iobase, OFF);
475         InvertRX(iobase, OFF);
476
477         if (ReadLPCReg(0x20) == 0x3c)
478                 WriteLPCReg(0xF0, 0);   // for VT1211
479         /* Int Init */
480         EnRXSpecInt(iobase, ON);
481
482         /* The following is basically hwreset */
483         /* If this is the case, why not just call hwreset() ? Jean II */
484         ResetChip(iobase, 5);
485         EnableDMA(iobase, OFF);
486         EnableTX(iobase, OFF);
487         EnableRX(iobase, OFF);
488         EnRXDMA(iobase, OFF);
489         EnTXDMA(iobase, OFF);
490         RXStart(iobase, OFF);
491         TXStart(iobase, OFF);
492         InitCard(iobase);
493         CommonInit(iobase);
494         SIRFilter(iobase, ON);
495         SetSIR(iobase, ON);
496         CRC16(iobase, ON);
497         EnTXCRC(iobase, 0);
498         WriteReg(iobase, I_ST_CT_0, 0x00);
499         SetBaudRate(iobase, 9600);
500         SetPulseWidth(iobase, 12);
501         SetSendPreambleCount(iobase, 0);
502
503         self->io.speed = 9600;
504         self->st_fifo.len = 0;
505
506         via_ircc_change_dongle_speed(iobase, self->io.speed,
507                                      self->io.dongle_id);
508
509         WriteReg(iobase, I_ST_CT_0, 0x80);
510 }
511
512 /*
513  * Function via_ircc_read_dongle_id (void)
514  *
515  */
516 static int via_ircc_read_dongle_id(int iobase)
517 {
518         int dongle_id = 9;      /* Default to IBM */
519
520         IRDA_ERROR("via-ircc: dongle probing not supported, please specify dongle_id module parameter.\n");
521         return dongle_id;
522 }
523
524 /*
525  * Function via_ircc_change_dongle_speed (iobase, speed, dongle_id)
526  *    Change the speed of the attached dongle.
527  *    Only a handful of dongle types are implemented so far.
528  */
529 static void via_ircc_change_dongle_speed(int iobase, int speed,
530                                          int dongle_id)
531 {
532         u8 mode = 0;
533
534         /* speed is unused, as we use IsSIROn()/IsMIROn() */
535         speed = speed;
536
537         IRDA_DEBUG(1, "%s(): change_dongle_speed to %d for 0x%x, %d\n",
538                    __func__, speed, iobase, dongle_id);
539
540         switch (dongle_id) {
541
542                 /* Note: The dongle_id's listed here are derived from
543                  * nsc-ircc.c */ 
544
545         case 0x08:              /* HP HSDL-2300, HP HSDL-3600/HSDL-3610 */
546                 UseOneRX(iobase, ON);   // use one RX pin   RX1,RX2
547                 InvertTX(iobase, OFF);
548                 InvertRX(iobase, OFF);
549
550                 EnRX2(iobase, ON);      //sir to rx2
551                 EnGPIOtoRX2(iobase, OFF);
552
553                 if (IsSIROn(iobase)) {  //sir
554                         // Mode select Off
555                         SlowIRRXLowActive(iobase, ON);
556                         udelay(1000);
557                         SlowIRRXLowActive(iobase, OFF);
558                 } else {
559                         if (IsMIROn(iobase)) {  //mir
560                                 // Mode select On
561                                 SlowIRRXLowActive(iobase, OFF);
562                                 udelay(20);
563                         } else {        // fir
564                                 if (IsFIROn(iobase)) {  //fir
565                                         // Mode select On
566                                         SlowIRRXLowActive(iobase, OFF);
567                                         udelay(20);
568                                 }
569                         }
570                 }
571                 break;
572
573         case 0x09:              /* IBM31T1100 or Temic TFDS6000/TFDS6500 */
574                 UseOneRX(iobase, ON);   //use ONE RX....RX1
575                 InvertTX(iobase, OFF);
576                 InvertRX(iobase, OFF);  // invert RX pin
577
578                 EnRX2(iobase, ON);
579                 EnGPIOtoRX2(iobase, OFF);
580                 if (IsSIROn(iobase)) {  //sir
581                         // Mode select On
582                         SlowIRRXLowActive(iobase, ON);
583                         udelay(20);
584                         // Mode select Off
585                         SlowIRRXLowActive(iobase, OFF);
586                 }
587                 if (IsMIROn(iobase)) {  //mir
588                         // Mode select On
589                         SlowIRRXLowActive(iobase, OFF);
590                         udelay(20);
591                         // Mode select Off
592                         SlowIRRXLowActive(iobase, ON);
593                 } else {        // fir
594                         if (IsFIROn(iobase)) {  //fir
595                                 // Mode select On
596                                 SlowIRRXLowActive(iobase, OFF);
597                                 // TX On
598                                 WriteTX(iobase, ON);
599                                 udelay(20);
600                                 // Mode select OFF
601                                 SlowIRRXLowActive(iobase, ON);
602                                 udelay(20);
603                                 // TX Off
604                                 WriteTX(iobase, OFF);
605                         }
606                 }
607                 break;
608
609         case 0x0d:
610                 UseOneRX(iobase, OFF);  // use two RX pin   RX1,RX2
611                 InvertTX(iobase, OFF);
612                 InvertRX(iobase, OFF);
613                 SlowIRRXLowActive(iobase, OFF);
614                 if (IsSIROn(iobase)) {  //sir
615                         EnGPIOtoRX2(iobase, OFF);
616                         WriteGIO(iobase, OFF);
617                         EnRX2(iobase, OFF);     //sir to rx2
618                 } else {        // fir mir
619                         EnGPIOtoRX2(iobase, OFF);
620                         WriteGIO(iobase, OFF);
621                         EnRX2(iobase, OFF);     //fir to rx
622                 }
623                 break;
624
625         case 0x11:              /* Temic TFDS4500 */
626
627                 IRDA_DEBUG(2, "%s: Temic TFDS4500: One RX pin, TX normal, RX inverted.\n", __func__);
628
629                 UseOneRX(iobase, ON);   //use ONE RX....RX1
630                 InvertTX(iobase, OFF);
631                 InvertRX(iobase, ON);   // invert RX pin
632         
633                 EnRX2(iobase, ON);      //sir to rx2
634                 EnGPIOtoRX2(iobase, OFF);
635
636                 if( IsSIROn(iobase) ){  //sir
637
638                         // Mode select On
639                         SlowIRRXLowActive(iobase, ON);
640                         udelay(20);
641                         // Mode select Off
642                         SlowIRRXLowActive(iobase, OFF);
643
644                 } else{
645                         IRDA_DEBUG(0, "%s: Warning: TFDS4500 not running in SIR mode !\n", __func__);
646                 }
647                 break;
648
649         case 0x0ff:             /* Vishay */
650                 if (IsSIROn(iobase))
651                         mode = 0;
652                 else if (IsMIROn(iobase))
653                         mode = 1;
654                 else if (IsFIROn(iobase))
655                         mode = 2;
656                 else if (IsVFIROn(iobase))
657                         mode = 5;       //VFIR-16
658                 SI_SetMode(iobase, mode);
659                 break;
660
661         default:
662                 IRDA_ERROR("%s: Error: dongle_id %d unsupported !\n",
663                            __func__, dongle_id);
664         }
665 }
666
667 /*
668  * Function via_ircc_change_speed (self, baud)
669  *
670  *    Change the speed of the device
671  *
672  */
673 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed)
674 {
675         struct net_device *dev = self->netdev;
676         u16 iobase;
677         u8 value = 0, bTmp;
678
679         iobase = self->io.fir_base;
680         /* Update accounting for new speed */
681         self->io.speed = speed;
682         IRDA_DEBUG(1, "%s: change_speed to %d bps.\n", __func__, speed);
683
684         WriteReg(iobase, I_ST_CT_0, 0x0);
685
686         /* Controller mode selection */
687         switch (speed) {
688         case 2400:
689         case 9600:
690         case 19200:
691         case 38400:
692         case 57600:
693         case 115200:
694                 value = (115200/speed)-1;
695                 SetSIR(iobase, ON);
696                 CRC16(iobase, ON);
697                 break;
698         case 576000:
699                 /* FIXME: this can't be right, as it's the same as 115200,
700                  * and 576000 is MIR, not SIR. */
701                 value = 0;
702                 SetSIR(iobase, ON);
703                 CRC16(iobase, ON);
704                 break;
705         case 1152000:
706                 value = 0;
707                 SetMIR(iobase, ON);
708                 /* FIXME: CRC ??? */
709                 break;
710         case 4000000:
711                 value = 0;
712                 SetFIR(iobase, ON);
713                 SetPulseWidth(iobase, 0);
714                 SetSendPreambleCount(iobase, 14);
715                 CRC16(iobase, OFF);
716                 EnTXCRC(iobase, ON);
717                 break;
718         case 16000000:
719                 value = 0;
720                 SetVFIR(iobase, ON);
721                 /* FIXME: CRC ??? */
722                 break;
723         default:
724                 value = 0;
725                 break;
726         }
727
728         /* Set baudrate to 0x19[2..7] */
729         bTmp = (ReadReg(iobase, I_CF_H_1) & 0x03);
730         bTmp |= value << 2;
731         WriteReg(iobase, I_CF_H_1, bTmp);
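        /*
         * Example of the divisor written above: at 19200 bps,
         * value = 115200 / 19200 - 1 = 5, so bits [7:2] of I_CF_H_1
         * become 5 while its low two bits are preserved.
         */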
732
733         /* Some dongles may need to be informed about speed changes. */
734         via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id);
735
736         /* Set FIFO size to 64 */
737         SetFIFO(iobase, 64);
738
739         /* Enable IR */
740         WriteReg(iobase, I_ST_CT_0, 0x80);
741
742         // EnTXFIFOHalfLevelInt(iobase,ON);
743
744         /* Enable some interrupts so we can receive frames */
745         //EnAllInt(iobase,ON);
746
747         if (IsSIROn(iobase)) {
748                 SIRFilter(iobase, ON);
749                 SIRRecvAny(iobase, ON);
750         } else {
751                 SIRFilter(iobase, OFF);
752                 SIRRecvAny(iobase, OFF);
753         }
754
755         if (speed > 115200) {
756                 /* Install FIR xmit handler */
757                 dev->netdev_ops = &via_ircc_fir_ops;
758                 via_ircc_dma_receive(self);
759         } else {
760                 /* Install SIR xmit handler */
761                 dev->netdev_ops = &via_ircc_sir_ops;
762         }
763         netif_wake_queue(dev);
764 }
765
766 /*
767  * Function via_ircc_hard_xmit (skb, dev)
768  *
769  *    Transmit the frame!
770  *
771  */
772 static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
773                                                 struct net_device *dev)
774 {
775         struct via_ircc_cb *self;
776         unsigned long flags;
777         u16 iobase;
778         __u32 speed;
779
780         self = netdev_priv(dev);
781         IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;);
782         iobase = self->io.fir_base;
783
784         netif_stop_queue(dev);
785         /* Check if we need to change the speed */
786         speed = irda_get_next_speed(skb);
787         if ((speed != self->io.speed) && (speed != -1)) {
788                 /* Check for empty frame */
789                 if (!skb->len) {
790                         via_ircc_change_speed(self, speed);
791                         dev->trans_start = jiffies;
792                         dev_kfree_skb(skb);
793                         return NETDEV_TX_OK;
794                 } else
795                         self->new_speed = speed;
796         }
797         InitCard(iobase);
798         CommonInit(iobase);
799         SIRFilter(iobase, ON);
800         SetSIR(iobase, ON);
801         CRC16(iobase, ON);
802         EnTXCRC(iobase, 0);
803         WriteReg(iobase, I_ST_CT_0, 0x00);
804
805         spin_lock_irqsave(&self->lock, flags);
806         self->tx_buff.data = self->tx_buff.head;
807         self->tx_buff.len =
808             async_wrap_skb(skb, self->tx_buff.data,
809                            self->tx_buff.truesize);
810
811         dev->stats.tx_bytes += self->tx_buff.len;
812         /* Send this frame with old speed */
813         SetBaudRate(iobase, self->io.speed);
814         SetPulseWidth(iobase, 12);
815         SetSendPreambleCount(iobase, 0);
816         WriteReg(iobase, I_ST_CT_0, 0x80);
817
818         EnableTX(iobase, ON);
819         EnableRX(iobase, OFF);
820
821         ResetChip(iobase, 0);
822         ResetChip(iobase, 1);
823         ResetChip(iobase, 2);
824         ResetChip(iobase, 3);
825         ResetChip(iobase, 4);
826
827         EnAllInt(iobase, ON);
828         EnTXDMA(iobase, ON);
829         EnRXDMA(iobase, OFF);
830
831         irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
832                        DMA_TX_MODE);
833
834         SetSendByte(iobase, self->tx_buff.len);
835         RXStart(iobase, OFF);
836         TXStart(iobase, ON);
837
838         dev->trans_start = jiffies;
839         spin_unlock_irqrestore(&self->lock, flags);
840         dev_kfree_skb(skb);
841         return NETDEV_TX_OK;
842 }
843
844 static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
845                                                 struct net_device *dev)
846 {
847         struct via_ircc_cb *self;
848         u16 iobase;
849         __u32 speed;
850         unsigned long flags;
851
852         self = netdev_priv(dev);
853         iobase = self->io.fir_base;
854
855         if (self->st_fifo.len)
856                 return NETDEV_TX_OK;
857         if (self->chip_id == 0x3076)
858                 iodelay(1500);
859         else
860                 udelay(1500);
861         netif_stop_queue(dev);
862         speed = irda_get_next_speed(skb);
863         if ((speed != self->io.speed) && (speed != -1)) {
864                 if (!skb->len) {
865                         via_ircc_change_speed(self, speed);
866                         dev->trans_start = jiffies;
867                         dev_kfree_skb(skb);
868                         return NETDEV_TX_OK;
869                 } else
870                         self->new_speed = speed;
871         }
872         spin_lock_irqsave(&self->lock, flags);
873         self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
874         self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
875
876         self->tx_fifo.tail += skb->len;
877         dev->stats.tx_bytes += skb->len;
878         skb_copy_from_linear_data(skb,
879                       self->tx_fifo.queue[self->tx_fifo.free].start, skb->len);
880         self->tx_fifo.len++;
881         self->tx_fifo.free++;
882 //F01   if (self->tx_fifo.len == 1) {
883         via_ircc_dma_xmit(self, iobase);
884 //F01   }
885 //F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
886         dev->trans_start = jiffies;
887         dev_kfree_skb(skb);
888         spin_unlock_irqrestore(&self->lock, flags);
889         return NETDEV_TX_OK;
890
891 }
892
893 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase)
894 {
895         EnTXDMA(iobase, OFF);
896         self->io.direction = IO_XMIT;
897         EnPhys(iobase, ON);
898         EnableTX(iobase, ON);
899         EnableRX(iobase, OFF);
900         ResetChip(iobase, 0);
901         ResetChip(iobase, 1);
902         ResetChip(iobase, 2);
903         ResetChip(iobase, 3);
904         ResetChip(iobase, 4);
905         EnAllInt(iobase, ON);
906         EnTXDMA(iobase, ON);
907         EnRXDMA(iobase, OFF);
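        /*
         * The DMA address handed to irda_setup_dma() below is the bus
         * address of the queued frame: the frame's offset inside the
         * coherent tx_buff (queue[ptr].start - tx_buff.head) added to
         * tx_buff_dma.
         */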
908         irda_setup_dma(self->io.dma,
909                        ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start -
910                         self->tx_buff.head) + self->tx_buff_dma,
911                        self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE);
912         IRDA_DEBUG(1, "%s: tx_fifo.ptr=%x,len=%x,tx_fifo.len=%x..\n",
913                    __func__, self->tx_fifo.ptr,
914                    self->tx_fifo.queue[self->tx_fifo.ptr].len,
915                    self->tx_fifo.len);
916
917         SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len);
918         RXStart(iobase, OFF);
919         TXStart(iobase, ON);
920         return 0;
921
922 }
923
924 /*
925  * Function via_ircc_dma_xmit_complete (self)
926  *
927  *    The transfer of a frame is finished. This function will only be called
928  *    by the interrupt handler
929  *
930  */
931 static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
932 {
933         int iobase;
934         int ret = TRUE;
935         u8 Tx_status;
936
937         IRDA_DEBUG(3, "%s()\n", __func__);
938
939         iobase = self->io.fir_base;
940         /* Disable DMA */
941 //      DisableDmaChannel(self->io.dma);
942         /* Check for underrun! */
943         /* Clear bit, by writing 1 into it */
944         Tx_status = GetTXStatus(iobase);
945         if (Tx_status & 0x08) {
946                 self->netdev->stats.tx_errors++;
947                 self->netdev->stats.tx_fifo_errors++;
948                 hwreset(self);
949         /* how to clear underrun? */
950         } else {
951                 self->netdev->stats.tx_packets++;
952                 ResetChip(iobase, 3);
953                 ResetChip(iobase, 4);
954         }
955         /* Check if we need to change the speed */
956         if (self->new_speed) {
957                 via_ircc_change_speed(self, self->new_speed);
958                 self->new_speed = 0;
959         }
960
961         /* Finished with this frame, so prepare for next */
962         if (IsFIROn(iobase)) {
963                 if (self->tx_fifo.len) {
964                         self->tx_fifo.len--;
965                         self->tx_fifo.ptr++;
966                 }
967         }
968         IRDA_DEBUG(1,
969                    "%s: tx_fifo.len=%x ,tx_fifo.ptr=%x,tx_fifo.free=%x...\n",
970                    __func__,
971                    self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free);
972 /* F01_S
973         // Any frames to be sent back-to-back? 
974         if (self->tx_fifo.len) {
975                 // Not finished yet! 
976                 via_ircc_dma_xmit(self, iobase);
977                 ret = FALSE;
978         } else { 
979 F01_E*/
980         // Reset Tx FIFO info 
981         self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
982         self->tx_fifo.tail = self->tx_buff.head;
983 //F01   }
984
985         // Make sure we have room for more frames 
986 //F01   if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) {
987         // Not busy transmitting anymore 
988         // Tell the network layer, that we can accept more frames 
989         netif_wake_queue(self->netdev);
990 //F01   }
991         return ret;
992 }
993
994 /*
995  * Function via_ircc_dma_receive (self)
996  *
997  *    Configure the controller to receive a frame.
998  *
999  */
1000 static int via_ircc_dma_receive(struct via_ircc_cb *self)
1001 {
1002         int iobase;
1003
1004         iobase = self->io.fir_base;
1005
1006         IRDA_DEBUG(3, "%s()\n", __func__);
1007
1008         self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
1009         self->tx_fifo.tail = self->tx_buff.head;
1010         self->RxDataReady = 0;
1011         self->io.direction = IO_RECV;
1012         self->rx_buff.data = self->rx_buff.head;
1013         self->st_fifo.len = self->st_fifo.pending_bytes = 0;
1014         self->st_fifo.tail = self->st_fifo.head = 0;
1015
1016         EnPhys(iobase, ON);
1017         EnableTX(iobase, OFF);
1018         EnableRX(iobase, ON);
1019
1020         ResetChip(iobase, 0);
1021         ResetChip(iobase, 1);
1022         ResetChip(iobase, 2);
1023         ResetChip(iobase, 3);
1024         ResetChip(iobase, 4);
1025
1026         EnAllInt(iobase, ON);
1027         EnTXDMA(iobase, OFF);
1028         EnRXDMA(iobase, ON);
1029         irda_setup_dma(self->io.dma2, self->rx_buff_dma,
1030                   self->rx_buff.truesize, DMA_RX_MODE);
1031         TXStart(iobase, OFF);
1032         RXStart(iobase, ON);
1033
1034         return 0;
1035 }
1036
1037 /*
1038  * Function via_ircc_dma_receive_complete (self)
1039  *
1040  *    The controller has finished receiving frames;
1041  *    this routine is called by the ISR.
1042  *    
1043  */
1044 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
1045                                          int iobase)
1046 {
1047         struct st_fifo *st_fifo;
1048         struct sk_buff *skb;
1049         int len, i;
1050         u8 status = 0;
1051
1052         iobase = self->io.fir_base;
1053         st_fifo = &self->st_fifo;
1054
1055         if (self->io.speed < 4000000) { //Speed below FIR
1056                 len = GetRecvByte(iobase, self);
1057                 skb = dev_alloc_skb(len + 1);
1058                 if (skb == NULL)
1059                         return FALSE;
1060                 // Make sure IP header gets aligned 
1061                 skb_reserve(skb, 1);
1062                 skb_put(skb, len - 2);
1063                 if (self->chip_id == 0x3076) {
1064                         for (i = 0; i < len - 2; i++)
1065                                 skb->data[i] = self->rx_buff.data[i * 2];
1066                 } else {
1067                         if (self->chip_id == 0x3096) {
1068                                 for (i = 0; i < len - 2; i++)
1069                                         skb->data[i] =
1070                                             self->rx_buff.data[i];
1071                         }
1072                 }
1073                 // Move to next frame 
1074                 self->rx_buff.data += len;
1075                 self->netdev->stats.rx_bytes += len;
1076                 self->netdev->stats.rx_packets++;
1077                 skb->dev = self->netdev;
1078                 skb_reset_mac_header(skb);
1079                 skb->protocol = htons(ETH_P_IRDA);
1080                 netif_rx(skb);
1081                 return TRUE;
1082         }
1083
1084         else {                  //FIR mode
1085                 len = GetRecvByte(iobase, self);
1086                 if (len == 0)
1087                         return TRUE;    //interrupt only; data may have been moved by RxTimerHandler
1088                 if (((len - 4) < 2) || ((len - 4) > 2048)) {
1089                         IRDA_DEBUG(1, "%s(): Trouble:len=%x,CurCount=%x,LastCount=%x..\n",
1090                                    __func__, len, RxCurCount(iobase, self),
1091                                    self->RxLastCount);
1092                         hwreset(self);
1093                         return FALSE;
1094                 }
1095                 IRDA_DEBUG(2, "%s(): fifo.len=%x,len=%x,CurCount=%x..\n",
1096                            __func__,
1097                            st_fifo->len, len - 4, RxCurCount(iobase, self));
1098
1099                 st_fifo->entries[st_fifo->tail].status = status;
1100                 st_fifo->entries[st_fifo->tail].len = len;
1101                 st_fifo->pending_bytes += len;
1102                 st_fifo->tail++;
1103                 st_fifo->len++;
1104                 if (st_fifo->tail > MAX_RX_WINDOW)
1105                         st_fifo->tail = 0;
1106                 self->RxDataReady = 0;
1107
1108                 // Up to MAX_RX_WINDOW frames may have been received by
1109                 // receive_complete before the timer IRQ fires
1110 /* F01_S
1111           if (st_fifo->len < (MAX_RX_WINDOW+2 )) { 
1112                   RXStart(iobase,ON);
1113                   SetTimer(iobase,4);
1114           }
1115           else    { 
1116 F01_E */
1117                 EnableRX(iobase, OFF);
1118                 EnRXDMA(iobase, OFF);
1119                 RXStart(iobase, OFF);
1120 //F01_S
1121                 // Put this entry back in fifo 
1122                 if (st_fifo->head > MAX_RX_WINDOW)
1123                         st_fifo->head = 0;
1124                 status = st_fifo->entries[st_fifo->head].status;
1125                 len = st_fifo->entries[st_fifo->head].len;
1126                 st_fifo->head++;
1127                 st_fifo->len--;
1128
1129                 skb = dev_alloc_skb(len + 1 - 4);
1130                 /*
1131                  * if frame size, data ptr, or skb ptr are wrong, then get next
1132                  * entry.
1133                  */
1134                 if ((skb == NULL) || (skb->data == NULL) ||
1135                     (self->rx_buff.data == NULL) || (len < 6)) {
1136                         self->netdev->stats.rx_dropped++;
1137                         kfree_skb(skb);
1138                         return TRUE;
1139                 }
1140                 skb_reserve(skb, 1);
1141                 skb_put(skb, len - 4);
1142
1143                 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1144                 IRDA_DEBUG(2, "%s(): len=%x.rx_buff=%p\n", __func__,
1145                            len - 4, self->rx_buff.data);
1146
1147                 // Move to next frame 
1148                 self->rx_buff.data += len;
1149                 self->netdev->stats.rx_bytes += len;
1150                 self->netdev->stats.rx_packets++;
1151                 skb->dev = self->netdev;
1152                 skb_reset_mac_header(skb);
1153                 skb->protocol = htons(ETH_P_IRDA);
1154                 netif_rx(skb);
1155
1156 //F01_E
1157         }                       //FIR
1158         return TRUE;
1159
1160 }
1161
1162 /*
1163  * If a frame has been received but no interrupt was raised, use this routine to upload it.
1164  */
1165 static int upload_rxdata(struct via_ircc_cb *self, int iobase)
1166 {
1167         struct sk_buff *skb;
1168         int len;
1169         struct st_fifo *st_fifo;
1170         st_fifo = &self->st_fifo;
1171
1172         len = GetRecvByte(iobase, self);
1173
1174         IRDA_DEBUG(2, "%s(): len=%x\n", __func__, len);
1175
1176         if ((len - 4) < 2) {
1177                 self->netdev->stats.rx_dropped++;
1178                 return FALSE;
1179         }
1180
1181         skb = dev_alloc_skb(len + 1);
1182         if (skb == NULL) {
1183                 self->netdev->stats.rx_dropped++;
1184                 return FALSE;
1185         }
1186         skb_reserve(skb, 1);
1187         skb_put(skb, len - 4 + 1);
1188         skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1);
1189         st_fifo->tail++;
1190         st_fifo->len++;
1191         if (st_fifo->tail > MAX_RX_WINDOW)
1192                 st_fifo->tail = 0;
1193         // Move to next frame 
1194         self->rx_buff.data += len;
1195         self->netdev->stats.rx_bytes += len;
1196         self->netdev->stats.rx_packets++;
1197         skb->dev = self->netdev;
1198         skb_reset_mac_header(skb);
1199         skb->protocol = htons(ETH_P_IRDA);
1200         netif_rx(skb);
1201         if (st_fifo->len < (MAX_RX_WINDOW + 2)) {
1202                 RXStart(iobase, ON);
1203         } else {
1204                 EnableRX(iobase, OFF);
1205                 EnRXDMA(iobase, OFF);
1206                 RXStart(iobase, OFF);
1207         }
1208         return TRUE;
1209 }
1210
1211 /*
1212  * Implements back-to-back receive; use this routine to upload the data.
1213  */
1214
1215 static int RxTimerHandler(struct via_ircc_cb *self, int iobase)
1216 {
1217         struct st_fifo *st_fifo;
1218         struct sk_buff *skb;
1219         int len;
1220         u8 status;
1221
1222         st_fifo = &self->st_fifo;
1223
1224         if (CkRxRecv(iobase, self)) {
1225                 // still receiving; return without uploading the frame
1226                 self->RetryCount = 0;
1227                 SetTimer(iobase, 20);
1228                 self->RxDataReady++;
1229                 return FALSE;
1230         } else
1231                 self->RetryCount++;
1232
1233         if ((self->RetryCount >= 1) ||
1234             ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) ||
1235             (st_fifo->len >= (MAX_RX_WINDOW))) {
1236                 while (st_fifo->len > 0) {      //upload frame
1237                         // Put this entry back in fifo 
1238                         if (st_fifo->head > MAX_RX_WINDOW)
1239                                 st_fifo->head = 0;
1240                         status = st_fifo->entries[st_fifo->head].status;
1241                         len = st_fifo->entries[st_fifo->head].len;
1242                         st_fifo->head++;
1243                         st_fifo->len--;
1244
1245                         skb = dev_alloc_skb(len + 1 - 4);
1246                         /*
1247                          * if frame size, data ptr, or skb ptr are wrong,
1248                          * then get next entry.
1249                          */
1250                         if ((skb == NULL) || (skb->data == NULL) ||
1251                             (self->rx_buff.data == NULL) || (len < 6)) {
1252                                 self->netdev->stats.rx_dropped++;
1253                                 continue;
1254                         }
1255                         skb_reserve(skb, 1);
1256                         skb_put(skb, len - 4);
1257                         skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
1258
1259                         IRDA_DEBUG(2, "%s(): len=%x.head=%x\n", __func__,
1260                                    len - 4, st_fifo->head);
1261
1262                         // Move to next frame 
1263                         self->rx_buff.data += len;
1264                         self->netdev->stats.rx_bytes += len;
1265                         self->netdev->stats.rx_packets++;
1266                         skb->dev = self->netdev;
1267                         skb_reset_mac_header(skb);
1268                         skb->protocol = htons(ETH_P_IRDA);
1269                         netif_rx(skb);
1270                 }               //while
1271                 self->RetryCount = 0;
1272
1273                 IRDA_DEBUG(2,
1274                            "%s(): End of upload HostStatus=%x,RxStatus=%x\n",
1275                            __func__,
1276                            GetHostStatus(iobase), GetRXStatus(iobase));
1277
1278                 /*
1279                  * if frame is receive complete at this routine ,then upload
1280                  * frame.
1281                  */
1282                 if ((GetRXStatus(iobase) & 0x10) &&
1283                     (RxCurCount(iobase, self) != self->RxLastCount)) {
1284                         upload_rxdata(self, iobase);
1285                         if (irda_device_txqueue_empty(self->netdev))
1286                                 via_ircc_dma_receive(self);
1287                 }
1288         }                       // timer detect complete
1289         else
1290                 SetTimer(iobase, 4);
1291         return TRUE;
1292
1293 }
1294
1295
1296
1297 /*
1298  * Function via_ircc_interrupt (irq, dev_id)
1299  *
1300  *    An interrupt from the chip has arrived. Time to do some work
1301  *
1302  */
1303 static irqreturn_t via_ircc_interrupt(int dummy, void *dev_id)
1304 {
1305         struct net_device *dev = dev_id;
1306         struct via_ircc_cb *self = netdev_priv(dev);
1307         int iobase;
1308         u8 iHostIntType, iRxIntType, iTxIntType;
1309
1310         iobase = self->io.fir_base;
1311         spin_lock(&self->lock);
1312         iHostIntType = GetHostStatus(iobase);
1313
1314         IRDA_DEBUG(4, "%s(): iHostIntType %02x:  %s %s %s  %02x\n",
1315                    __func__, iHostIntType,
1316                    (iHostIntType & 0x40) ? "Timer" : "",
1317                    (iHostIntType & 0x20) ? "Tx" : "",
1318                    (iHostIntType & 0x10) ? "Rx" : "",
1319                    (iHostIntType & 0x0e) >> 1);
1320
1321         if ((iHostIntType & 0x40) != 0) {       //Timer Event
1322                 self->EventFlag.TimeOut++;
1323                 ClearTimerInt(iobase, 1);
1324                 if (self->io.direction == IO_XMIT) {
1325                         via_ircc_dma_xmit(self, iobase);
1326                 }
1327                 if (self->io.direction == IO_RECV) {
1328                         /*
1329                          * frame ready hold too long, must reset.
1330                          */
1331                         if (self->RxDataReady > 30) {
1332                                 hwreset(self);
1333                                 if (irda_device_txqueue_empty(self->netdev)) {
1334                                         via_ircc_dma_receive(self);
1335                                 }
1336                         } else {        // call this to upload frame.
1337                                 RxTimerHandler(self, iobase);
1338                         }
1339                 }               //RECV
1340         }                       //Timer Event
1341         if ((iHostIntType & 0x20) != 0) {       //Tx Event
1342                 iTxIntType = GetTXStatus(iobase);
1343
1344                 IRDA_DEBUG(4, "%s(): iTxIntType %02x:  %s %s %s %s\n",
1345                            __func__, iTxIntType,
1346                            (iTxIntType & 0x08) ? "FIFO underr." : "",
1347                            (iTxIntType & 0x04) ? "EOM" : "",
1348                            (iTxIntType & 0x02) ? "FIFO ready" : "",
1349                            (iTxIntType & 0x01) ? "Early EOM" : "");
1350
1351                 if (iTxIntType & 0x4) {
1352                         self->EventFlag.EOMessage++;    // read and will auto clean
1353                         if (via_ircc_dma_xmit_complete(self)) {
1354                                 if (irda_device_txqueue_empty
1355                                     (self->netdev)) {
1356                                         via_ircc_dma_receive(self);
1357                                 }
1358                         } else {
1359                                 self->EventFlag.Unknown++;
1360                         }
1361                 }               //EOP
1362         }                       //Tx Event
1363         //----------------------------------------
1364         if ((iHostIntType & 0x10) != 0) {       //Rx Event
1365                 /* Check if DMA has finished */
1366                 iRxIntType = GetRXStatus(iobase);
1367
1368                 IRDA_DEBUG(4, "%s(): iRxIntType %02x:  %s %s %s %s %s %s %s\n",
1369                            __func__, iRxIntType,
1370                            (iRxIntType & 0x80) ? "PHY err."     : "",
1371                            (iRxIntType & 0x40) ? "CRC err"      : "",
1372                            (iRxIntType & 0x20) ? "FIFO overr."  : "",
1373                            (iRxIntType & 0x10) ? "EOF"          : "",
1374                            (iRxIntType & 0x08) ? "RxData"       : "",
1375                            (iRxIntType & 0x02) ? "RxMaxLen"     : "",
1376                            (iRxIntType & 0x01) ? "SIR bad"      : "");
1377                 if (!iRxIntType)
1378                         IRDA_DEBUG(3, "%s(): RxIRQ =0\n", __func__);
1379
1380                 if (iRxIntType & 0x10) {
1381                         if (via_ircc_dma_receive_complete(self, iobase)) {
1382 //F01       if(!(IsFIROn(iobase)))  via_ircc_dma_receive(self);
1383                                 via_ircc_dma_receive(self);
1384                         }
1385                 }               // No ERR     
1386                 else {          //ERR
1387                         IRDA_DEBUG(4, "%s(): RxIRQ ERR:iRxIntType=%x,HostIntType=%x,CurCount=%x,RxLastCount=%x_____\n",
1388                                    __func__, iRxIntType, iHostIntType,
1389                                    RxCurCount(iobase, self),
1390                                    self->RxLastCount);
1391
1392                         if (iRxIntType & 0x20) {        //FIFO OverRun ERR
1393                                 ResetChip(iobase, 0);
1394                                 ResetChip(iobase, 1);
1395                         } else {        //PHY,CRC ERR
1396
1397                                 if (iRxIntType != 0x08)
1398                                         hwreset(self);  //F01
1399                         }
1400                         via_ircc_dma_receive(self);
1401                 }               //ERR
1402
1403         }                       //Rx Event
1404         spin_unlock(&self->lock);
1405         return IRQ_RETVAL(iHostIntType);
1406 }
1407
1408 static void hwreset(struct via_ircc_cb *self)
1409 {
1410         int iobase;
1411         iobase = self->io.fir_base;
1412
1413         IRDA_DEBUG(3, "%s()\n", __func__);
1414
1415         ResetChip(iobase, 5);
1416         EnableDMA(iobase, OFF);
1417         EnableTX(iobase, OFF);
1418         EnableRX(iobase, OFF);
1419         EnRXDMA(iobase, OFF);
1420         EnTXDMA(iobase, OFF);
1421         RXStart(iobase, OFF);
1422         TXStart(iobase, OFF);
1423         InitCard(iobase);
1424         CommonInit(iobase);
1425         SIRFilter(iobase, ON);
1426         SetSIR(iobase, ON);
1427         CRC16(iobase, ON);
1428         EnTXCRC(iobase, 0);
1429         WriteReg(iobase, I_ST_CT_0, 0x00);
1430         SetBaudRate(iobase, 9600);
1431         SetPulseWidth(iobase, 12);
1432         SetSendPreambleCount(iobase, 0);
1433         WriteReg(iobase, I_ST_CT_0, 0x80);
1434
1435         /* Restore speed. */
1436         via_ircc_change_speed(self, self->io.speed);
1437
1438         self->st_fifo.len = 0;
1439 }
1440
1441 /*
1442  * Function via_ircc_is_receiving (self)
1443  *
1444  *    Return TRUE if we are currently receiving a frame
1445  *
1446  */
1447 static int via_ircc_is_receiving(struct via_ircc_cb *self)
1448 {
1449         int status = FALSE;
1450         int iobase;
1451
1452         IRDA_ASSERT(self != NULL, return FALSE;);
1453
1454         iobase = self->io.fir_base;
1455         if (CkRxRecv(iobase, self))
1456                 status = TRUE;
1457
1458         IRDA_DEBUG(2, "%s(): status=%x....\n", __func__, status);
1459
1460         return status;
1461 }
1462
1463
1464 /*
1465  * Function via_ircc_net_open (dev)
1466  *
1467  *    Start the device
1468  *
1469  */
1470 static int via_ircc_net_open(struct net_device *dev)
1471 {
1472         struct via_ircc_cb *self;
1473         int iobase;
1474         char hwname[32];
1475
1476         IRDA_DEBUG(3, "%s()\n", __func__);
1477
1478         IRDA_ASSERT(dev != NULL, return -1;);
1479         self = netdev_priv(dev);
1480         dev->stats.rx_packets = 0;
1481         IRDA_ASSERT(self != NULL, return 0;);
1482         iobase = self->io.fir_base;
1483         if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
1484                 IRDA_WARNING("%s, unable to allocate irq=%d\n", driver_name,
1485                              self->io.irq);
1486                 return -EAGAIN;
1487         }
1488         /*
1489          * Always allocate the DMA channel after the IRQ, and clean up on 
1490          * failure.
1491          */
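        /*
         * io.dma drives the transmit side (via_ircc_dma_xmit) and io.dma2
         * the receive side (via_ircc_dma_receive), which is why a second
         * channel is requested below whenever the two differ.
         */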
1492         if (request_dma(self->io.dma, dev->name)) {
1493                 IRDA_WARNING("%s, unable to allocate dma=%d\n", driver_name,
1494                              self->io.dma);
1495                 free_irq(self->io.irq, dev);
1496                 return -EAGAIN;
1497         }
1498         if (self->io.dma2 != self->io.dma) {
1499                 if (request_dma(self->io.dma2, dev->name)) {
1500                         IRDA_WARNING("%s, unable to allocate dma2=%d\n",
1501                                      driver_name, self->io.dma2);
1502                         free_irq(self->io.irq, dev);
1503                         free_dma(self->io.dma);
1504                         return -EAGAIN;
1505                 }
1506         }
1507
1508
1509         /* turn on interrupts */
1510         EnAllInt(iobase, ON);
1511         EnInternalLoop(iobase, OFF);
1512         EnExternalLoop(iobase, OFF);
1513
1514         /* Prime the receiver so we can accept frames right away */
1515         via_ircc_dma_receive(self);
1516
1517         /* Ready to play! */
1518         netif_start_queue(dev);
1519
1520         /* 
1521          * Open new IrLAP layer instance, now that everything should be
1522          * initialized properly 
1523          */
1524         sprintf(hwname, "VIA @ 0x%x", iobase);
1525         self->irlap = irlap_open(dev, &self->qos, hwname);
1526
1527         self->RxLastCount = 0;
1528
1529         return 0;
1530 }
1531
1532 /*
1533  * Function via_ircc_net_close (dev)
1534  *
1535  *    Stop the device
1536  *
1537  */
1538 static int via_ircc_net_close(struct net_device *dev)
1539 {
1540         struct via_ircc_cb *self;
1541         int iobase;
1542
1543         IRDA_DEBUG(3, "%s()\n", __func__);
1544
1545         IRDA_ASSERT(dev != NULL, return -1;);
1546         self = netdev_priv(dev);
1547         IRDA_ASSERT(self != NULL, return 0;);
1548
1549         /* Stop device */
1550         netif_stop_queue(dev);
1551         /* Stop and remove instance of IrLAP */
1552         if (self->irlap)
1553                 irlap_close(self->irlap);
1554         self->irlap = NULL;
1555         iobase = self->io.fir_base;
1556         EnTXDMA(iobase, OFF);
1557         EnRXDMA(iobase, OFF);
1558         DisableDmaChannel(self->io.dma);
1559
1560         /* Disable interrupts */
1561         EnAllInt(iobase, OFF);
1562         free_irq(self->io.irq, dev);
1563         free_dma(self->io.dma);
1564         if (self->io.dma2 != self->io.dma)
1565                 free_dma(self->io.dma2);
1566
1567         return 0;
1568 }
1569
1570 /*
1571  * Function via_ircc_net_ioctl (dev, rq, cmd)
1572  *
1573  *    Process IOCTL commands for this device
1574  *
1575  */
1576 static int via_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq,
1577                               int cmd)
1578 {
1579         struct if_irda_req *irq = (struct if_irda_req *) rq;
1580         struct via_ircc_cb *self;
1581         unsigned long flags;
1582         int ret = 0;
1583
1584         IRDA_ASSERT(dev != NULL, return -1;);
1585         self = netdev_priv(dev);
1586         IRDA_ASSERT(self != NULL, return -1;);
1587         IRDA_DEBUG(1, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name,
1588                    cmd);
1589         /* Disable interrupts & save flags */
1590         spin_lock_irqsave(&self->lock, flags);
1591         switch (cmd) {
1592         case SIOCSBANDWIDTH:    /* Set bandwidth */
1593                 if (!capable(CAP_NET_ADMIN)) {
1594                         ret = -EPERM;
1595                         goto out;
1596                 }
1597                 via_ircc_change_speed(self, irq->ifr_baudrate);
1598                 break;
1599         case SIOCSMEDIABUSY:    /* Set media busy */
1600                 if (!capable(CAP_NET_ADMIN)) {
1601                         ret = -EPERM;
1602                         goto out;
1603                 }
1604                 irda_device_set_media_busy(self->netdev, TRUE);
1605                 break;
1606         case SIOCGRECEIVING:    /* Check if we are receiving right now */
1607                 irq->ifr_receiving = via_ircc_is_receiving(self);
1608                 break;
1609         default:
1610                 ret = -EOPNOTSUPP;
1611         }
1612       out:
1613         spin_unlock_irqrestore(&self->lock, flags);
1614         return ret;
1615 }
1616
1617 MODULE_AUTHOR("VIA Technologies,inc");
1618 MODULE_DESCRIPTION("VIA IrDA Device Driver");
1619 MODULE_LICENSE("GPL");
1620
1621 module_init(via_ircc_init);
1622 module_exit(via_ircc_cleanup);